Diffstat (limited to 'arch')
-rw-r--r-- arch/.gitignore | 4
-rw-r--r-- arch/Kconfig | 99
-rw-r--r-- arch/alpha/Kconfig | 27
-rw-r--r-- arch/alpha/configs/defconfig | 13
-rw-r--r-- arch/alpha/include/asm/atomic.h | 88
-rw-r--r-- arch/alpha/include/asm/cmpxchg.h | 12
-rw-r--r-- arch/alpha/include/asm/io.h | 5
-rw-r--r-- arch/alpha/include/asm/machvec.h | 6
-rw-r--r-- arch/alpha/include/asm/mmzone.h | 100
-rw-r--r-- arch/alpha/include/asm/page.h | 6
-rw-r--r-- arch/alpha/include/asm/pgalloc.h | 1
-rw-r--r-- arch/alpha/include/asm/pgtable.h | 5
-rw-r--r-- arch/alpha/include/asm/topology.h | 39
-rw-r--r-- arch/alpha/include/asm/unaligned.h | 12
-rw-r--r-- arch/alpha/include/uapi/asm/mman.h | 3
-rw-r--r-- arch/alpha/include/uapi/asm/socket.h | 2
-rw-r--r-- arch/alpha/kernel/core_marvel.c | 53
-rw-r--r-- arch/alpha/kernel/core_wildfire.c | 29
-rw-r--r-- arch/alpha/kernel/pc873xx.c | 4
-rw-r--r-- arch/alpha/kernel/pci_iommu.c | 29
-rw-r--r-- arch/alpha/kernel/process.c | 2
-rw-r--r-- arch/alpha/kernel/proto.h | 8
-rw-r--r-- arch/alpha/kernel/setup.c | 18
-rw-r--r-- arch/alpha/kernel/smp.c | 1
-rw-r--r-- arch/alpha/kernel/sys_marvel.c | 5
-rw-r--r-- arch/alpha/kernel/sys_wildfire.c | 5
-rw-r--r-- arch/alpha/kernel/syscalls/Makefile | 14
-rw-r--r-- arch/alpha/kernel/syscalls/syscall.tbl | 4
-rw-r--r-- arch/alpha/kernel/syscalls/syscallhdr.sh | 36
-rw-r--r-- arch/alpha/kernel/syscalls/syscalltbl.sh | 32
-rw-r--r-- arch/alpha/kernel/systbls.S | 3
-rw-r--r-- arch/alpha/lib/csum_partial_copy.c | 1
-rw-r--r-- arch/alpha/mm/Makefile | 2
-rw-r--r-- arch/alpha/mm/init.c | 4
-rw-r--r-- arch/alpha/mm/numa.c | 223
-rw-r--r-- arch/arc/Kconfig | 22
-rw-r--r-- arch/arc/Makefile | 2
-rw-r--r-- arch/arc/boot/dts/haps_hs.dts | 2
-rw-r--r-- arch/arc/include/asm/atomic.h | 60
-rw-r--r-- arch/arc/include/asm/cmpxchg.h | 14
-rw-r--r-- arch/arc/include/asm/mmzone.h | 40
-rw-r--r-- arch/arc/include/asm/page.h | 12
-rw-r--r-- arch/arc/include/asm/pgalloc.h | 2
-rw-r--r-- arch/arc/include/asm/pgtable.h | 20
-rw-r--r-- arch/arc/include/uapi/asm/page.h | 1
-rw-r--r-- arch/arc/include/uapi/asm/sigcontext.h | 1
-rw-r--r-- arch/arc/kernel/entry.S | 4
-rw-r--r-- arch/arc/kernel/kgdb.c | 1
-rw-r--r-- arch/arc/kernel/kprobes.c | 16
-rw-r--r-- arch/arc/kernel/process.c | 8
-rw-r--r-- arch/arc/kernel/signal.c | 51
-rw-r--r-- arch/arc/kernel/smp.c | 1
-rw-r--r-- arch/arc/kernel/stacktrace.c | 2
-rw-r--r-- arch/arc/kernel/troubleshoot.c | 8
-rw-r--r-- arch/arc/kernel/unwind.c | 27
-rw-r--r-- arch/arc/kernel/vmlinux.lds.S | 2
-rw-r--r-- arch/arc/mm/init.c | 33
-rw-r--r-- arch/arc/mm/ioremap.c | 5
-rw-r--r-- arch/arc/mm/tlb.c | 2
-rw-r--r-- arch/arm/Kconfig | 27
-rw-r--r-- arch/arm/Kconfig.debug | 6
-rw-r--r-- arch/arm/Makefile | 2
-rw-r--r-- arch/arm/boot/compressed/Makefile | 11
-rw-r--r-- arch/arm/boot/dts/Makefile | 22
-rw-r--r-- arch/arm/boot/dts/am335x-boneblack.dts | 132
-rw-r--r-- arch/arm/boot/dts/am335x-pocketbeagle.dts | 140
-rw-r--r-- arch/arm/boot/dts/am33xx-clocks.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/am33xx-l4.dtsi | 28
-rw-r--r-- arch/arm/boot/dts/am33xx.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/am43xx-clocks.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/armada-385-atl-x530.dts | 235
-rw-r--r-- arch/arm/boot/dts/armada-385-turris-omnia.dts | 4
-rw-r--r-- arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts | 30
-rw-r--r-- arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts | 202
-rw-r--r-- arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts | 5
-rw-r--r-- arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts | 1565
-rw-r--r-- arch/arm/boot/dts/aspeed-bmc-ibm-rainier-1s4u.dts | 14
-rw-r--r-- arch/arm/boot/dts/aspeed-bmc-ibm-rainier-4u.dts | 14
-rw-r--r-- arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts | 748
-rw-r--r-- arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts | 33
-rw-r--r-- arch/arm/boot/dts/aspeed-g4.dtsi | 76
-rw-r--r-- arch/arm/boot/dts/aspeed-g5.dtsi | 137
-rw-r--r-- arch/arm/boot/dts/aspeed-g6.dtsi | 137
-rw-r--r-- arch/arm/boot/dts/at91-sam9x60ek.dts | 11
-rw-r--r-- arch/arm/boot/dts/at91-sama5d27_som1.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/at91-sama5d27_som1_ek.dts | 3
-rw-r--r-- arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts | 3
-rw-r--r-- arch/arm/boot/dts/at91-sama5d2_icp.dts | 3
-rw-r--r-- arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts | 3
-rw-r--r-- arch/arm/boot/dts/at91-sama5d2_xplained.dts | 3
-rw-r--r-- arch/arm/boot/dts/at91-sama5d3_xplained.dts | 3
-rw-r--r-- arch/arm/boot/dts/at91sam9260ek.dts | 3
-rw-r--r-- arch/arm/boot/dts/at91sam9g20ek_common.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/bcm2711.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm4708-linksys-ea6300-v1.dts | 5
-rw-r--r-- arch/arm/boot/dts/bcm4708-netgear-r6250.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts | 5
-rw-r--r-- arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm4709-linksys-ea9200.dts | 9
-rw-r--r-- arch/arm/boot/dts/bcm4709-netgear-r7000.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm4709-netgear-r8000.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm47094-linksys-panamera.dts | 26
-rw-r--r-- arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts | 9
-rw-r--r-- arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts | 9
-rw-r--r-- arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts | 9
-rw-r--r-- arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts | 9
-rw-r--r-- arch/arm/boot/dts/bcm47094-netgear-r8500.dts | 4
-rw-r--r-- arch/arm/boot/dts/bcm47094-phicomm-k3.dts | 4
-rw-r--r-- arch/arm/boot/dts/dra7-l4.dtsi | 79
-rw-r--r-- arch/arm/boot/dts/dra7.dtsi | 240
-rw-r--r-- arch/arm/boot/dts/ep7209.dtsi | 18
-rw-r--r-- arch/arm/boot/dts/ep7211-edb7211.dts | 2
-rw-r--r-- arch/arm/boot/dts/exynos3250-monk.dts | 4
-rw-r--r-- arch/arm/boot/dts/exynos3250-rinato.dts | 4
-rw-r--r-- arch/arm/boot/dts/exynos4210-i9100.dts | 56
-rw-r--r-- arch/arm/boot/dts/exynos4412-midas.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/exynos4412-odroid-common.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/exynos4412-odroidx.dts | 3
-rw-r--r-- arch/arm/boot/dts/exynos4412-p4note.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/exynos4412-ppmu-common.dtsi | 48
-rw-r--r-- arch/arm/boot/dts/exynos5250-smdk5250.dts | 2
-rw-r--r-- arch/arm/boot/dts/exynos5250-snow-common.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/exynos5410-pinctrl.dtsi | 28
-rw-r--r-- arch/arm/boot/dts/imx50-kobo-aura.dts | 16
-rw-r--r-- arch/arm/boot/dts/imx51.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx53-qsb-common.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx53.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6dl-plybas.dts | 6
-rw-r--r-- arch/arm/boot/dts/imx6dl-yapp4-common.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/imx6q-b450v3.dts | 5
-rw-r--r-- arch/arm/boot/dts/imx6q-b650v3.dts | 5
-rw-r--r-- arch/arm/boot/dts/imx6q-b850v3.dts | 5
-rw-r--r-- arch/arm/boot/dts/imx6q-ba16.dtsi | 21
-rw-r--r-- arch/arm/boot/dts/imx6q-bx50v3.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/imx6q-dhcom-som.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw52xx.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw53xx.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw54xx.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw551x.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw552x.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw560x.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw5903.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw5904.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw5907.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw5910.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw5912.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw5913.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi | 40
-rw-r--r-- arch/arm/boot/dts/imx6qdl-ts7970.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/imx6qdl-wandboard.dtsi | 24
-rw-r--r-- arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts | 7
-rw-r--r-- arch/arm/boot/dts/imx6ul-14x14-evk.dtsi | 22
-rw-r--r-- arch/arm/boot/dts/imx6ull-colibri.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts | 1
-rw-r--r-- arch/arm/boot/dts/imx7d-mba7.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx7d-meerkat96.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx7d-pico.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx7d-remarkable2.dts | 146
-rw-r--r-- arch/arm/boot/dts/iwg20d-q7-common.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/ls1021a.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi | 59
-rw-r--r-- arch/arm/boot/dts/mstar-infinity2m-ssd202d-unitv2.dts | 25
-rw-r--r-- arch/arm/boot/dts/mstar-v7.dtsi | 23
-rw-r--r-- arch/arm/boot/dts/mt2701.dtsi | 19
-rw-r--r-- arch/arm/boot/dts/mt6589.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/mt7623.dtsi | 26
-rw-r--r-- arch/arm/boot/dts/mt7623n.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/mt7629.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/nuvoton-npcm730-gbs.dts | 1135
-rw-r--r-- arch/arm/boot/dts/nuvoton-npcm750-evb.dts | 2
-rw-r--r-- arch/arm/boot/dts/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts | 40
-rw-r--r-- arch/arm/boot/dts/nuvoton-wpcm450.dtsi | 76
-rw-r--r-- arch/arm/boot/dts/omap3-echo.dts | 476
-rw-r--r-- arch/arm/boot/dts/omap3.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/omap4-l4.dtsi | 43
-rw-r--r-- arch/arm/boot/dts/omap4.dtsi | 174
-rw-r--r-- arch/arm/boot/dts/omap4460.dtsi | 13
-rw-r--r-- arch/arm/boot/dts/omap44xx-clocks.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/omap5-l4.dtsi | 67
-rw-r--r-- arch/arm/boot/dts/omap5.dtsi | 209
-rw-r--r-- arch/arm/boot/dts/owl-s500-roseapplepi.dts | 132
-rw-r--r-- arch/arm/boot/dts/qcom-ipq4019.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts | 6
-rw-r--r-- arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts | 52
-rw-r--r-- arch/arm/boot/dts/qcom-msm8974.dtsi | 9
-rw-r--r-- arch/arm/boot/dts/qcom-sdx55-t55.dts | 281
-rw-r--r-- arch/arm/boot/dts/qcom-sdx55-telit-fn980-tlb.dts | 282
-rw-r--r-- arch/arm/boot/dts/qcom-sdx55.dtsi | 207
-rw-r--r-- arch/arm/boot/dts/r7s9210-rza2mevb.dts | 55
-rw-r--r-- arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ca.dts | 159
-rw-r--r-- arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi | 32
-rw-r--r-- arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi | 29
-rw-r--r-- arch/arm/boot/dts/r8a7742-iwg21d-q7.dts | 4
-rw-r--r-- arch/arm/boot/dts/r8a7742.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/r8a7743-sk-rzg1m.dts | 2
-rw-r--r-- arch/arm/boot/dts/r8a7743.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/r8a7744.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts | 4
-rw-r--r-- arch/arm/boot/dts/r8a7745-sk-rzg1e.dts | 2
-rw-r--r-- arch/arm/boot/dts/r8a7745.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/r8a77470.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/r8a7790-lager.dts | 11
-rw-r--r-- arch/arm/boot/dts/r8a7790-stout.dts | 4
-rw-r--r-- arch/arm/boot/dts/r8a7790.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/r8a7791-koelsch.dts | 19
-rw-r--r-- arch/arm/boot/dts/r8a7791-porter.dts | 6
-rw-r--r-- arch/arm/boot/dts/r8a7791.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/r8a7792-blanche.dts | 2
-rw-r--r-- arch/arm/boot/dts/r8a7792-wheat.dts | 2
-rw-r--r-- arch/arm/boot/dts/r8a7792.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/r8a7793-gose.dts | 11
-rw-r--r-- arch/arm/boot/dts/r8a7793.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/r8a7794-alt.dts | 5
-rw-r--r-- arch/arm/boot/dts/r8a7794-silk.dts | 8
-rw-r--r-- arch/arm/boot/dts/r8a7794.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/rk3036.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/rk3228-evb.dts | 4
-rw-r--r-- arch/arm/boot/dts/rk3229-evb.dts | 4
-rw-r--r-- arch/arm/boot/dts/rk3229-xms6.dts | 6
-rw-r--r-- arch/arm/boot/dts/rk322x.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/rk3288.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/rv1108-elgin-r1.dts | 4
-rw-r--r-- arch/arm/boot/dts/rv1108-evb.dts | 4
-rw-r--r-- arch/arm/boot/dts/rv1108.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/s5pv210-fascinate4g.dts | 2
-rw-r--r-- arch/arm/boot/dts/sam9x60.dtsi | 9
-rw-r--r-- arch/arm/boot/dts/sama5d2.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/sama5d3.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sama5d4.dtsi | 7
-rw-r--r-- arch/arm/boot/dts/ste-ab8500.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi | 214
-rw-r--r-- arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi | 210
-rw-r--r-- arch/arm/boot/dts/ste-href-tvk1281618.dtsi | 218
-rw-r--r-- arch/arm/boot/dts/ste-href520-tvk.dts | 4
-rw-r--r-- arch/arm/boot/dts/ste-hrefprev60-tvk.dts | 2
-rw-r--r-- arch/arm/boot/dts/ste-hrefv60plus-tvk.dts | 4
-rw-r--r-- arch/arm/boot/dts/ste-ux500-samsung-janice.dts | 48
-rw-r--r-- arch/arm/boot/dts/stm32h7-pinctrl.dtsi | 275
-rw-r--r-- arch/arm/boot/dts/stm32h743-pinctrl.dtsi | 306
-rw-r--r-- arch/arm/boot/dts/stm32h743.dtsi | 177
-rw-r--r-- arch/arm/boot/dts/stm32h743i-disco.dts | 2
-rw-r--r-- arch/arm/boot/dts/stm32h743i-eval.dts | 2
-rw-r--r-- arch/arm/boot/dts/stm32h750.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/stm32h750i-art-pi.dts | 229
-rw-r--r-- arch/arm/boot/dts/stm32mp15-pinctrl.dtsi | 21
-rw-r--r-- arch/arm/boot/dts/stm32mp151.dtsi | 32
-rw-r--r-- arch/arm/boot/dts/stm32mp153c-dhcom-drc02.dts | 4
-rw-r--r-- arch/arm/boot/dts/stm32mp157a-icore-stm32mp1-ctouch2.dts | 47
-rw-r--r-- arch/arm/boot/dts/stm32mp157a-icore-stm32mp1-edimm2.2.dts | 47
-rw-r--r-- arch/arm/boot/dts/stm32mp157a-icore-stm32mp1.dtsi | 196
-rw-r--r-- arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0-of7.dts | 154
-rw-r--r-- arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0.dts | 55
-rw-r--r-- arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1.dtsi | 148
-rw-r--r-- arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts | 4
-rw-r--r-- arch/arm/boot/dts/stm32mp157c-dhcom-picoitx.dts | 4
-rw-r--r-- arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi | 64
-rw-r--r-- arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi | 35
-rw-r--r-- arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/sun4i-a10-topwise-a721.dts | 242
-rw-r--r-- arch/arm/boot/dts/sun6i-a31-hummingbird.dts | 2
-rw-r--r-- arch/arm/boot/dts/sun6i-a31-m9.dts | 2
-rw-r--r-- arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts | 2
-rw-r--r-- arch/arm/boot/dts/sun6i-a31.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/sun6i-a31s-primo81.dts | 2
-rw-r--r-- arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts | 2
-rw-r--r-- arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts | 2
-rw-r--r-- arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sun8i-a23-a33.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/sun8i-a33-olinuxino.dts | 2
-rw-r--r-- arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts | 2
-rw-r--r-- arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts | 4
-rw-r--r-- arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts | 4
-rw-r--r-- arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts | 4
-rw-r--r-- arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts | 4
-rw-r--r-- arch/arm/boot/dts/sun8i-a83t.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts | 25
-rw-r--r-- arch/arm/boot/dts/sun8i-h3-beelink-x2.dts | 11
-rw-r--r-- arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts | 2
-rw-r--r-- arch/arm/boot/dts/sun8i-r16-parrot.dts | 2
-rw-r--r-- arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sunxi-h3-h5.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/tegra124-peripherals-opp.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/tegra20-acer-a500-picasso.dts | 16
-rw-r--r-- arch/arm/boot/dts/tegra20-cpu-opp.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/tegra20-paz00.dts | 14
-rw-r--r-- arch/arm/boot/dts/tegra20-peripherals-opp.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/tegra20-ventana.dts | 78
-rw-r--r-- arch/arm/boot/dts/tegra30-apalis.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/tegra30-asus-nexus7-grouper-common.dtsi | 14
-rw-r--r-- arch/arm/boot/dts/tegra30-asus-nexus7-grouper-ti-pmic.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/tegra30-beaver.dts | 1
-rw-r--r-- arch/arm/boot/dts/tegra30-cardhu-a04.dts | 48
-rw-r--r-- arch/arm/boot/dts/tegra30-cardhu.dtsi | 84
-rw-r--r-- arch/arm/boot/dts/tegra30-colibri.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/tegra30-cpu-opp.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/tegra30-ouya.dts | 16
-rw-r--r-- arch/arm/boot/dts/tegra30-peripherals-opp.dtsi | 3
-rw-r--r-- arch/arm/boot/dts/uniphier-pxs2.dtsi | 2
-rw-r--r-- arch/arm/configs/at91_dt_defconfig | 5
-rw-r--r-- arch/arm/configs/dove_defconfig | 1
-rw-r--r-- arch/arm/configs/ezx_defconfig | 1
-rw-r--r-- arch/arm/configs/footbridge_defconfig | 2
-rw-r--r-- arch/arm/configs/imote2_defconfig | 1
-rw-r--r-- arch/arm/configs/magician_defconfig | 1
-rw-r--r-- arch/arm/configs/moxart_defconfig | 1
-rw-r--r-- arch/arm/configs/mps2_defconfig | 1
-rw-r--r-- arch/arm/configs/multi_v5_defconfig | 3
-rw-r--r-- arch/arm/configs/multi_v7_defconfig | 4
-rw-r--r-- arch/arm/configs/mvebu_v5_defconfig | 1
-rw-r--r-- arch/arm/configs/omap2plus_defconfig | 151
-rw-r--r-- arch/arm/configs/pxa_defconfig | 3
-rw-r--r-- arch/arm/configs/qcom_defconfig | 10
-rw-r--r-- arch/arm/configs/sama5_defconfig | 51
-rw-r--r-- arch/arm/configs/shmobile_defconfig | 1
-rw-r--r-- arch/arm/configs/socfpga_defconfig | 2
-rw-r--r-- arch/arm/configs/u8500_defconfig | 14
-rw-r--r-- arch/arm/configs/xcep_defconfig | 1
-rw-r--r-- arch/arm/crypto/Makefile | 10
-rw-r--r-- arch/arm/crypto/aes-cipher-core.S | 42
-rw-r--r-- arch/arm/crypto/blake2b-neon-glue.c | 4
-rw-r--r-- arch/arm/crypto/blake2s-core.S | 21
-rw-r--r-- arch/arm/crypto/chacha-scalar-core.S | 43
-rw-r--r-- arch/arm/crypto/curve25519-core.S | 2
-rw-r--r-- arch/arm/crypto/poly1305-core.S_shipped | 1158
-rw-r--r-- arch/arm/crypto/poly1305-glue.c | 2
-rw-r--r-- arch/arm/crypto/sha256-core.S_shipped | 2816
-rw-r--r-- arch/arm/crypto/sha512-core.S_shipped | 1869
-rw-r--r-- arch/arm/include/asm/atomic.h | 96
-rw-r--r-- arch/arm/include/asm/bug.h | 1
-rw-r--r-- arch/arm/include/asm/cmpxchg.h | 20
-rw-r--r-- arch/arm/include/asm/cpuidle.h | 10
-rw-r--r-- arch/arm/include/asm/hypervisor.h | 3
-rw-r--r-- arch/arm/include/asm/io.h | 5
-rw-r--r-- arch/arm/include/asm/kexec.h | 3
-rw-r--r-- arch/arm/include/asm/memory.h | 15
-rw-r--r-- arch/arm/include/asm/paravirt.h | 14
-rw-r--r-- arch/arm/include/asm/pgalloc.h | 1
-rw-r--r-- arch/arm/include/asm/pgtable-3level.h | 2
-rw-r--r-- arch/arm/include/asm/pgtable.h | 3
-rw-r--r-- arch/arm/include/asm/set_memory.h | 8
-rw-r--r-- arch/arm/include/asm/spinlock.h | 2
-rw-r--r-- arch/arm/include/asm/sync_bitops.h | 2
-rw-r--r-- arch/arm/include/asm/tlbflush.h | 13
-rw-r--r-- arch/arm/include/asm/unaligned.h | 27
-rw-r--r-- arch/arm/include/asm/xen/swiotlb-xen.h | 1
-rw-r--r-- arch/arm/include/uapi/asm/Kbuild | 1
-rw-r--r-- arch/arm/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/arm/kernel/asm-offsets.c | 3
-rw-r--r-- arch/arm/kernel/entry-common.S | 8
-rw-r--r-- arch/arm/kernel/hw_breakpoint.c | 2
-rw-r--r-- arch/arm/kernel/irq.c | 22
-rw-r--r-- arch/arm/kernel/machine_kexec.c | 8
-rw-r--r-- arch/arm/kernel/paravirt.c | 9
-rw-r--r-- arch/arm/kernel/perf_event_v7.c | 4
-rw-r--r-- arch/arm/kernel/process.c | 13
-rw-r--r-- arch/arm/kernel/setup.c | 16
-rw-r--r-- arch/arm/kernel/smccc-call.S | 11
-rw-r--r-- arch/arm/kernel/smp.c | 1
-rw-r--r-- arch/arm/kernel/suspend.c | 19
-rw-r--r-- arch/arm/kernel/traps.c | 1
-rw-r--r-- arch/arm/mach-at91/pm.c | 19
-rw-r--r-- arch/arm/mach-davinci/board-da830-evm.c | 6
-rw-r--r-- arch/arm/mach-davinci/board-dm365-evm.c | 6
-rw-r--r-- arch/arm/mach-davinci/board-dm644x-evm.c | 6
-rw-r--r-- arch/arm/mach-davinci/board-dm646x-evm.c | 6
-rw-r--r-- arch/arm/mach-davinci/board-mityomapl138.c | 6
-rw-r--r-- arch/arm/mach-davinci/board-sffsdr.c | 6
-rw-r--r-- arch/arm/mach-exynos/platsmp.c | 28
-rw-r--r-- arch/arm/mach-footbridge/Kconfig | 21
-rw-r--r-- arch/arm/mach-footbridge/Makefile | 2
-rw-r--r-- arch/arm/mach-footbridge/cats-pci.c | 4
-rw-r--r-- arch/arm/mach-footbridge/ebsa285-pci.c | 4
-rw-r--r-- arch/arm/mach-footbridge/netwinder-pci.c | 2
-rw-r--r-- arch/arm/mach-footbridge/personal-pci.c | 58
-rw-r--r-- arch/arm/mach-footbridge/personal.c | 25
-rw-r--r-- arch/arm/mach-hisi/hisilicon.c | 4
-rw-r--r-- arch/arm/mach-hisi/hotplug.c | 2
-rw-r--r-- arch/arm/mach-hisi/platmcpm.c | 2
-rw-r--r-- arch/arm/mach-hisi/platsmp.c | 2
-rw-r--r-- arch/arm/mach-imx/Kconfig | 2
-rw-r--r-- arch/arm/mach-imx/avic.c | 16
-rw-r--r-- arch/arm/mach-imx/common.h | 1
-rw-r--r-- arch/arm/mach-imx/mach-imx1.c | 11
-rw-r--r-- arch/arm/mach-imx/mach-imx25.c | 12
-rw-r--r-- arch/arm/mach-imx/mach-imx27.c | 12
-rw-r--r-- arch/arm/mach-imx/mach-imx31.c | 1
-rw-r--r-- arch/arm/mach-imx/mach-imx35.c | 1
-rw-r--r-- arch/arm/mach-imx/mm-imx3.c | 24
-rw-r--r-- arch/arm/mach-imx/pm-imx27.c | 1
-rw-r--r-- arch/arm/mach-imx/pm-imx5.c | 2
-rw-r--r-- arch/arm/mach-iop32x/n2100.c | 8
-rw-r--r-- arch/arm/mach-ixp4xx/common.c | 26
-rw-r--r-- arch/arm/mach-keystone/keystone.c | 4
-rw-r--r-- arch/arm/mach-mstar/Kconfig | 1
-rw-r--r-- arch/arm/mach-mvebu/kirkwood.c | 3
-rw-r--r-- arch/arm/mach-npcm/Kconfig | 13
-rw-r--r-- arch/arm/mach-npcm/Makefile | 1
-rw-r--r-- arch/arm/mach-npcm/wpcm450.c | 13
-rw-r--r-- arch/arm/mach-omap1/ams-delta-fiq-handler.S | 1
-rw-r--r-- arch/arm/mach-omap1/board-ams-delta.c | 14
-rw-r--r-- arch/arm/mach-omap1/board-h2.c | 4
-rw-r--r-- arch/arm/mach-omap1/board-osk.c | 6
-rw-r--r-- arch/arm/mach-omap1/pm.c | 10
-rw-r--r-- arch/arm/mach-omap1/timer.c | 2
-rw-r--r-- arch/arm/mach-omap2/Kconfig | 3
-rw-r--r-- arch/arm/mach-omap2/Makefile | 11
-rw-r--r-- arch/arm/mach-omap2/board-generic.c | 2
-rw-r--r-- arch/arm/mach-omap2/board-n8x0.c | 2
-rw-r--r-- arch/arm/mach-omap2/common.h | 9
-rw-r--r-- arch/arm/mach-omap2/io.c | 12
-rw-r--r-- arch/arm/mach-omap2/omap-secure.c | 39
-rw-r--r-- arch/arm/mach-omap2/omap-secure.h | 1
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.c | 9
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.h | 14
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 877
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_54xx_data.c | 467
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 719
-rw-r--r-- arch/arm/mach-omap2/omap_twl.c | 2
-rw-r--r-- arch/arm/mach-omap2/pdata-quirks.c | 31
-rw-r--r-- arch/arm/mach-omap2/pm-debug.c | 4
-rw-r--r-- arch/arm/mach-omap2/pmic-cpcap.c | 4
-rw-r--r-- arch/arm/mach-omap2/powerdomain.c | 12
-rw-r--r-- arch/arm/mach-omap2/sr_device.c | 82
-rw-r--r-- arch/arm/mach-pxa/icontrol.c | 12
-rw-r--r-- arch/arm/mach-pxa/mainstone.c | 8
-rw-r--r-- arch/arm/mach-pxa/pxa_cplds_irqs.c | 31
-rw-r--r-- arch/arm/mach-pxa/stargate2.c | 6
-rw-r--r-- arch/arm/mach-pxa/zeus.c | 6
-rw-r--r-- arch/arm/mach-s3c/mach-mini2440.c | 6
-rw-r--r-- arch/arm/mach-s3c/mach-rx1950.c | 11
-rw-r--r-- arch/arm/mach-socfpga/Kconfig | 4
-rw-r--r-- arch/arm/mach-spear/spear320.c | 2
-rw-r--r-- arch/arm/mach-spear/spear3xx.c | 10
-rw-r--r-- arch/arm/mach-stm32/board-dt.c | 1
-rw-r--r-- arch/arm/mach-ux500/platsmp.c | 2
-rw-r--r-- arch/arm/mach-zynq/Kconfig | 2
-rw-r--r-- arch/arm/mm/cache-v7.S | 58
-rw-r--r-- arch/arm/mm/copypage-v4mc.c | 1
-rw-r--r-- arch/arm/mm/copypage-v6.c | 1
-rw-r--r-- arch/arm/mm/copypage-xscale.c | 1
-rw-r--r-- arch/arm/mm/dump.c | 4
-rw-r--r-- arch/arm/mm/init.c | 29
-rw-r--r-- arch/arm/mm/mmu.c | 3
-rw-r--r-- arch/arm/mm/pmsa-v7.c | 4
-rw-r--r-- arch/arm/mm/pmsa-v8.c | 4
-rw-r--r-- arch/arm/mm/proc-v7.S | 39
-rw-r--r-- arch/arm/mm/ptdump_debugfs.c | 15
-rw-r--r-- arch/arm/mm/tlb-v6.S | 2
-rw-r--r-- arch/arm/mm/tlb-v7.S | 2
-rw-r--r-- arch/arm/plat-omap/dma.c | 3
-rw-r--r-- arch/arm/probes/kprobes/core.c | 23
-rw-r--r-- arch/arm/probes/kprobes/test-arm.c | 294
-rw-r--r-- arch/arm/probes/kprobes/test-core.h | 1
-rw-r--r-- arch/arm/probes/uprobes/core.c | 4
-rw-r--r-- arch/arm/tools/Makefile | 26
-rw-r--r-- arch/arm/tools/syscall.tbl | 4
-rw-r--r-- arch/arm/tools/syscallhdr.sh | 31
-rw-r--r-- arch/arm/tools/syscalltbl.sh | 22
-rw-r--r-- arch/arm/xen/mm.c | 10
-rw-r--r-- arch/arm/xen/p2m.c | 5
-rw-r--r-- arch/arm64/Kbuild | 3
-rw-r--r-- arch/arm64/Kconfig | 162
-rw-r--r-- arch/arm64/Kconfig.platforms | 27
-rw-r--r-- arch/arm64/Makefile | 7
-rw-r--r-- arch/arm64/boot/dts/Makefile | 1
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts | 2
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts | 2
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts | 2
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts | 2
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts | 2
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts | 4
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts | 2
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts | 2
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts | 2
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts | 2
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | 6
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts | 44
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts | 16
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi | 24
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts | 4
-rw-r--r-- arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi | 12
-rw-r--r-- arch/arm64/boot/dts/altera/Makefile | 2
-rw-r--r-- arch/arm64/boot/dts/amlogic/Makefile | 3
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-a1.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-axg.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi | 20
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts | 2
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts | 4
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts | 2
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi | 56
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-gx.dtsi | 6
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts | 2
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-gxl-s905d-mecool-kii-pro.dts | 86
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts | 8
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-gxm-mecool-kiii-pro.dts | 113
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-gxm-minix-neo-u9h.dts | 120
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-gxm-wetek-core2.dts | 2
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts | 2
-rw-r--r-- arch/arm64/boot/dts/apple/Makefile | 2
-rw-r--r-- arch/arm64/boot/dts/apple/t8103-j274.dts | 45
-rw-r--r-- arch/arm64/boot/dts/apple/t8103.dtsi | 135
-rw-r--r-- arch/arm64/boot/dts/arm/juno-base.dtsi | 5
-rw-r--r-- arch/arm64/boot/dts/arm/juno-r1.dts | 4
-rw-r--r-- arch/arm64/boot/dts/arm/juno-r2.dts | 4
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcm4908/Makefile | 1
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts | 107
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-tplink-archer-c2300-v1.dts | 182
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts | 44
-rw-r--r-- arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi | 47
-rw-r--r-- arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/exynos/exynos5433.dtsi | 6
-rw-r--r-- arch/arm64/boot/dts/freescale/Makefile | 4
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts | 3
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts | 5
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts | 80
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts | 1
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts | 1
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi | 22
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi | 24
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi | 64
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8-ss-adma.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi | 68
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi | 184
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8-ss-ddr.dtsi | 18
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi | 202
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi | 311
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm-ctouch2.dts | 97
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm-edimm2.2.dts | 97
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm.dtsi | 232
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts | 314
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h | 2
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mm.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi | 6
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mn.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mp-evk.dts | 2
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts | 58
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mp.dtsi | 43
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts | 613
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts | 70
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mq-librem5-r2.dts | 2
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts | 4
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi | 24
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h | 2
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts | 10
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi | 23
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8qm-mek.dts | 144
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8qm-ss-conn.dtsi | 21
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi | 51
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8qm-ss-lsio.dtsi | 61
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8qm.dtsi | 176
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8qxp-ai_ml.dts | 20
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8qxp-colibri-eval-v3.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8qxp-colibri.dtsi | 12
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8qxp-mek.dts | 50
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8qxp-ss-adma.dtsi | 37
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8qxp-ss-conn.dtsi | 25
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8qxp-ss-lsio.dtsi | 61
-rw-r--r-- arch/arm64/boot/dts/freescale/imx8qxp.dtsi | 423
-rw-r--r-- arch/arm64/boot/dts/intel/Makefile | 6
-rw-r--r-- arch/arm64/boot/dts/intel/socfpga_agilex.dtsi | 222
-rw-r--r-- arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts | 14
-rw-r--r-- arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts | 18
-rw-r--r-- arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts | 12
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts | 2
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-37xx.dtsi | 3
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-7040-db.dts | 14
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts | 127
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-8040-db.dts | 21
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi | 19
-rw-r--r-- arch/arm64/boot/dts/marvell/armada-cp11x.dtsi | 25
-rw-r--r-- arch/arm64/boot/dts/marvell/cn9130-db.dts | 12
-rw-r--r-- arch/arm64/boot/dts/marvell/cn9131-db.dts | 9
-rw-r--r-- arch/arm64/boot/dts/marvell/cn9132-db.dts | 11
-rw-r--r-- arch/arm64/boot/dts/mediatek/Makefile | 8
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt2712e.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts | 12
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt7622.dtsi | 9
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8173-evb.dts | 6
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8173.dtsi | 15
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8183-evb.dts | 4
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts | 31
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dts | 13
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper.dtsi | 27
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi | 474
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dts | 13
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi | 378
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku16.dts | 21
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku272.dts | 21
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku288.dts | 21
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku32.dts | 21
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi | 343
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts | 378
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8183.dtsi | 160
-rw-r--r-- arch/arm64/boot/dts/mediatek/mt8516.dtsi | 9
-rw-r--r-- arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi | 3
-rw-r--r-- arch/arm64/boot/dts/microchip/sparx5.dtsi | 94
-rw-r--r-- arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi | 481
-rw-r--r-- arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi | 621
-rw-r--r-- arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts | 2
-rw-r--r-- arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/nvidia/tegra186.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts | 3
-rw-r--r-- arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/nvidia/tegra194-p3668-0001.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/qcom/Makefile | 15
-rw-r--r-- arch/arm64/boot/dts/qcom/apq8016-sbc.dts | 2
-rw-r--r-- arch/arm64/boot/dts/qcom/msm8916.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/qcom/msm8994.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/qcom/msm8996.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/qcom/msm8998-oneplus-cheeseburger.dts | 42
-rw-r--r-- arch/arm64/boot/dts/qcom/msm8998-oneplus-common.dtsi | 514
-rw-r--r-- arch/arm64/boot/dts/qcom/msm8998-oneplus-dumpling.dts | 25
-rw-r--r-- arch/arm64/boot/dts/qcom/msm8998.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/qcom/pm8150.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/qcom/pm8350.dtsi | 25
-rw-r--r-- arch/arm64/boot/dts/qcom/pm8350b.dtsi | 25
-rw-r--r-- arch/arm64/boot/dts/qcom/pm8350c.dtsi | 25
-rw-r--r-- arch/arm64/boot/dts/qcom/pmk8350.dtsi | 25
-rw-r--r-- arch/arm64/boot/dts/qcom/pmr735a.dtsi | 25
-rw-r--r-- arch/arm64/boot/dts/qcom/pmr735b.dtsi | 25
-rw-r--r-- arch/arm64/boot/dts/qcom/qrb5165-rb5.dts | 18
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1-lte.dts | 18
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1.dts | 141
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2-lte.dts | 18
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2.dts | 15
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi | 266
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts | 34
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots.dts | 26
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen.dts | 42
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r0.dts | 9
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1.dts | 9
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts | 5
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts | 4
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3.dts | 10
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi | 39
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1-lte.dts | 14
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1.dts | 26
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2-lte.dts | 14
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2.dts | 44
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi | 295
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor-r1.dts | 35
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi | 222
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7180.dtsi | 58
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7280-idp.dts | 259
-rw-r--r-- arch/arm64/boot/dts/qcom/sc7280.dtsi | 1128
-rw-r--r-- arch/arm64/boot/dts/qcom/sdm845-db845c.dts | 31
-rw-r--r-- arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts | 71
-rw-r--r-- arch/arm64/boot/dts/qcom/sdm845.dtsi | 155
-rw-r--r-- arch/arm64/boot/dts/qcom/sm8150.dtsi | 555
-rw-r--r-- arch/arm64/boot/dts/qcom/sm8250-mtp.dts | 4
-rw-r--r-- arch/arm64/boot/dts/qcom/sm8250.dtsi | 676
-rw-r--r-- arch/arm64/boot/dts/qcom/sm8350-hdk.dts | 319
-rw-r--r-- arch/arm64/boot/dts/qcom/sm8350-mtp.dts | 116
-rw-r--r-- arch/arm64/boot/dts/qcom/sm8350.dtsi | 1637
-rw-r--r-- arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/renesas/hihope-common.dtsi | 3
-rw-r--r-- arch/arm64/boot/dts/renesas/hihope-rev4.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts | 3
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774a1.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts | 3
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774b1.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts | 6
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts | 2
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774c0.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts | 3
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a774e1.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77950-salvator-x.dts | 37
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77950.dtsi | 9
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77951-salvator-x.dts | 37
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77951-salvator-xs.dts | 37
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77951.dtsi | 12
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77960-salvator-x.dts | 46
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77960-salvator-xs.dts | 46
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77960.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77961-salvator-xs.dts | 46
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77961-ulcb.dts | 11
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77961.dtsi | 348
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77965-salvator-x.dts | 45
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77965-salvator-xs.dts | 45
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77965.dtsi | 8
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77970.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77980.dtsi | 24
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts | 11
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a77990.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi | 79
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a779a0-falcon-csi-dsi.dtsi | 15
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a779a0-falcon-ethernet.dtsi | 15
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts | 50
-rw-r--r-- arch/arm64/boot/dts/renesas/r8a779a0.dtsi | 263
-rw-r--r-- arch/arm64/boot/dts/renesas/salvator-common.dtsi | 55
-rw-r--r-- arch/arm64/boot/dts/renesas/ulcb-kf.dtsi | 1
-rw-r--r-- arch/arm64/boot/dts/renesas/ulcb.dtsi | 12
-rw-r--r-- arch/arm64/boot/dts/rockchip/Makefile | 1
-rw-r--r-- arch/arm64/boot/dts/rockchip/px30-engicam-common.dtsi | 5
-rw-r--r-- arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/rockchip/px30-evb.dts | 6
-rw-r--r-- arch/arm64/boot/dts/rockchip/px30.dtsi | 27
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts | 6
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3308.dtsi | 27
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts | 11
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts | 4
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3328-a1.dts | 5
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3328-evb.dts | 6
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts | 4
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts | 5
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts | 5
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3328-rock64.dts | 5
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3328.dtsi | 28
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts | 4
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts | 4
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts | 5
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts | 5
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3368-r88.dts | 5
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3368.dtsi | 9
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-evb.dts | 4
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-firefly.dts | 6
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi | 5
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts | 6
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi | 6
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts | 5
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts | 6
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts | 133
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi | 6
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi | 40
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts | 6
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts | 6
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts | 4
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi | 55
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-roc-pc-mezzanine.dts | 4
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi | 5
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi | 5
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts | 4
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts | 4
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi | 6
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi | 19
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts | 4
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi | 5
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399.dtsi | 9
-rw-r--r-- arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi | 5
-rw-r--r-- arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi | 2
-rw-r--r-- arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/ti/Makefile | 8
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am64-main.dtsi | 676
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi | 104
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am64.dtsi | 105
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am642-evm.dts | 468
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am642-sk.dts | 334
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am642.dtsi | 65
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi | 655
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am65-main.dtsi | 405
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi | 13
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi | 13
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts | 61
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am654-base-board.dts | 35
-rw-r--r-- arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced.dts | 60
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts | 94
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j7200-main.dtsi | 94
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi | 58
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi | 36
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j721e-main.dtsi | 289
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi | 15
-rw-r--r-- arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi | 4
-rw-r--r-- arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts | 20
-rw-r--r-- arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts | 20
-rw-r--r-- arch/arm64/boot/dts/xilinx/zynqmp.dtsi | 1
-rw-r--r-- arch/arm64/configs/defconfig | 39
-rw-r--r-- arch/arm64/crypto/Makefile | 10
-rw-r--r-- arch/arm64/crypto/aes-modes.S | 3
-rw-r--r-- arch/arm64/crypto/poly1305-core.S_shipped | 835
-rw-r--r-- arch/arm64/crypto/poly1305-glue.c | 2
-rw-r--r-- arch/arm64/crypto/sha1-ce-core.S | 2
-rw-r--r-- arch/arm64/crypto/sha2-ce-core.S | 2
-rw-r--r-- arch/arm64/crypto/sha256-core.S_shipped | 2069
-rw-r--r-- arch/arm64/crypto/sha3-ce-core.S | 4
-rw-r--r-- arch/arm64/crypto/sha512-ce-core.S | 2
-rw-r--r-- arch/arm64/crypto/sha512-core.S_shipped | 1093
-rw-r--r-- arch/arm64/include/asm/Kbuild | 2
-rw-r--r-- arch/arm64/include/asm/alternative-macros.h | 17
-rw-r--r-- arch/arm64/include/asm/arch_gicv3.h | 5
-rw-r--r-- arch/arm64/include/asm/arch_timer.h | 21
-rw-r--r-- arch/arm64/include/asm/asm-prototypes.h | 6
-rw-r--r-- arch/arm64/include/asm/asm_pointer_auth.h | 67
-rw-r--r-- arch/arm64/include/asm/assembler.h | 233
-rw-r--r-- arch/arm64/include/asm/atomic.h | 2
-rw-r--r-- arch/arm64/include/asm/barrier.h | 24
-rw-r--r-- arch/arm64/include/asm/cache.h | 2
-rw-r--r-- arch/arm64/include/asm/cacheflush.h | 71
-rw-r--r-- arch/arm64/include/asm/checksum.h | 2
-rw-r--r-- arch/arm64/include/asm/compiler.h | 16
-rw-r--r-- arch/arm64/include/asm/cpu.h | 45
-rw-r--r-- arch/arm64/include/asm/cpucaps.h | 72
-rw-r--r-- arch/arm64/include/asm/cpufeature.h | 32
-rw-r--r-- arch/arm64/include/asm/cpuidle.h | 35
-rw-r--r-- arch/arm64/include/asm/cputype.h | 6
-rw-r--r-- arch/arm64/include/asm/daifflags.h | 13
-rw-r--r-- arch/arm64/include/asm/efi.h | 2
-rw-r--r-- arch/arm64/include/asm/el2_setup.h | 34
-rw-r--r-- arch/arm64/include/asm/exception.h | 34
-rw-r--r-- arch/arm64/include/asm/fpsimd.h | 14
-rw-r--r-- arch/arm64/include/asm/fpsimdmacros.h | 14
-rw-r--r-- arch/arm64/include/asm/hugetlb.h | 3
-rw-r--r-- arch/arm64/include/asm/hyp_image.h | 7
-rw-r--r-- arch/arm64/include/asm/hypervisor.h | 3
-rw-r--r-- arch/arm64/include/asm/insn-def.h | 9
-rw-r--r-- arch/arm64/include/asm/insn.h | 67
-rw-r--r-- arch/arm64/include/asm/io.h | 11
-rw-r--r-- arch/arm64/include/asm/irq.h | 4
-rw-r--r-- arch/arm64/include/asm/irq_work.h | 2
-rw-r--r-- arch/arm64/include/asm/irqflags.h | 16
-rw-r--r-- arch/arm64/include/asm/kernel-pgtable.h | 21
-rw-r--r-- arch/arm64/include/asm/kexec.h | 4
-rw-r--r-- arch/arm64/include/asm/kvm_arm.h | 6
-rw-r--r-- arch/arm64/include/asm/kvm_asm.h | 21
-rw-r--r-- arch/arm64/include/asm/kvm_emulate.h | 8
-rw-r--r-- arch/arm64/include/asm/kvm_host.h | 78
-rw-r--r-- arch/arm64/include/asm/kvm_hyp.h | 22
-rw-r--r-- arch/arm64/include/asm/kvm_mmu.h | 42
-rw-r--r-- arch/arm64/include/asm/kvm_mte.h | 66
-rw-r--r-- arch/arm64/include/asm/kvm_pgtable.h | 172
-rw-r--r-- arch/arm64/include/asm/linkage.h | 8
-rw-r--r-- arch/arm64/include/asm/memory.h | 25
-rw-r--r-- arch/arm64/include/asm/mmu_context.h | 25
-rw-r--r-- arch/arm64/include/asm/module.lds.h | 17
-rw-r--r-- arch/arm64/include/asm/mte-def.h | 1
-rw-r--r-- arch/arm64/include/asm/mte-kasan.h | 101
-rw-r--r-- arch/arm64/include/asm/mte.h | 62
-rw-r--r-- arch/arm64/include/asm/page.h | 12
-rw-r--r-- arch/arm64/include/asm/paravirt.h | 14
-rw-r--r-- arch/arm64/include/asm/patching.h | 13
-rw-r--r-- arch/arm64/include/asm/perf_event.h | 5
-rw-r--r-- arch/arm64/include/asm/pgalloc.h | 20
-rw-r--r-- arch/arm64/include/asm/pgtable-hwdef.h | 22
-rw-r--r-- arch/arm64/include/asm/pgtable-prot.h | 11
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 63
-rw-r--r-- arch/arm64/include/asm/pointer_auth.h | 98
-rw-r--r-- arch/arm64/include/asm/preempt.h | 2
-rw-r--r-- arch/arm64/include/asm/processor.h | 27
-rw-r--r-- arch/arm64/include/asm/ptdump.h | 2
-rw-r--r-- arch/arm64/include/asm/scs.h | 8
-rw-r--r-- arch/arm64/include/asm/sdei.h | 10
-rw-r--r-- arch/arm64/include/asm/sections.h | 1
-rw-r--r-- arch/arm64/include/asm/smp.h | 3
-rw-r--r-- arch/arm64/include/asm/sparsemem.h | 3
-rw-r--r-- arch/arm64/include/asm/stacktrace.h | 56
-rw-r--r-- arch/arm64/include/asm/sysreg.h | 155
-rw-r--r-- arch/arm64/include/asm/thread_info.h | 2
-rw-r--r-- arch/arm64/include/asm/tlb.h | 4
-rw-r--r-- arch/arm64/include/asm/topology.h | 10
-rw-r--r-- arch/arm64/include/asm/uaccess.h | 22
-rw-r--r-- arch/arm64/include/asm/unistd.h | 2
-rw-r--r-- arch/arm64/include/asm/unistd32.h | 8
-rw-r--r-- arch/arm64/include/asm/vdso/compat_gettimeofday.h | 3
-rw-r--r-- arch/arm64/include/asm/vdso/gettimeofday.h | 8
-rw-r--r-- arch/arm64/include/asm/vmalloc.h | 24
-rw-r--r-- arch/arm64/include/asm/word-at-a-time.h | 14
-rw-r--r-- arch/arm64/include/asm/xen/swiotlb-xen.h | 1
-rw-r--r-- arch/arm64/include/uapi/asm/kvm.h | 11
-rw-r--r-- arch/arm64/kernel/Makefile | 16
-rw-r--r-- arch/arm64/kernel/acpi.c | 22
-rw-r--r-- arch/arm64/kernel/acpi_parking_protocol.c | 3
-rw-r--r-- arch/arm64/kernel/alternative.c | 5
-rw-r--r-- arch/arm64/kernel/asm-offsets.c | 27
-rw-r--r-- arch/arm64/kernel/cpu-reset.S | 5
-rw-r--r-- arch/arm64/kernel/cpu-reset.h | 10
-rw-r--r-- arch/arm64/kernel/cpu_errata.c | 8
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 247
-rw-r--r-- arch/arm64/kernel/cpuidle.c | 2
-rw-r--r-- arch/arm64/kernel/cpuinfo.c | 60
-rw-r--r-- arch/arm64/kernel/crash_dump.c | 2
-rw-r--r-- arch/arm64/kernel/efi-entry.S | 9
-rw-r--r-- arch/arm64/kernel/entry-common.c | 279
-rw-r--r-- arch/arm64/kernel/entry-fpsimd.S | 27
-rw-r--r-- arch/arm64/kernel/entry.S | 438
-rw-r--r-- arch/arm64/kernel/fpsimd.c | 41
-rw-r--r-- arch/arm64/kernel/ftrace.c | 3
-rw-r--r-- arch/arm64/kernel/head.S | 123
-rw-r--r-- arch/arm64/kernel/hibernate-asm.S | 7
-rw-r--r-- arch/arm64/kernel/hibernate.c | 20
-rw-r--r-- arch/arm64/kernel/hyp-stub.S | 13
-rw-r--r-- arch/arm64/kernel/idle.c | 46
-rw-r--r-- arch/arm64/kernel/idreg-override.c | 72
-rw-r--r-- arch/arm64/kernel/image-vars.h | 39
-rw-r--r-- arch/arm64/kernel/irq.c | 35
-rw-r--r-- arch/arm64/kernel/jump_label.c | 1
-rw-r--r-- arch/arm64/kernel/kaslr.c | 30
-rw-r--r-- arch/arm64/kernel/kgdb.c | 1
-rw-r--r-- arch/arm64/kernel/machine_kexec.c | 30
-rw-r--r-- arch/arm64/kernel/machine_kexec_file.c | 196
-rw-r--r-- arch/arm64/kernel/module.c | 16
-rw-r--r-- arch/arm64/kernel/mte.c | 139
-rw-r--r-- arch/arm64/kernel/paravirt.c | 13
-rw-r--r-- arch/arm64/kernel/patching.c | 150
-rw-r--r-- arch/arm64/kernel/perf_callchain.c | 2
-rw-r--r-- arch/arm64/kernel/perf_event.c | 47
-rw-r--r-- arch/arm64/kernel/pointer_auth.c | 63
-rw-r--r-- arch/arm64/kernel/probes/kprobes.c | 44
-rw-r--r-- arch/arm64/kernel/probes/simulate-insn.c | 1
-rw-r--r-- arch/arm64/kernel/probes/uprobes.c | 2
-rw-r--r-- arch/arm64/kernel/process.c | 145
-rw-r--r-- arch/arm64/kernel/psci.c | 3
-rw-r--r-- arch/arm64/kernel/ptrace.c | 43
-rw-r--r-- arch/arm64/kernel/sdei.c | 64
-rw-r--r-- arch/arm64/kernel/setup.c | 9
-rw-r--r-- arch/arm64/kernel/signal.c | 26
-rw-r--r-- arch/arm64/kernel/sleep.S | 2
-rw-r--r-- arch/arm64/kernel/smccc-call.S | 83
-rw-r--r-- arch/arm64/kernel/smp.c | 18
-rw-r--r-- arch/arm64/kernel/smp_spin_table.c | 10
-rw-r--r-- arch/arm64/kernel/stacktrace.c | 47
-rw-r--r-- arch/arm64/kernel/suspend.c | 18
-rw-r--r-- arch/arm64/kernel/sys_compat.c | 2
-rw-r--r-- arch/arm64/kernel/syscall.c | 16
-rw-r--r-- arch/arm64/kernel/topology.c | 109
-rw-r--r-- arch/arm64/kernel/traps.c | 135
-rw-r--r-- arch/arm64/kernel/vdso.c | 26
-rw-r--r-- arch/arm64/kernel/vdso/vdso.lds.S | 8
-rw-r--r-- arch/arm64/kernel/vdso32/Makefile | 8
-rw-r--r-- arch/arm64/kernel/vmlinux.lds.S | 74
-rw-r--r-- arch/arm64/kvm/Kconfig | 5
-rw-r--r-- arch/arm64/kvm/Makefile | 2
-rw-r--r-- arch/arm64/kvm/arch_timer.c | 162
-rw-r--r-- arch/arm64/kvm/arm.c | 280
-rw-r--r-- arch/arm64/kvm/debug.c | 116
-rw-r--r-- arch/arm64/kvm/fpsimd.c | 26
-rw-r--r-- arch/arm64/kvm/guest.c | 145
-rw-r--r-- arch/arm64/kvm/handle_exit.c | 45
-rw-r--r-- arch/arm64/kvm/hyp/Makefile | 2
-rw-r--r-- arch/arm64/kvm/hyp/entry.S | 15
-rw-r--r-- arch/arm64/kvm/hyp/exception.c | 21
-rw-r--r-- arch/arm64/kvm/hyp/fpsimd.S | 10
-rw-r--r-- arch/arm64/kvm/hyp/hyp-entry.S | 6
-rw-r--r-- arch/arm64/kvm/hyp/include/hyp/adjust_pc.h | 18
-rw-r--r-- arch/arm64/kvm/hyp/include/hyp/switch.h | 116
-rw-r--r-- arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 21
-rw-r--r-- arch/arm64/kvm/hyp/include/nvhe/early_alloc.h | 14
-rw-r--r-- arch/arm64/kvm/hyp/include/nvhe/gfp.h | 33
-rw-r--r-- arch/arm64/kvm/hyp/include/nvhe/mem_protect.h | 36
-rw-r--r-- arch/arm64/kvm/hyp/include/nvhe/memory.h | 48
-rw-r--r-- arch/arm64/kvm/hyp/include/nvhe/mm.h | 97
-rw-r--r-- arch/arm64/kvm/hyp/include/nvhe/spinlock.h | 92
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/Makefile | 15
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/cache.S | 13
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/debug-sr.c | 66
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/early_alloc.c | 54
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/gen-hyprel.c | 18
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/host.S | 25
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/hyp-init.S | 54
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 95
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/hyp-smp.c | 6
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/hyp.lds.S | 1
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 279
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/mm.c | 173
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/page_alloc.c | 247
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/psci-relay.c | 4
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/setup.c | 219
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/stub.c | 22
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/switch.c | 43
-rw-r--r-- arch/arm64/kvm/hyp/nvhe/tlb.c | 9
-rw-r--r-- arch/arm64/kvm/hyp/pgtable.c | 462
-rw-r--r-- arch/arm64/kvm/hyp/reserved_mem.c | 112
-rw-r--r-- arch/arm64/kvm/hyp/vgic-v3-sr.c | 49
-rw-r--r-- arch/arm64/kvm/hyp/vhe/switch.c | 7
-rw-r--r-- arch/arm64/kvm/hyp/vhe/tlb.c | 3
-rw-r--r-- arch/arm64/kvm/hypercalls.c | 80
-rw-r--r-- arch/arm64/kvm/mmu.c | 455
-rw-r--r-- arch/arm64/kvm/perf.c | 5
-rw-r--r-- arch/arm64/kvm/pmu-emul.c | 16
-rw-r--r-- arch/arm64/kvm/pmu.c | 8
-rw-r--r-- arch/arm64/kvm/reset.c | 105
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 90
-rw-r--r-- arch/arm64/kvm/trace_arm.h | 66
-rw-r--r-- arch/arm64/kvm/va_layout.c | 7
-rw-r--r-- arch/arm64/kvm/vgic/vgic-init.c | 48
-rw-r--r-- arch/arm64/kvm/vgic/vgic-its.c | 14
-rw-r--r-- arch/arm64/kvm/vgic/vgic-kvm-device.c | 7
-rw-r--r-- arch/arm64/kvm/vgic/vgic-mmio-v3.c | 85
-rw-r--r-- arch/arm64/kvm/vgic/vgic-mmio.c | 10
-rw-r--r-- arch/arm64/kvm/vgic/vgic-v2.c | 19
-rw-r--r-- arch/arm64/kvm/vgic/vgic-v3.c | 97
-rw-r--r-- arch/arm64/kvm/vgic/vgic-v4.c | 38
-rw-r--r-- arch/arm64/kvm/vgic/vgic.c | 17
-rw-r--r-- arch/arm64/kvm/vgic/vgic.h | 2
-rw-r--r-- arch/arm64/lib/Makefile | 4
-rw-r--r-- arch/arm64/lib/clear_page.S | 4
-rw-r--r-- arch/arm64/lib/clear_user.S | 47
-rw-r--r-- arch/arm64/lib/copy_page.S | 4
-rw-r--r-- arch/arm64/lib/insn.c (renamed from arch/arm64/kernel/insn.c) | 249
-rw-r--r-- arch/arm64/lib/kasan_sw_tags.S | 76
-rw-r--r-- arch/arm64/lib/memchr.S | 65
-rw-r--r-- arch/arm64/lib/memcmp.S | 346
-rw-r--r-- arch/arm64/lib/memcpy.S | 272
-rw-r--r-- arch/arm64/lib/memmove.S | 189
-rw-r--r-- arch/arm64/lib/mte.S | 20
-rw-r--r-- arch/arm64/lib/strcmp.S | 289
-rw-r--r-- arch/arm64/lib/strlen.S | 258
-rw-r--r-- arch/arm64/lib/strncmp.S | 406
-rw-r--r-- arch/arm64/lib/uaccess_flushcache.c | 4
-rw-r--r-- arch/arm64/mm/cache.S | 158
-rw-r--r-- arch/arm64/mm/context.c | 6
-rw-r--r-- arch/arm64/mm/dma-mapping.c | 4
-rw-r--r-- arch/arm64/mm/fault.c | 68
-rw-r--r-- arch/arm64/mm/flush.c | 33
-rw-r--r-- arch/arm64/mm/hugetlbpage.c | 12
-rw-r--r-- arch/arm64/mm/init.c | 33
-rw-r--r-- arch/arm64/mm/ioremap.c | 4
-rw-r--r-- arch/arm64/mm/kasan_init.c | 29
-rw-r--r-- arch/arm64/mm/mmu.c | 133
-rw-r--r-- arch/arm64/mm/proc.S | 82
-rw-r--r-- arch/arm64/mm/ptdump.c | 10
-rw-r--r-- arch/arm64/mm/ptdump_debugfs.c | 2
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 20
-rw-r--r-- arch/arm64/tools/Makefile | 22
-rw-r--r-- arch/arm64/tools/cpucaps | 66
-rwxr-xr-x arch/arm64/tools/gen-cpucaps.awk | 40
-rw-r--r-- arch/csky/Kconfig | 2
-rw-r--r-- arch/csky/abiv1/cacheflush.c | 1
-rw-r--r-- arch/csky/include/asm/Kbuild | 1
-rw-r--r-- arch/csky/include/asm/asid.h | 2
-rw-r--r-- arch/csky/include/asm/barrier.h | 2
-rw-r--r-- arch/csky/include/asm/cmpxchg.h | 8
-rw-r--r-- arch/csky/include/asm/page.h | 2
-rw-r--r-- arch/csky/include/asm/pgalloc.h | 2
-rw-r--r-- arch/csky/include/asm/pgtable.h | 1
-rw-r--r-- arch/csky/include/asm/segment.h | 7
-rw-r--r-- arch/csky/include/asm/uaccess.h | 452
-rw-r--r-- arch/csky/include/asm/vdso.h | 2
-rw-r--r-- arch/csky/kernel/asm-offsets.c | 1
-rw-r--r-- arch/csky/kernel/entry.S | 1
-rw-r--r-- arch/csky/kernel/probes/ftrace.c | 2
-rw-r--r-- arch/csky/kernel/probes/kprobes.c | 17
-rw-r--r-- arch/csky/kernel/smp.c | 1
-rw-r--r-- arch/csky/kernel/stacktrace.c | 2
-rw-r--r-- arch/csky/lib/usercopy.c | 366
-rw-r--r-- arch/csky/mm/fault.c | 2
-rw-r--r-- arch/csky/mm/init.c | 1
-rw-r--r-- arch/csky/mm/syscache.c | 1
-rw-r--r-- arch/h8300/include/asm/Kbuild | 1
-rw-r--r-- arch/h8300/include/asm/atomic.h | 97
-rw-r--r-- arch/h8300/include/asm/bitops.h | 8
-rw-r--r-- arch/h8300/include/asm/cmpxchg.h | 66
-rw-r--r-- arch/h8300/kernel/asm-offsets.c | 1
-rw-r--r-- arch/h8300/kernel/process.c | 2
-rw-r--r-- arch/h8300/kernel/setup.c | 2
-rw-r--r-- arch/h8300/mm/init.c | 2
-rw-r--r-- arch/hexagon/Makefile | 6
-rw-r--r-- arch/hexagon/configs/comet_defconfig | 2
-rw-r--r-- arch/hexagon/include/asm/atomic.h | 28
-rw-r--r-- arch/hexagon/include/asm/cmpxchg.h | 4
-rw-r--r-- arch/hexagon/include/asm/futex.h | 4
-rw-r--r-- arch/hexagon/include/asm/io.h | 1
-rw-r--r-- arch/hexagon/include/asm/pgtable.h | 4
-rw-r--r-- arch/hexagon/include/asm/timex.h | 3
-rw-r--r-- arch/hexagon/kernel/hexagon_ksyms.c | 8
-rw-r--r-- arch/hexagon/kernel/process.c | 2
-rw-r--r-- arch/hexagon/kernel/ptrace.c | 4
-rw-r--r-- arch/hexagon/lib/Makefile | 3
-rw-r--r-- arch/hexagon/lib/divsi3.S | 67
-rw-r--r-- arch/hexagon/lib/memcpy_likely_aligned.S | 56
-rw-r--r-- arch/hexagon/lib/modsi3.S | 46
-rw-r--r-- arch/hexagon/lib/udivsi3.S | 38
-rw-r--r-- arch/hexagon/lib/umodsi3.S | 36
-rw-r--r-- arch/hexagon/mm/init.c | 1
-rw-r--r-- arch/ia64/Kconfig | 46
-rw-r--r-- arch/ia64/configs/bigsur_defconfig | 1
-rw-r--r-- arch/ia64/configs/generic_defconfig | 2
-rw-r--r-- arch/ia64/include/asm/atomic.h | 74
-rw-r--r-- arch/ia64/include/asm/cmpxchg.h | 16
-rw-r--r-- arch/ia64/include/asm/io.h | 1
-rw-r--r-- arch/ia64/include/asm/meminit.h | 11
-rw-r--r-- arch/ia64/include/asm/module.h | 6
-rw-r--r-- arch/ia64/include/asm/page.h | 31
-rw-r--r-- arch/ia64/include/asm/pal.h | 3
-rw-r--r-- arch/ia64/include/asm/pgalloc.h | 1
-rw-r--r-- arch/ia64/include/asm/pgtable.h | 8
-rw-r--r-- arch/ia64/include/asm/ptrace.h | 8
-rw-r--r-- arch/ia64/include/asm/spinlock.h | 2
-rw-r--r-- arch/ia64/include/asm/syscall.h | 2
-rw-r--r-- arch/ia64/include/asm/uaccess.h | 18
-rw-r--r-- arch/ia64/include/asm/unaligned.h | 12
-rw-r--r-- arch/ia64/include/asm/uv/uv_hub.h | 2
-rw-r--r-- arch/ia64/include/uapi/asm/cmpxchg.h | 10
-rw-r--r-- arch/ia64/kernel/Makefile | 2
-rw-r--r-- arch/ia64/kernel/acpi.c | 7
-rw-r--r-- arch/ia64/kernel/efi.c | 11
-rw-r--r-- arch/ia64/kernel/efi_stub.S | 2
-rw-r--r-- arch/ia64/kernel/entry.S | 3
-rw-r--r-- arch/ia64/kernel/err_inject.c | 22
-rw-r--r-- arch/ia64/kernel/fsys.S | 4
-rw-r--r-- arch/ia64/kernel/head.S | 6
-rw-r--r-- arch/ia64/kernel/ia64_ksyms.c | 12
-rw-r--r-- arch/ia64/kernel/kprobes.c | 16
-rw-r--r-- arch/ia64/kernel/machine_kexec.c | 2
-rw-r--r-- arch/ia64/kernel/mca.c | 8
-rw-r--r-- arch/ia64/kernel/mca_drv.c | 2
-rw-r--r-- arch/ia64/kernel/module.c | 29
-rw-r--r-- arch/ia64/kernel/pal.S | 6
-rw-r--r-- arch/ia64/kernel/process.c | 4
-rw-r--r-- arch/ia64/kernel/ptrace.c | 32
-rw-r--r-- arch/ia64/kernel/smpboot.c | 1
-rw-r--r-- arch/ia64/kernel/syscalls/Makefile | 16
-rw-r--r-- arch/ia64/kernel/syscalls/syscall.tbl | 4
-rw-r--r-- arch/ia64/kernel/syscalls/syscallhdr.sh | 36
-rw-r--r-- arch/ia64/kernel/syscalls/syscalltbl.sh | 32
-rw-r--r-- arch/ia64/kernel/topology.c | 5
-rw-r--r-- arch/ia64/mm/Makefile | 1
-rw-r--r-- arch/ia64/mm/contig.c | 4
-rw-r--r-- arch/ia64/mm/discontig.c | 27
-rw-r--r-- arch/ia64/mm/fault.c | 15
-rw-r--r-- arch/ia64/mm/hugetlbpage.c | 3
-rw-r--r-- arch/ia64/mm/init.c | 221
-rw-r--r-- arch/ia64/mm/numa.c | 5
-rw-r--r-- arch/m68k/Kconfig | 6
-rw-r--r-- arch/m68k/Kconfig.cpu | 10
-rw-r--r-- arch/m68k/Kconfig.machine | 3
-rw-r--r-- arch/m68k/Makefile | 5
-rw-r--r-- arch/m68k/atari/config.c | 12
-rw-r--r-- arch/m68k/atari/time.c | 7
-rw-r--r-- arch/m68k/coldfire/clk.c | 21
-rw-r--r-- arch/m68k/coldfire/intc-simr.c | 12
-rw-r--r-- arch/m68k/coldfire/m5206.c | 25
-rw-r--r-- arch/m68k/coldfire/m520x.c | 51
-rw-r--r-- arch/m68k/coldfire/m523x.c | 42
-rw-r--r-- arch/m68k/coldfire/m5249.c | 33
-rw-r--r-- arch/m68k/coldfire/m525x.c | 33
-rw-r--r-- arch/m68k/coldfire/m5272.c | 35
-rw-r--r-- arch/m68k/coldfire/m527x.c | 44
-rw-r--r-- arch/m68k/coldfire/m528x.c | 42
-rw-r--r-- arch/m68k/coldfire/m5307.c | 27
-rw-r--r-- arch/m68k/coldfire/m53xx.c | 78
-rw-r--r-- arch/m68k/coldfire/m5407.c | 25
-rw-r--r-- arch/m68k/coldfire/m5441x.c | 126
-rw-r--r-- arch/m68k/coldfire/m54xx.c | 33
-rw-r--r-- arch/m68k/configs/amcore_defconfig | 1
-rw-r--r-- arch/m68k/configs/amiga_defconfig | 20
-rw-r--r-- arch/m68k/configs/apollo_defconfig | 10
-rw-r--r-- arch/m68k/configs/atari_defconfig | 18
-rw-r--r-- arch/m68k/configs/bvme6000_defconfig | 10
-rw-r--r-- arch/m68k/configs/hp300_defconfig | 10
-rw-r--r-- arch/m68k/configs/mac_defconfig | 19
-rw-r--r-- arch/m68k/configs/multi_defconfig | 26
-rw-r--r-- arch/m68k/configs/mvme147_defconfig | 10
-rw-r--r-- arch/m68k/configs/mvme16x_defconfig | 10
-rw-r--r-- arch/m68k/configs/q40_defconfig | 18
-rw-r--r-- arch/m68k/configs/sun3_defconfig | 10
-rw-r--r-- arch/m68k/configs/sun3x_defconfig | 10
-rw-r--r-- arch/m68k/emu/nfblock.c | 20
-rw-r--r-- arch/m68k/fpsp040/Makefile | 4
-rw-r--r-- arch/m68k/ifpsp060/Makefile | 2
-rw-r--r-- arch/m68k/include/asm/atomic.h | 60
-rw-r--r-- arch/m68k/include/asm/bitops.h | 6
-rw-r--r-- arch/m68k/include/asm/cmpxchg.h | 10
-rw-r--r-- arch/m68k/include/asm/io_mm.h | 5
-rw-r--r-- arch/m68k/include/asm/mcf_pgalloc.h | 2
-rw-r--r-- arch/m68k/include/asm/mcf_pgtable.h | 2
-rw-r--r-- arch/m68k/include/asm/mcfclk.h | 5
-rw-r--r-- arch/m68k/include/asm/mmu_context.h | 2
-rw-r--r-- arch/m68k/include/asm/mmzone.h | 10
-rw-r--r-- arch/m68k/include/asm/motorola_pgalloc.h | 1
-rw-r--r-- arch/m68k/include/asm/motorola_pgtable.h | 2
-rw-r--r-- arch/m68k/include/asm/mvme147hw.h | 3
-rw-r--r-- arch/m68k/include/asm/page.h | 2
-rw-r--r-- arch/m68k/include/asm/page_mm.h | 39
-rw-r--r-- arch/m68k/include/asm/page_no.h | 10
-rw-r--r-- arch/m68k/include/asm/pgtable_mm.h | 1
-rw-r--r-- arch/m68k/include/asm/sun3_pgalloc.h | 1
-rw-r--r-- arch/m68k/include/asm/sun3xflop.h | 2
-rw-r--r-- arch/m68k/include/asm/tlbflush.h | 2
-rw-r--r-- arch/m68k/include/asm/unaligned.h | 26
-rw-r--r-- arch/m68k/kernel/dma.c | 3
-rw-r--r-- arch/m68k/kernel/process.c | 2
-rw-r--r-- arch/m68k/kernel/signal.c | 4
-rw-r--r-- arch/m68k/kernel/sys_m68k.c | 6
-rw-r--r-- arch/m68k/kernel/syscalls/Makefile | 14
-rw-r--r-- arch/m68k/kernel/syscalls/syscall.tbl | 4
-rw-r--r-- arch/m68k/kernel/syscalls/syscallhdr.sh | 36
-rw-r--r-- arch/m68k/kernel/syscalls/syscalltbl.sh | 32
-rw-r--r-- arch/m68k/kernel/syscalltable.S | 3
-rw-r--r-- arch/m68k/mac/config.c | 24
-rw-r--r-- arch/m68k/mm/init.c | 21
-rw-r--r-- arch/m68k/mvme147/config.c | 14
-rw-r--r-- arch/m68k/mvme16x/config.c | 14
-rw-r--r-- arch/m68k/q40/config.c | 37
-rw-r--r-- arch/microblaze/Kconfig | 4
-rw-r--r-- arch/microblaze/Makefile | 2
-rw-r--r-- arch/microblaze/boot/dts/system.dts | 8
-rw-r--r-- arch/microblaze/include/asm/Kbuild | 1
-rw-r--r-- arch/microblaze/include/asm/atomic.h | 28
-rw-r--r-- arch/microblaze/include/asm/cmpxchg.h | 9
-rw-r--r-- arch/microblaze/include/asm/ftrace.h | 2
-rw-r--r-- arch/microblaze/include/asm/page.h | 3
-rw-r--r-- arch/microblaze/include/asm/pgalloc.h | 2
-rw-r--r-- arch/microblaze/include/asm/pgtable.h | 2
-rw-r--r-- arch/microblaze/include/asm/unaligned.h | 27
-rw-r--r-- arch/microblaze/kernel/asm-offsets.c | 1
-rw-r--r-- arch/microblaze/kernel/syscall_table.S | 3
-rw-r--r-- arch/microblaze/kernel/syscalls/Makefile | 14
-rw-r--r-- arch/microblaze/kernel/syscalls/syscall.tbl | 4
-rw-r--r-- arch/microblaze/kernel/syscalls/syscallhdr.sh | 36
-rw-r--r-- arch/microblaze/kernel/syscalls/syscalltbl.sh | 32
-rw-r--r-- arch/microblaze/lib/memcpy.c | 4
-rw-r--r-- arch/microblaze/lib/memmove.c | 5
-rw-r--r-- arch/microblaze/lib/memset.c | 2
-rw-r--r-- arch/microblaze/lib/uaccess_old.S | 2
-rw-r--r-- arch/microblaze/mm/init.c | 3
-rw-r--r-- arch/mips/Kconfig | 60
-rw-r--r--arch/mips/Kconfig.debug9
-rw-r--r--arch/mips/Makefile2
-rw-r--r--arch/mips/alchemy/board-xxs1500.c1
-rw-r--r--arch/mips/alchemy/common/clock.c3
-rw-r--r--arch/mips/ar7/clock.c113
-rw-r--r--arch/mips/bcm63xx/clk.c2
-rw-r--r--arch/mips/bcm63xx/gpio.c9
-rw-r--r--arch/mips/bmips/dma.c2
-rw-r--r--arch/mips/boot/compressed/Makefile2
-rw-r--r--arch/mips/boot/compressed/decompress.c8
-rw-r--r--arch/mips/boot/compressed/uart-16550.c4
-rw-r--r--arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts2
-rw-r--r--arch/mips/boot/dts/brcm/bcm3368.dtsi5
-rw-r--r--arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts2
-rw-r--r--arch/mips/boot/dts/brcm/bcm63268.dtsi137
-rw-r--r--arch/mips/boot/dts/brcm/bcm6328.dtsi124
-rw-r--r--arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts2
-rw-r--r--arch/mips/boot/dts/brcm/bcm6358.dtsi89
-rw-r--r--arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts2
-rw-r--r--arch/mips/boot/dts/brcm/bcm6362.dtsi134
-rw-r--r--arch/mips/boot/dts/brcm/bcm6368.dtsi133
-rw-r--r--arch/mips/boot/dts/brcm/bcm93384wvg.dts2
-rw-r--r--arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts2
-rw-r--r--arch/mips/boot/dts/brcm/bcm96368mvwg.dts2
-rw-r--r--arch/mips/boot/dts/brcm/bcm97125cbmb.dts2
-rw-r--r--arch/mips/boot/dts/brcm/bcm97346dbsmb.dts4
-rw-r--r--arch/mips/boot/dts/brcm/bcm97358svmb.dts4
-rw-r--r--arch/mips/boot/dts/brcm/bcm97360svmb.dts2
-rw-r--r--arch/mips/boot/dts/brcm/bcm97362svmb.dts4
-rw-r--r--arch/mips/boot/dts/brcm/bcm97420c.dts2
-rw-r--r--arch/mips/boot/dts/brcm/bcm97425svmb.dts4
-rw-r--r--arch/mips/boot/dts/brcm/bcm97435svmb.dts4
-rw-r--r--arch/mips/boot/dts/brcm/bcm9ejtagprb.dts2
-rw-r--r--arch/mips/boot/dts/ingenic/ci20.dts24
-rw-r--r--arch/mips/boot/dts/ingenic/gcw0.dts6
-rw-r--r--arch/mips/boot/dts/ingenic/jz4780.dtsi10
-rw-r--r--arch/mips/boot/dts/ingenic/rs90.dts14
-rw-r--r--arch/mips/boot/dts/ingenic/x1000.dtsi7
-rw-r--r--arch/mips/boot/dts/ingenic/x1830.dtsi16
-rw-r--r--arch/mips/boot/dts/loongson/Makefile3
-rw-r--r--arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi289
-rw-r--r--arch/mips/boot/dts/loongson/loongson64_2core_2k1000.dts10
-rw-r--r--arch/mips/boot/dts/loongson/loongson64g-package.dtsi4
-rw-r--r--arch/mips/boot/dts/loongson/loongson64v_4core_virtio.dts2
-rw-r--r--arch/mips/boot/dts/loongson/ls7a-pch.dtsi8
-rw-r--r--arch/mips/boot/dts/loongson/rs780e-pch.dtsi2
-rw-r--r--arch/mips/boot/dts/mti/sead3.dts2
-rw-r--r--arch/mips/boot/dts/qca/Makefile1
-rw-r--r--arch/mips/boot/dts/qca/ar9331.dtsi2
-rw-r--r--arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts110
-rw-r--r--arch/mips/cavium-octeon/oct_ilm.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-memcpy.S8
-rw-r--r--arch/mips/cavium-octeon/octeon-usb.c9
-rw-r--r--arch/mips/configs/bigsur_defconfig4
-rw-r--r--arch/mips/configs/loongson2k_defconfig353
-rw-r--r--arch/mips/configs/loongson3_defconfig21
-rw-r--r--arch/mips/configs/malta_defconfig1
-rw-r--r--arch/mips/configs/malta_kvm_defconfig4
-rw-r--r--arch/mips/configs/malta_kvm_guest_defconfig436
-rw-r--r--arch/mips/configs/maltaup_xpa_defconfig4
-rw-r--r--arch/mips/configs/rbtx49xx_defconfig3
-rw-r--r--arch/mips/configs/sb1250_swarm_defconfig20
-rw-r--r--arch/mips/configs/workpad_defconfig9
-rw-r--r--arch/mips/crypto/.gitignore2
-rw-r--r--arch/mips/crypto/Makefile4
-rw-r--r--arch/mips/crypto/crc32-mips.c2
-rw-r--r--arch/mips/crypto/poly1305-glue.c2
-rw-r--r--arch/mips/generic/board-boston.its.S10
-rw-r--r--arch/mips/generic/board-jaguar2.its.S16
-rw-r--r--arch/mips/generic/board-luton.its.S8
-rw-r--r--arch/mips/generic/board-ni169445.its.S10
-rw-r--r--arch/mips/generic/board-ocelot.its.S20
-rw-r--r--arch/mips/generic/board-serval.its.S8
-rw-r--r--arch/mips/generic/board-xilfpga.its.S10
-rw-r--r--arch/mips/generic/vmlinux.its.S10
-rw-r--r--arch/mips/include/asm/Kbuild7
-rw-r--r--arch/mips/include/asm/asmmacro.h3
-rw-r--r--arch/mips/include/asm/atomic.h55
-rw-r--r--arch/mips/include/asm/bootinfo.h2
-rw-r--r--arch/mips/include/asm/cmpxchg.h22
-rw-r--r--arch/mips/include/asm/cpu-features.h4
-rw-r--r--arch/mips/include/asm/div64.h55
-rw-r--r--arch/mips/include/asm/highmem.h2
-rw-r--r--arch/mips/include/asm/hugetlb.h8
-rw-r--r--arch/mips/include/asm/io.h19
-rw-r--r--arch/mips/include/asm/irq.h1
-rw-r--r--arch/mips/include/asm/kvm_host.h264
-rw-r--r--arch/mips/include/asm/mach-ar7/ar7.h4
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h8
-rw-r--r--arch/mips/include/asm/mach-generic/spaces.h12
-rw-r--r--arch/mips/include/asm/mach-loongson64/boot_param.h27
-rw-r--r--arch/mips/include/asm/mach-loongson64/builtin_dtbs.h1
-rw-r--r--arch/mips/include/asm/mach-loongson64/kernel-entry-init.h27
-rw-r--r--arch/mips/include/asm/mach-loongson64/loongson.h27
-rw-r--r--arch/mips/include/asm/mach-ralink/mt7620.h53
-rw-r--r--arch/mips/include/asm/mach-ralink/mt7621.h7
-rw-r--r--arch/mips/include/asm/mach-ralink/pinmux.h52
-rw-r--r--arch/mips/include/asm/mach-ralink/rt288x.h9
-rw-r--r--arch/mips/include/asm/mach-ralink/rt305x.h24
-rw-r--r--arch/mips/include/asm/mach-ralink/rt3883.h34
-rw-r--r--arch/mips/include/asm/mips-boards/launch.h5
-rw-r--r--arch/mips/include/asm/mips-cps.h23
-rw-r--r--arch/mips/include/asm/mipsregs.h8
-rw-r--r--arch/mips/include/asm/mmzone.h8
-rw-r--r--arch/mips/include/asm/octeon/cvmx-address.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-bootinfo.h2
-rw-r--r--arch/mips/include/asm/page.h11
-rw-r--r--arch/mips/include/asm/pci.h1
-rw-r--r--arch/mips/include/asm/pgalloc.h11
-rw-r--r--arch/mips/include/asm/pgtable-32.h1
-rw-r--r--arch/mips/include/asm/pgtable-64.h1
-rw-r--r--arch/mips/include/asm/processor.h9
-rw-r--r--arch/mips/include/asm/thread_info.h6
-rw-r--r--arch/mips/include/asm/traps.h3
-rw-r--r--arch/mips/include/asm/uaccess.h598
-rw-r--r--arch/mips/include/asm/unistd.h4
-rw-r--r--arch/mips/include/asm/vdso/gettimeofday.h26
-rw-r--r--arch/mips/include/uapi/asm/mman.h3
-rw-r--r--arch/mips/include/uapi/asm/socket.h2
-rw-r--r--arch/mips/ingenic/Kconfig2
-rw-r--r--arch/mips/kernel/Makefile8
-rw-r--r--arch/mips/kernel/access-helper.h19
-rw-r--r--arch/mips/kernel/asm-offsets.c2
-rw-r--r--arch/mips/kernel/cevt-r4k.c4
-rw-r--r--arch/mips/kernel/cmpxchg.c4
-rw-r--r--arch/mips/kernel/cpu-probe.c14
-rw-r--r--arch/mips/kernel/cpu-r3k-probe.c3
-rw-r--r--arch/mips/kernel/ftrace.c8
-rw-r--r--arch/mips/kernel/kprobes.c3
-rw-r--r--arch/mips/kernel/process.c4
-rw-r--r--arch/mips/kernel/relocate.c1
-rw-r--r--arch/mips/kernel/relocate_kernel.S9
-rw-r--r--arch/mips/kernel/scall32-o32.S8
-rw-r--r--arch/mips/kernel/scall64-n32.S3
-rw-r--r--arch/mips/kernel/scall64-n64.S3
-rw-r--r--arch/mips/kernel/scall64-o32.S4
-rw-r--r--arch/mips/kernel/setup.c2
-rw-r--r--arch/mips/kernel/smp-bmips.c27
-rw-r--r--arch/mips/kernel/smp.c1
-rw-r--r--arch/mips/kernel/spinlock_test.c8
-rw-r--r--arch/mips/kernel/syscalls/Makefile69
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl4
-rw-r--r--arch/mips/kernel/syscalls/syscall_n64.tbl4
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl4
-rw-r--r--arch/mips/kernel/syscalls/syscallhdr.sh36
-rw-r--r--arch/mips/kernel/syscalls/syscallnr.sh2
-rw-r--r--arch/mips/kernel/syscalls/syscalltbl.sh36
-rw-r--r--arch/mips/kernel/traps.c119
-rw-r--r--arch/mips/kernel/unaligned.c205
-rw-r--r--arch/mips/kernel/vdso.c5
-rw-r--r--arch/mips/kernel/vmlinux.lds.S6
-rw-r--r--arch/mips/kvm/Kconfig34
-rw-r--r--arch/mips/kvm/Makefile9
-rw-r--r--arch/mips/kvm/commpage.c32
-rw-r--r--arch/mips/kvm/commpage.h24
-rw-r--r--arch/mips/kvm/dyntrans.c143
-rw-r--r--arch/mips/kvm/emulate.c1672
-rw-r--r--arch/mips/kvm/entry.c33
-rw-r--r--arch/mips/kvm/interrupt.c123
-rw-r--r--arch/mips/kvm/interrupt.h20
-rw-r--r--arch/mips/kvm/mips.c179
-rw-r--r--arch/mips/kvm/mmu.c505
-rw-r--r--arch/mips/kvm/tlb.c177
-rw-r--r--arch/mips/kvm/trap_emul.c1306
-rw-r--r--arch/mips/kvm/vz.c24
-rw-r--r--arch/mips/lantiq/xway/dma.c1
-rw-r--r--arch/mips/lib/memcpy.S28
-rw-r--r--arch/mips/lib/memset.S3
-rw-r--r--arch/mips/lib/mips-atomic.c12
-rw-r--r--arch/mips/lib/strncpy_user.S48
-rw-r--r--arch/mips/lib/strnlen_user.S44
-rw-r--r--arch/mips/loongson64/Makefile2
-rw-r--r--arch/mips/loongson64/env.c36
-rw-r--r--arch/mips/loongson64/init.c23
-rw-r--r--arch/mips/loongson64/numa.c21
-rw-r--r--arch/mips/loongson64/platform.c42
-rw-r--r--arch/mips/loongson64/reset.c116
-rw-r--r--arch/mips/loongson64/smp.c10
-rw-r--r--arch/mips/loongson64/time.c24
-rw-r--r--arch/mips/mm/Makefile6
-rw-r--r--arch/mips/mm/cache.c31
-rw-r--r--arch/mips/mm/dma-noncoherent.c1
-rw-r--r--arch/mips/mm/hugetlbpage.c4
-rw-r--r--arch/mips/mm/init.c8
-rw-r--r--arch/mips/mm/maccess.c10
-rw-r--r--arch/mips/mm/physaddr.c56
-rw-r--r--arch/mips/mm/tlbex.c9
-rw-r--r--arch/mips/mti-malta/Platform6
-rw-r--r--arch/mips/mti-malta/malta-time.c5
-rw-r--r--arch/mips/netlogic/common/irq.c6
-rw-r--r--arch/mips/pci/pci-ar2315.c1
-rw-r--r--arch/mips/pci/pci-legacy.c23
-rw-r--r--arch/mips/pci/pci-mt7620.c5
-rw-r--r--arch/mips/pci/pci-rt2880.c50
-rw-r--r--arch/mips/pci/pci-rt3883.c11
-rw-r--r--arch/mips/pci/pci-xtalk-bridge.c3
-rw-r--r--arch/mips/pic32/Kconfig1
-rw-r--r--arch/mips/ralink/Kconfig6
-rw-r--r--arch/mips/ralink/clk.c50
-rw-r--r--arch/mips/ralink/common.h3
-rw-r--r--arch/mips/ralink/mt7620.c322
-rw-r--r--arch/mips/ralink/mt7621.c123
-rw-r--r--arch/mips/ralink/of.c4
-rw-r--r--arch/mips/ralink/prom.c1
-rw-r--r--arch/mips/ralink/rt288x.c23
-rw-r--r--arch/mips/ralink/rt305x.c79
-rw-r--r--arch/mips/ralink/rt3883.c47
-rw-r--r--arch/mips/rb532/devices.c25
-rw-r--r--arch/mips/sgi-ip22/ip22-reset.c1
-rw-r--r--arch/mips/sgi-ip27/TODO19
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c1
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c1
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c4
-rw-r--r--arch/mips/sgi-ip30/ip30-irq.c1
-rw-r--r--arch/mips/sgi-ip32/ip32-reset.c1
-rw-r--r--arch/mips/vdso/Makefile4
-rw-r--r--arch/nds32/include/asm/memory.h6
-rw-r--r--arch/nds32/include/asm/pgalloc.h5
-rw-r--r--arch/nds32/include/asm/uaccess.h1
-rw-r--r--arch/nds32/kernel/ftrace.c2
-rw-r--r--arch/nds32/kernel/process.c2
-rw-r--r--arch/nds32/mm/cacheflush.c2
-rw-r--r--arch/nds32/mm/init.c1
-rw-r--r--arch/nios2/include/asm/irq.h1
-rw-r--r--arch/nios2/include/asm/pgalloc.h1
-rw-r--r--arch/nios2/include/asm/pgtable.h2
-rw-r--r--arch/nios2/include/asm/uaccess.h1
-rw-r--r--arch/nios2/kernel/irq.c1
-rw-r--r--arch/nios2/kernel/process.c2
-rw-r--r--arch/nios2/mm/cacheflush.c1
-rw-r--r--arch/nios2/mm/init.c1
-rw-r--r--arch/openrisc/configs/or1ksim_defconfig1
-rw-r--r--arch/openrisc/include/asm/atomic.h42
-rw-r--r--arch/openrisc/include/asm/barrier.h9
-rw-r--r--arch/openrisc/include/asm/cmpxchg.h4
-rw-r--r--arch/openrisc/include/asm/pgalloc.h2
-rw-r--r--arch/openrisc/include/asm/pgtable.h1
-rw-r--r--arch/openrisc/include/asm/tlbflush.h2
-rw-r--r--arch/openrisc/include/asm/unaligned.h47
-rw-r--r--arch/openrisc/kernel/asm-offsets.c1
-rw-r--r--arch/openrisc/kernel/setup.c2
-rw-r--r--arch/openrisc/kernel/smp.c2
-rw-r--r--arch/openrisc/mm/init.c8
-rw-r--r--arch/parisc/Kconfig12
-rw-r--r--arch/parisc/Makefile2
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/parisc/include/asm/atomic.h34
-rw-r--r--arch/parisc/include/asm/cmpxchg.h16
-rw-r--r--arch/parisc/include/asm/io.h5
-rw-r--r--arch/parisc/include/asm/pdc_chassis.h1
-rw-r--r--arch/parisc/include/asm/pgalloc.h1
-rw-r--r--arch/parisc/include/asm/pgtable.h2
-rw-r--r--arch/parisc/include/asm/processor.h1
-rw-r--r--arch/parisc/include/asm/unaligned.h6
-rw-r--r--arch/parisc/include/uapi/asm/mman.h3
-rw-r--r--arch/parisc/include/uapi/asm/socket.h2
-rw-r--r--arch/parisc/kernel/asm-offsets.c1
-rw-r--r--arch/parisc/kernel/pdc_chassis.c1
-rw-r--r--arch/parisc/kernel/process.c4
-rw-r--r--arch/parisc/kernel/ptrace.c2
-rw-r--r--arch/parisc/kernel/setup.c2
-rw-r--r--arch/parisc/kernel/smp.c1
-rw-r--r--arch/parisc/kernel/syscall.S16
-rw-r--r--arch/parisc/kernel/syscalls/Makefile30
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl4
-rw-r--r--arch/parisc/kernel/syscalls/syscallhdr.sh36
-rw-r--r--arch/parisc/kernel/syscalls/syscalltbl.sh36
-rw-r--r--arch/parisc/math-emu/fpu.h32
-rw-r--r--arch/parisc/mm/hugetlbpage.c2
-rw-r--r--arch/parisc/mm/init.c2
-rw-r--r--arch/powerpc/Kconfig97
-rw-r--r--arch/powerpc/Kconfig.debug6
-rw-r--r--arch/powerpc/Makefile21
-rw-r--r--arch/powerpc/boot/Makefile6
-rw-r--r--arch/powerpc/boot/decompress.c4
-rw-r--r--arch/powerpc/boot/devtree.c59
-rw-r--r--arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi4
-rw-r--r--arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi4
-rw-r--r--arch/powerpc/boot/dts/fsl/c293si-post.dtsi4
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010si-post.dtsi29
-rw-r--r--arch/powerpc/boot/dts/fsl/p2041si-post.dtsi16
-rw-r--r--arch/powerpc/boot/dts/icon.dts7
-rw-r--r--arch/powerpc/boot/dts/microwatt.dts138
-rw-r--r--arch/powerpc/boot/microwatt.c24
-rw-r--r--arch/powerpc/boot/ns16550.c9
-rwxr-xr-xarch/powerpc/boot/wrapper7
-rw-r--r--arch/powerpc/boot/zImage.ps3.lds.S2
-rw-r--r--arch/powerpc/configs/32-bit.config1
-rw-r--r--arch/powerpc/configs/44x/icon_defconfig1
-rw-r--r--arch/powerpc/configs/64-bit.config1
-rw-r--r--arch/powerpc/configs/microwatt_defconfig98
-rw-r--r--arch/powerpc/configs/mpc885_ads_defconfig25
-rw-r--r--arch/powerpc/configs/powernv_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig4
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig4
-rw-r--r--arch/powerpc/crypto/sha1-spe-glue.c2
-rw-r--r--arch/powerpc/include/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h14
-rw-r--r--arch/powerpc/include/asm/atomic.h140
-rw-r--r--arch/powerpc/include/asm/barrier.h18
-rw-r--r--arch/powerpc/include/asm/book3s/32/hash.h45
-rw-r--r--arch/powerpc/include/asm/book3s/32/kup.h283
-rw-r--r--arch/powerpc/include/asm/book3s/32/mmu-hash.h41
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h40
-rw-r--r--arch/powerpc/include/asm/book3s/32/tlbflush.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/kup.h27
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h1
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h1
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h8
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-radix.h4
-rw-r--r--arch/powerpc/include/asm/book3s/pgtable.h1
-rw-r--r--arch/powerpc/include/asm/bug.h5
-rw-r--r--arch/powerpc/include/asm/cacheflush.h15
-rw-r--r--arch/powerpc/include/asm/checksum.h2
-rw-r--r--arch/powerpc/include/asm/cmpxchg.h30
-rw-r--r--arch/powerpc/include/asm/code-patching.h35
-rw-r--r--arch/powerpc/include/asm/cpm2.h2
-rw-r--r--arch/powerpc/include/asm/cpu_has_feature.h4
-rw-r--r--arch/powerpc/include/asm/cputhreads.h30
-rw-r--r--arch/powerpc/include/asm/dcr-native.h8
-rw-r--r--arch/powerpc/include/asm/exception-64s.h13
-rw-r--r--arch/powerpc/include/asm/fixmap.h9
-rw-r--r--arch/powerpc/include/asm/fsl_pamu_stash.h12
-rw-r--r--arch/powerpc/include/asm/ftrace.h4
-rw-r--r--arch/powerpc/include/asm/futex.h12
-rw-r--r--arch/powerpc/include/asm/head-64.h2
-rw-r--r--arch/powerpc/include/asm/hvcall.h21
-rw-r--r--arch/powerpc/include/asm/hvconsole.h3
-rw-r--r--arch/powerpc/include/asm/hw_irq.h23
-rw-r--r--arch/powerpc/include/asm/hydra.h2
-rw-r--r--arch/powerpc/include/asm/ima.h30
-rw-r--r--arch/powerpc/include/asm/inst.h119
-rw-r--r--arch/powerpc/include/asm/interrupt.h234
-rw-r--r--arch/powerpc/include/asm/io.h5
-rw-r--r--arch/powerpc/include/asm/irq.h7
-rw-r--r--arch/powerpc/include/asm/jump_label.h21
-rw-r--r--arch/powerpc/include/asm/kasan.h2
-rw-r--r--arch/powerpc/include/asm/kexec.h16
-rw-r--r--arch/powerpc/include/asm/kfence.h33
-rw-r--r--arch/powerpc/include/asm/kup.h69
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h1
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h17
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h8
-rw-r--r--arch/powerpc/include/asm/kvm_guest.h4
-rw-r--r--arch/powerpc/include/asm/kvm_host.h29
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h30
-rw-r--r--arch/powerpc/include/asm/livepatch.h2
-rw-r--r--arch/powerpc/include/asm/mmu.h23
-rw-r--r--arch/powerpc/include/asm/mmu_context.h21
-rw-r--r--arch/powerpc/include/asm/mmzone.h4
-rw-r--r--arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h5
-rw-r--r--arch/powerpc/include/asm/nohash/32/kup-8xx.h98
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-44x.h1
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-8xx.h46
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h1
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h7
-rw-r--r--arch/powerpc/include/asm/opal.h5
-rw-r--r--arch/powerpc/include/asm/paca.h9
-rw-r--r--arch/powerpc/include/asm/paravirt.h22
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h1
-rw-r--r--arch/powerpc/include/asm/pci.h7
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h8
-rw-r--r--arch/powerpc/include/asm/pgalloc.h5
-rw-r--r--arch/powerpc/include/asm/pgtable.h11
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h6
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h95
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h45
-rw-r--r--arch/powerpc/include/asm/probes.h4
-rw-r--r--arch/powerpc/include/asm/processor.h30
-rw-r--r--arch/powerpc/include/asm/ps3.h4
-rw-r--r--arch/powerpc/include/asm/pte-walk.h29
-rw-r--r--arch/powerpc/include/asm/ptrace.h136
-rw-r--r--arch/powerpc/include/asm/qspinlock.h23
-rw-r--r--arch/powerpc/include/asm/reg.h19
-rw-r--r--arch/powerpc/include/asm/rtas.h6
-rw-r--r--arch/powerpc/include/asm/security_features.h4
-rw-r--r--arch/powerpc/include/asm/set_memory.h34
-rw-r--r--arch/powerpc/include/asm/setup.h1
-rw-r--r--arch/powerpc/include/asm/simple_spinlock.h6
-rw-r--r--arch/powerpc/include/asm/smp.h6
-rw-r--r--arch/powerpc/include/asm/spinlock.h3
-rw-r--r--arch/powerpc/include/asm/sstep.h7
-rw-r--r--arch/powerpc/include/asm/switch_to.h10
-rw-r--r--arch/powerpc/include/asm/syscall.h42
-rw-r--r--arch/powerpc/include/asm/thread_info.h7
-rw-r--r--arch/powerpc/include/asm/time.h12
-rw-r--r--arch/powerpc/include/asm/topology.h2
-rw-r--r--arch/powerpc/include/asm/uaccess.h389
-rw-r--r--arch/powerpc/include/asm/unaligned.h22
-rw-r--r--arch/powerpc/include/asm/unistd.h1
-rw-r--r--arch/powerpc/include/asm/uprobes.h4
-rw-r--r--arch/powerpc/include/asm/vas.h109
-rw-r--r--arch/powerpc/include/asm/vdso/gettimeofday.h10
-rw-r--r--arch/powerpc/include/asm/vdso_datapage.h2
-rw-r--r--arch/powerpc/include/asm/vio.h3
-rw-r--r--arch/powerpc/include/asm/vmalloc.h20
-rw-r--r--arch/powerpc/include/asm/xics.h4
-rw-r--r--arch/powerpc/include/asm/xive.h1
-rw-r--r--arch/powerpc/include/uapi/asm/errno.h1
-rw-r--r--arch/powerpc/include/uapi/asm/papr_pdsm.h6
-rw-r--r--arch/powerpc/include/uapi/asm/posix_types.h5
-rw-r--r--arch/powerpc/include/uapi/asm/vas-api.h6
-rw-r--r--arch/powerpc/kernel/Makefile4
-rw-r--r--arch/powerpc/kernel/align.c72
-rw-r--r--arch/powerpc/kernel/asm-offsets.c91
-rw-r--r--arch/powerpc/kernel/crash_dump.c6
-rw-r--r--arch/powerpc/kernel/eeh.c30
-rw-r--r--arch/powerpc/kernel/entry_32.S850
-rw-r--r--arch/powerpc/kernel/entry_64.S526
-rw-r--r--arch/powerpc/kernel/epapr_paravirt.c4
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S515
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S563
-rw-r--r--arch/powerpc/kernel/fadump.c18
-rw-r--r--arch/powerpc/kernel/firmware.c10
-rw-r--r--arch/powerpc/kernel/fpu.S6
-rw-r--r--arch/powerpc/kernel/head_32.h239
-rw-r--r--arch/powerpc/kernel/head_40x.S307
-rw-r--r--arch/powerpc/kernel/head_44x.S60
-rw-r--r--arch/powerpc/kernel/head_64.S25
-rw-r--r--arch/powerpc/kernel/head_8xx.S179
-rw-r--r--arch/powerpc/kernel/head_book3s_32.S390
-rw-r--r--arch/powerpc/kernel/head_booke.h236
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S107
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c4
-rw-r--r--arch/powerpc/kernel/hw_breakpoint_constraints.c2
-rw-r--r--arch/powerpc/kernel/idle_6xx.S14
-rw-r--r--arch/powerpc/kernel/idle_book3s.S4
-rw-r--r--arch/powerpc/kernel/idle_e500.S14
-rw-r--r--arch/powerpc/kernel/interrupt.c530
-rw-r--r--arch/powerpc/kernel/interrupt_64.S770
-rw-r--r--arch/powerpc/kernel/io-workarounds.c16
-rw-r--r--arch/powerpc/kernel/iommu.c35
-rw-r--r--arch/powerpc/kernel/irq.c228
-rw-r--r--arch/powerpc/kernel/isa-bridge.c4
-rw-r--r--arch/powerpc/kernel/jump_label.c6
-rw-r--r--arch/powerpc/kernel/kgdb.c21
-rw-r--r--arch/powerpc/kernel/kprobes-ftrace.c4
-rw-r--r--arch/powerpc/kernel/kprobes.c80
-rw-r--r--arch/powerpc/kernel/legacy_serial.c36
-rw-r--r--arch/powerpc/kernel/mce.c8
-rw-r--r--arch/powerpc/kernel/mce_power.c50
-rw-r--r--arch/powerpc/kernel/misc_32.S45
-rw-r--r--arch/powerpc/kernel/misc_64.S22
-rw-r--r--arch/powerpc/kernel/module.c37
-rw-r--r--arch/powerpc/kernel/module_32.c19
-rw-r--r--arch/powerpc/kernel/module_64.c55
-rw-r--r--arch/powerpc/kernel/nvram_64.c8
-rw-r--r--arch/powerpc/kernel/optprobes.c165
-rw-r--r--arch/powerpc/kernel/optprobes_head.S65
-rw-r--r--arch/powerpc/kernel/paca.c2
-rw-r--r--arch/powerpc/kernel/pci_64.c2
-rw-r--r--arch/powerpc/kernel/process.c136
-rw-r--r--arch/powerpc/kernel/prom.c23
-rw-r--r--arch/powerpc/kernel/prom_init.c123
-rw-r--r--arch/powerpc/kernel/ptrace/Makefile4
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-adv.c20
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-decl.h14
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-fpu.c10
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-noadv.c14
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-novsx.c8
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-view.c58
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace.c4
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace32.c4
-rw-r--r--arch/powerpc/kernel/rtas-proc.c15
-rw-r--r--arch/powerpc/kernel/rtas-rtc.c2
-rw-r--r--arch/powerpc/kernel/rtas.c48
-rw-r--r--arch/powerpc/kernel/security.c287
-rw-r--r--arch/powerpc/kernel/setup-common.c8
-rw-r--r--arch/powerpc/kernel/setup_32.c6
-rw-r--r--arch/powerpc/kernel/setup_64.c301
-rw-r--r--arch/powerpc/kernel/signal.c12
-rw-r--r--arch/powerpc/kernel/signal.h57
-rw-r--r--arch/powerpc/kernel/signal_32.c356
-rw-r--r--arch/powerpc/kernel/signal_64.c363
-rw-r--r--arch/powerpc/kernel/smp.c86
-rw-r--r--arch/powerpc/kernel/stacktrace.c134
-rw-r--r--arch/powerpc/kernel/syscalls.c15
-rw-r--r--arch/powerpc/kernel/syscalls/Makefile33
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl4
-rw-r--r--arch/powerpc/kernel/syscalls/syscallhdr.sh36
-rw-r--r--arch/powerpc/kernel/syscalls/syscalltbl.sh36
-rw-r--r--arch/powerpc/kernel/sysfs.c12
-rw-r--r--arch/powerpc/kernel/systbl.S5
-rw-r--r--arch/powerpc/kernel/tau_6xx.c2
-rw-r--r--arch/powerpc/kernel/time.c21
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c71
-rw-r--r--arch/powerpc/kernel/traps.c89
-rw-r--r--arch/powerpc/kernel/udbg_16550.c39
-rw-r--r--arch/powerpc/kernel/uprobes.c13
-rw-r--r--arch/powerpc/kernel/vdso.c138
-rw-r--r--arch/powerpc/kernel/vdso32/gettimeofday.S11
-rw-r--r--arch/powerpc/kernel/vdso32/vdso32.lds.S2
-rw-r--r--arch/powerpc/kernel/vdso64/vdso64.lds.S2
-rw-r--r--arch/powerpc/kernel/vector.S10
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S19
-rw-r--r--arch/powerpc/kernel/watchdog.c1
-rw-r--r--arch/powerpc/kexec/Makefile7
-rw-r--r--arch/powerpc/kexec/core.c4
-rw-r--r--arch/powerpc/kexec/crash.c7
-rw-r--r--arch/powerpc/kexec/elf_64.c32
-rw-r--r--arch/powerpc/kexec/file_load.c183
-rw-r--r--arch/powerpc/kexec/file_load_64.c137
-rw-r--r--arch/powerpc/kexec/ima.c219
-rw-r--r--arch/powerpc/kvm/Makefile4
-rw-r--r--arch/powerpc/kvm/book3s.c126
-rw-r--r--arch/powerpc/kvm/book3s.h10
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c3
-rw-r--r--arch/powerpc/kvm/book3s_64_entry.S416
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c2
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c118
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c51
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c12
-rw-r--r--arch/powerpc/kvm/book3s_hv.c911
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c140
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S9
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c159
-rw-r--r--arch/powerpc/kvm/book3s_hv_p9_entry.c508
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c32
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c15
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S689
-rw-r--r--arch/powerpc/kvm/book3s_hv_uvmem.c3
-rw-r--r--arch/powerpc/kvm/book3s_pr.c60
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c2
-rw-r--r--arch/powerpc/kvm/book3s_segment.S3
-rw-r--r--arch/powerpc/kvm/book3s_xive.c114
-rw-r--r--arch/powerpc/kvm/book3s_xive.h7
-rw-r--r--arch/powerpc/kvm/book3s_xive_native.c11
-rw-r--r--arch/powerpc/kvm/booke.c76
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c29
-rw-r--r--arch/powerpc/kvm/powerpc.c3
-rw-r--r--arch/powerpc/kvm/trace_booke.h15
-rw-r--r--arch/powerpc/lib/Makefile7
-rw-r--r--arch/powerpc/lib/checksum_wrappers.c15
-rw-r--r--arch/powerpc/lib/code-patching.c191
-rw-r--r--arch/powerpc/lib/error-inject.c2
-rw-r--r--arch/powerpc/lib/feature-fixups.c344
-rw-r--r--arch/powerpc/lib/inst.c73
-rw-r--r--arch/powerpc/lib/restart_table.c56
-rw-r--r--arch/powerpc/lib/sstep.c56
-rw-r--r--arch/powerpc/lib/test_emulate_step.c38
-rw-r--r--arch/powerpc/math-emu/math.c6
-rw-r--r--arch/powerpc/math-emu/math_efp.c2
-rw-r--r--arch/powerpc/mm/Makefile7
-rw-r--r--arch/powerpc/mm/book3s32/Makefile2
-rw-r--r--arch/powerpc/mm/book3s32/hash_low.S20
-rw-r--r--arch/powerpc/mm/book3s32/kuap.c33
-rw-r--r--arch/powerpc/mm/book3s32/kuep.c20
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c29
-rw-r--r--arch/powerpc/mm/book3s32/mmu_context.c48
-rw-r--r--arch/powerpc/mm/book3s64/hash_pgtable.c130
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c47
-rw-r--r--arch/powerpc/mm/book3s64/mmu_context.c2
-rw-r--r--arch/powerpc/mm/book3s64/pkeys.c20
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c64
-rw-r--r--arch/powerpc/mm/book3s64/radix_tlb.c228
-rw-r--r--arch/powerpc/mm/cacheflush.c234
-rw-r--r--arch/powerpc/mm/fault.c60
-rw-r--r--arch/powerpc/mm/hugetlbpage.c3
-rw-r--r--arch/powerpc/mm/init_32.c3
-rw-r--r--arch/powerpc/mm/ioremap.c2
-rw-r--r--arch/powerpc/mm/ioremap_32.c4
-rw-r--r--arch/powerpc/mm/ioremap_64.c2
-rw-r--r--arch/powerpc/mm/maccess.c21
-rw-r--r--arch/powerpc/mm/mem.c296
-rw-r--r--arch/powerpc/mm/mmu_context.c28
-rw-r--r--arch/powerpc/mm/mmu_decl.h5
-rw-r--r--arch/powerpc/mm/nohash/44x.c17
-rw-r--r--arch/powerpc/mm/nohash/8xx.c46
-rw-r--r--arch/powerpc/mm/nohash/mmu_context.c173
-rw-r--r--arch/powerpc/mm/nohash/tlb_low.S13
-rw-r--r--arch/powerpc/mm/pageattr.c134
-rw-r--r--arch/powerpc/mm/pgtable.c8
-rw-r--r--arch/powerpc/mm/pgtable_32.c60
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.c22
-rw-r--r--arch/powerpc/net/Makefile6
-rw-r--r--arch/powerpc/net/bpf_jit.h64
-rw-r--r--arch/powerpc/net/bpf_jit32.h139
-rw-r--r--arch/powerpc/net/bpf_jit64.h21
-rw-r--r--arch/powerpc/net/bpf_jit_asm.S226
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c783
-rw-r--r--arch/powerpc/net/bpf_jit_comp32.c1100
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c309
-rw-r--r--arch/powerpc/perf/Makefile6
-rw-r--r--arch/powerpc/perf/callchain.c2
-rw-r--r--arch/powerpc/perf/core-book3s.c30
-rw-r--r--arch/powerpc/perf/generic-compat-pmu.c170
-rw-r--r--arch/powerpc/perf/hv-24x7.c10
-rw-r--r--arch/powerpc/perf/isa207-common.c115
-rw-r--r--arch/powerpc/perf/isa207-common.h9
-rw-r--r--arch/powerpc/perf/power10-events-list.h4
-rw-r--r--arch/powerpc/perf/power10-pmu.c13
-rw-r--r--arch/powerpc/perf/power9-pmu.c13
-rw-r--r--arch/powerpc/platforms/44x/Kconfig2
-rw-r--r--arch/powerpc/platforms/52xx/lite5200_sleep.S2
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c1
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_smp.c4
-rw-r--r--arch/powerpc/platforms/Kconfig3
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype37
-rw-r--r--arch/powerpc/platforms/Makefile2
-rw-r--r--arch/powerpc/platforms/book3s/Kconfig15
-rw-r--r--arch/powerpc/platforms/book3s/Makefile2
-rw-r--r--arch/powerpc/platforms/book3s/vas-api.c493
-rw-r--r--arch/powerpc/platforms/cell/Kconfig1
-rw-r--r--arch/powerpc/platforms/cell/iommu.c3
-rw-r--r--arch/powerpc/platforms/cell/pmu.c1
-rw-r--r--arch/powerpc/platforms/cell/spider-pci.c3
-rw-r--r--arch/powerpc/platforms/cell/spu_callbacks.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c3
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c10
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c6
-rw-r--r--arch/powerpc/platforms/chrp/pci.c3
-rw-r--r--arch/powerpc/platforms/embedded6xx/Kconfig5
-rw-r--r--arch/powerpc/platforms/embedded6xx/flipper-pic.c1
-rw-r--r--arch/powerpc/platforms/embedded6xx/holly.c4
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c4
-rw-r--r--arch/powerpc/platforms/maple/pci.c2
-rw-r--r--arch/powerpc/platforms/microwatt/Kconfig13
-rw-r--r--arch/powerpc/platforms/microwatt/Makefile1
-rw-r--r--arch/powerpc/platforms/microwatt/rng.c48
-rw-r--r--arch/powerpc/platforms/microwatt/setup.c41
-rw-r--r--arch/powerpc/platforms/pasemi/idle.c4
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c4
-rw-r--r--arch/powerpc/platforms/powermac/Kconfig1
-rw-r--r--arch/powerpc/platforms/powermac/bootx_init.c2
-rw-r--r--arch/powerpc/platforms/powermac/smp.c4
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig14
-rw-r--r--arch/powerpc/platforms/powernv/Makefile4
-rw-r--r--arch/powerpc/platforms/powernv/idle.c52
-rw-r--r--arch/powerpc/platforms/powernv/memtrace.c22
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c705
-rw-r--r--arch/powerpc/platforms/powernv/opal-call.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal-core.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-prd.c5
-rw-r--r--arch/powerpc/platforms/powernv/opal.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c200
-rw-r--r--arch/powerpc/platforms/powernv/pci.c13
-rw-r--r--arch/powerpc/platforms/powernv/pci.h17
-rw-r--r--arch/powerpc/platforms/powernv/setup.c2
-rw-r--r--arch/powerpc/platforms/powernv/subcore.c10
-rw-r--r--arch/powerpc/platforms/powernv/vas-api.c278
-rw-r--r--arch/powerpc/platforms/powernv/vas-debug.c27
-rw-r--r--arch/powerpc/platforms/powernv/vas-fault.c173
-rw-r--r--arch/powerpc/platforms/powernv/vas-trace.h4
-rw-r--r--arch/powerpc/platforms/powernv/vas-window.c264
-rw-r--r--arch/powerpc/platforms/powernv/vas.h50
-rw-r--r--arch/powerpc/platforms/ps3/Kconfig10
-rw-r--r--arch/powerpc/platforms/ps3/interrupt.c5
-rw-r--r--arch/powerpc/platforms/ps3/mm.c12
-rw-r--r--arch/powerpc/platforms/ps3/setup.c43
-rw-r--r--arch/powerpc/platforms/ps3/system-bus.c9
-rw-r--r--arch/powerpc/platforms/pseries/Makefile1
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c19
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c27
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c92
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S39
-rw-r--r--arch/powerpc/platforms/pseries/hvCall_inst.c2
-rw-r--r--arch/powerpc/platforms/pseries/ibmebus.c1
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c49
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c38
-rw-r--r--arch/powerpc/platforms/pseries/lparcfg.c2
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c48
-rw-r--r--arch/powerpc/platforms/pseries/msi.c25
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c149
-rw-r--r--arch/powerpc/platforms/pseries/pci.c23
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c4
-rw-r--r--arch/powerpc/platforms/pseries/pmem.c2
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h5
-rw-r--r--arch/powerpc/platforms/pseries/ras.c8
-rw-r--r--arch/powerpc/platforms/pseries/rtas-fadump.c2
-rw-r--r--arch/powerpc/platforms/pseries/setup.c17
-rw-r--r--arch/powerpc/platforms/pseries/smp.c4
-rw-r--r--arch/powerpc/platforms/pseries/svm.c6
-rw-r--r--arch/powerpc/platforms/pseries/vas.c595
-rw-r--r--arch/powerpc/platforms/pseries/vas.h125
-rw-r--r--arch/powerpc/platforms/pseries/vio.c27
-rw-r--r--arch/powerpc/purgatory/trampoline_64.S1
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c3
-rw-r--r--arch/powerpc/sysdev/ehv_pic.c1
-rw-r--r--arch/powerpc/sysdev/fsl_mpic_err.c1
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c4
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c4
-rw-r--r--arch/powerpc/sysdev/i8259.c3
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/powerpc/sysdev/tsi108_dev.c5
-rw-r--r--arch/powerpc/sysdev/tsi108_pci.c3
-rw-r--r--arch/powerpc/sysdev/xics/Kconfig3
-rw-r--r--arch/powerpc/sysdev/xics/Makefile1
-rw-r--r--arch/powerpc/sysdev/xics/icp-hv.c1
-rw-r--r--arch/powerpc/sysdev/xics/icp-opal.c1
-rw-r--r--arch/powerpc/sysdev/xics/ics-native.c257
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c4
-rw-r--r--arch/powerpc/sysdev/xive/Kconfig1
-rw-r--r--arch/powerpc/sysdev/xive/common.c217
-rw-r--r--arch/powerpc/sysdev/xive/native.c6
-rw-r--r--arch/powerpc/sysdev/xive/spapr.c2
-rw-r--r--arch/powerpc/sysdev/xive/xive-internal.h3
-rw-r--r--arch/powerpc/xmon/xmon.c291
-rw-r--r--arch/riscv/Kconfig126
-rw-r--r--arch/riscv/Kconfig.erratas44
-rw-r--r--arch/riscv/Kconfig.socs11
-rw-r--r--arch/riscv/Makefile20
-rw-r--r--arch/riscv/boot/Makefile13
-rw-r--r--arch/riscv/boot/dts/Makefile1
-rw-r--r--arch/riscv/boot/dts/microchip/Makefile3
-rw-r--r--arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts72
-rw-r--r--arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi329
-rw-r--r--arch/riscv/boot/dts/sifive/Makefile1
-rw-r--r--arch/riscv/boot/dts/sifive/fu740-c000.dtsi35
-rw-r--r--arch/riscv/boot/loader.lds.S3
-rw-r--r--arch/riscv/configs/defconfig4
-rw-r--r--arch/riscv/errata/Makefile2
-rw-r--r--arch/riscv/errata/alternative.c74
-rw-r--r--arch/riscv/errata/sifive/Makefile2
-rw-r--r--arch/riscv/errata/sifive/errata.c106
-rw-r--r--arch/riscv/errata/sifive/errata_cip_453.S38
-rw-r--r--arch/riscv/include/asm/alternative-macros.h142
-rw-r--r--arch/riscv/include/asm/alternative.h39
-rw-r--r--arch/riscv/include/asm/asm-prototypes.h16
-rw-r--r--arch/riscv/include/asm/asm.h1
-rw-r--r--arch/riscv/include/asm/atomic.h128
-rw-r--r--arch/riscv/include/asm/cmpxchg.h34
-rw-r--r--arch/riscv/include/asm/csr.h3
-rw-r--r--arch/riscv/include/asm/elf.h6
-rw-r--r--arch/riscv/include/asm/errata_list.h39
-rw-r--r--arch/riscv/include/asm/ftrace.h14
-rw-r--r--arch/riscv/include/asm/irq.h2
-rw-r--r--arch/riscv/include/asm/kexec.h56
-rw-r--r--arch/riscv/include/asm/page.h47
-rw-r--r--arch/riscv/include/asm/pgalloc.h2
-rw-r--r--arch/riscv/include/asm/pgtable.h66
-rw-r--r--arch/riscv/include/asm/processor.h1
-rw-r--r--arch/riscv/include/asm/ptrace.h5
-rw-r--r--arch/riscv/include/asm/sbi.h7
-rw-r--r--arch/riscv/include/asm/sections.h1
-rw-r--r--arch/riscv/include/asm/set_memory.h6
-rw-r--r--arch/riscv/include/asm/smp.h4
-rw-r--r--arch/riscv/include/asm/string.h5
-rw-r--r--arch/riscv/include/asm/syscall.h2
-rw-r--r--arch/riscv/include/asm/timex.h2
-rw-r--r--arch/riscv/include/asm/tlbflush.h3
-rw-r--r--arch/riscv/include/asm/uaccess.h8
-rw-r--r--arch/riscv/include/asm/vendorid_list.h10
-rw-r--r--arch/riscv/kernel/Makefile7
-rw-r--r--arch/riscv/kernel/crash_dump.c46
-rw-r--r--arch/riscv/kernel/crash_save_regs.S56
-rw-r--r--arch/riscv/kernel/entry.S10
-rw-r--r--arch/riscv/kernel/head.S49
-rw-r--r--arch/riscv/kernel/head.h3
-rw-r--r--arch/riscv/kernel/kexec_relocate.S223
-rw-r--r--arch/riscv/kernel/machine_kexec.c194
-rw-r--r--arch/riscv/kernel/mcount.S10
-rw-r--r--arch/riscv/kernel/module.c8
-rw-r--r--arch/riscv/kernel/probes/ftrace.c29
-rw-r--r--arch/riscv/kernel/probes/kprobes.c34
-rw-r--r--arch/riscv/kernel/process.c1
-rw-r--r--arch/riscv/kernel/sbi.c33
-rw-r--r--arch/riscv/kernel/setup.c129
-rw-r--r--arch/riscv/kernel/smp.c24
-rw-r--r--arch/riscv/kernel/smpboot.c5
-rw-r--r--arch/riscv/kernel/stacktrace.c18
-rw-r--r--arch/riscv/kernel/syscall_table.c2
-rw-r--r--arch/riscv/kernel/time.c3
-rw-r--r--arch/riscv/kernel/traps.c19
-rw-r--r--arch/riscv/kernel/vdso.c4
-rw-r--r--arch/riscv/kernel/vdso/Makefile16
-rw-r--r--arch/riscv/kernel/vmlinux-xip.lds.S146
-rw-r--r--arch/riscv/kernel/vmlinux.lds.S16
-rw-r--r--arch/riscv/mm/fault.c14
-rw-r--r--arch/riscv/mm/init.c349
-rw-r--r--arch/riscv/mm/kasan_init.c85
-rw-r--r--arch/riscv/mm/physaddr.c2
-rw-r--r--arch/riscv/mm/ptdump.c75
-rw-r--r--arch/riscv/net/bpf_jit_comp64.c13
-rw-r--r--arch/riscv/net/bpf_jit_core.c14
-rw-r--r--arch/s390/Kconfig20
-rw-r--r--arch/s390/Kconfig.debug8
-rw-r--r--arch/s390/configs/debug_defconfig21
-rw-r--r--arch/s390/configs/defconfig15
-rw-r--r--arch/s390/configs/zfcpdump_defconfig3
-rw-r--r--arch/s390/crypto/arch_random.c4
-rw-r--r--arch/s390/crypto/crc32be-vx.S4
-rw-r--r--arch/s390/include/asm/atomic.h96
-rw-r--r--arch/s390/include/asm/atomic_ops.h76
-rw-r--r--arch/s390/include/asm/bitops.h93
-rw-r--r--arch/s390/include/asm/ccwdev.h3
-rw-r--r--arch/s390/include/asm/cmpxchg.h168
-rw-r--r--arch/s390/include/asm/cpu_mcf.h2
-rw-r--r--arch/s390/include/asm/entry-common.h14
-rw-r--r--arch/s390/include/asm/idle.h12
-rw-r--r--arch/s390/include/asm/io.h5
-rw-r--r--arch/s390/include/asm/kvm_host.h14
-rw-r--r--arch/s390/include/asm/page.h6
-rw-r--r--arch/s390/include/asm/pci.h10
-rw-r--r--arch/s390/include/asm/pgalloc.h3
-rw-r--r--arch/s390/include/asm/pgtable.h7
-rw-r--r--arch/s390/include/asm/preempt.h4
-rw-r--r--arch/s390/include/asm/qdio.h26
-rw-r--r--arch/s390/include/asm/smp.h1
-rw-r--r--arch/s390/include/asm/spinlock.h2
-rw-r--r--arch/s390/include/asm/spinlock_types.h2
-rw-r--r--arch/s390/include/asm/stacktrace.h19
-rw-r--r--arch/s390/include/asm/timex.h36
-rw-r--r--arch/s390/include/asm/vdso/data.h2
-rw-r--r--arch/s390/include/asm/vdso/gettimeofday.h3
-rw-r--r--arch/s390/include/uapi/asm/hwctrset.h (renamed from arch/s390/include/uapi/asm/perf_cpum_cf_diag.h)0
-rw-r--r--arch/s390/kernel/Makefile2
-rw-r--r--arch/s390/kernel/cpcmd.c6
-rw-r--r--arch/s390/kernel/dis.c2
-rw-r--r--arch/s390/kernel/dumpstack.c12
-rw-r--r--arch/s390/kernel/entry.S12
-rw-r--r--arch/s390/kernel/entry.h25
-rw-r--r--arch/s390/kernel/idle.c12
-rw-r--r--arch/s390/kernel/ipl.c5
-rw-r--r--arch/s390/kernel/irq.c2
-rw-r--r--arch/s390/kernel/kprobes.c22
-rw-r--r--arch/s390/kernel/os_info.c2
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c14
-rw-r--r--arch/s390/kernel/perf_cpum_cf_common.c46
-rw-r--r--arch/s390/kernel/perf_cpum_cf_diag.c77
-rw-r--r--arch/s390/kernel/perf_event.c21
-rw-r--r--arch/s390/kernel/pgm_check.S147
-rw-r--r--arch/s390/kernel/process.c2
-rw-r--r--arch/s390/kernel/setup.c6
-rw-r--r--arch/s390/kernel/signal.c1
-rw-r--r--arch/s390/kernel/smp.c2
-rw-r--r--arch/s390/kernel/stacktrace.c6
-rw-r--r--arch/s390/kernel/syscall.c1
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl4
-rw-r--r--arch/s390/kernel/time.c38
-rw-r--r--arch/s390/kernel/topology.c14
-rw-r--r--arch/s390/kernel/traps.c78
-rw-r--r--arch/s390/kernel/uv.c47
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/s390/kvm/Makefile3
-rw-r--r--arch/s390/kvm/diag.c31
-rw-r--r--arch/s390/kvm/gaccess.c30
-rw-r--r--arch/s390/kvm/gaccess.h60
-rw-r--r--arch/s390/kvm/interrupt.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c267
-rw-r--r--arch/s390/kvm/kvm-s390.h8
-rw-r--r--arch/s390/kvm/pv.c7
-rw-r--r--arch/s390/kvm/vsie.c109
-rw-r--r--arch/s390/lib/test_unwind.c19
-rw-r--r--arch/s390/mm/fault.c20
-rw-r--r--arch/s390/mm/hugetlbpage.c2
-rw-r--r--arch/s390/mm/init.c2
-rw-r--r--arch/s390/mm/page-states.c8
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/s390/net/bpf_jit_comp.c64
-rw-r--r--arch/s390/pci/pci.c114
-rw-r--r--arch/s390/pci/pci_bus.c331
-rw-r--r--arch/s390/pci/pci_bus.h6
-rw-r--r--arch/s390/pci/pci_event.c108
-rw-r--r--arch/s390/pci/pci_sysfs.c44
-rw-r--r--arch/s390/tools/gen_facilities.c4
-rw-r--r--arch/sh/Kconfig8
-rw-r--r--arch/sh/Makefile2
-rw-r--r--arch/sh/configs/edosk7705_defconfig1
-rw-r--r--arch/sh/configs/se7206_defconfig1
-rw-r--r--arch/sh/configs/sh2007_defconfig1
-rw-r--r--arch/sh/configs/sh7724_generic_defconfig1
-rw-r--r--arch/sh/configs/sh7770_generic_defconfig1
-rw-r--r--arch/sh/configs/sh7785lcr_32bit_defconfig1
-rw-r--r--arch/sh/include/asm/atomic-grb.h6
-rw-r--r--arch/sh/include/asm/atomic-irq.h6
-rw-r--r--arch/sh/include/asm/atomic-llsc.h6
-rw-r--r--arch/sh/include/asm/atomic.h8
-rw-r--r--arch/sh/include/asm/bitops.h5
-rw-r--r--arch/sh/include/asm/cmpxchg.h4
-rw-r--r--arch/sh/include/asm/io.h5
-rw-r--r--arch/sh/include/asm/mmzone.h4
-rw-r--r--arch/sh/include/asm/pgalloc.h1
-rw-r--r--arch/sh/include/asm/pgtable.h2
-rw-r--r--arch/sh/include/asm/tlb.h10
-rw-r--r--arch/sh/include/asm/unaligned-sh4a.h199
-rw-r--r--arch/sh/include/asm/unaligned.h13
-rw-r--r--arch/sh/kernel/ftrace.c2
-rw-r--r--arch/sh/kernel/kprobes.c17
-rw-r--r--arch/sh/kernel/perf_event.c18
-rw-r--r--arch/sh/kernel/process_32.c2
-rw-r--r--arch/sh/kernel/smp.c2
-rw-r--r--arch/sh/kernel/syscalls/Makefile14
-rw-r--r--arch/sh/kernel/syscalls/syscall.tbl4
-rw-r--r--arch/sh/kernel/syscalls/syscallhdr.sh36
-rw-r--r--arch/sh/kernel/syscalls/syscalltbl.sh32
-rw-r--r--arch/sh/kernel/topology.c2
-rw-r--r--arch/sh/kernel/traps.c1
-rw-r--r--arch/sh/mm/Kconfig10
-rw-r--r--arch/sh/mm/cache-sh4.c1
-rw-r--r--arch/sh/mm/cache-sh7705.c1
-rw-r--r--arch/sh/mm/hugetlbpage.c2
-rw-r--r--arch/sh/mm/init.c3
-rw-r--r--arch/sparc/Kconfig7
-rw-r--r--arch/sparc/configs/sparc64_defconfig5
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/atomic_32.h38
-rw-r--r--arch/sparc/include/asm/atomic_64.h36
-rw-r--r--arch/sparc/include/asm/cmpxchg_32.h12
-rw-r--r--arch/sparc/include/asm/cmpxchg_64.h12
-rw-r--r--arch/sparc/include/asm/elf_64.h1
-rw-r--r--arch/sparc/include/asm/extable.h (renamed from arch/sparc/include/asm/extable_64.h)4
-rw-r--r--arch/sparc/include/asm/ftrace.h2
-rw-r--r--arch/sparc/include/asm/io_64.h9
-rw-r--r--arch/sparc/include/asm/mmzone.h4
-rw-r--r--arch/sparc/include/asm/pgalloc_32.h1
-rw-r--r--arch/sparc/include/asm/pgalloc_64.h1
-rw-r--r--arch/sparc/include/asm/pgtable_32.h6
-rw-r--r--arch/sparc/include/asm/pgtable_64.h8
-rw-r--r--arch/sparc/include/asm/processor_32.h6
-rw-r--r--arch/sparc/include/asm/thread_info_64.h1
-rw-r--r--arch/sparc/include/asm/uaccess.h3
-rw-r--r--arch/sparc/include/asm/uaccess_32.h38
-rw-r--r--arch/sparc/include/asm/uaccess_64.h1
-rw-r--r--arch/sparc/include/asm/unaligned.h11
-rw-r--r--arch/sparc/include/uapi/asm/socket.h2
-rw-r--r--arch/sparc/kernel/head_32.S2
-rw-r--r--arch/sparc/kernel/head_64.S2
-rw-r--r--arch/sparc/kernel/kprobes.c17
-rw-r--r--arch/sparc/kernel/process_32.c15
-rw-r--r--arch/sparc/kernel/process_64.c3
-rw-r--r--arch/sparc/kernel/setup_32.c3
-rw-r--r--arch/sparc/kernel/setup_64.c4
-rw-r--r--arch/sparc/kernel/smp_32.c1
-rw-r--r--arch/sparc/kernel/smp_64.c5
-rw-r--r--arch/sparc/kernel/sstate.c1
-rw-r--r--arch/sparc/kernel/syscalls/Makefile30
-rw-r--r--arch/sparc/kernel/syscalls/syscall.tbl4
-rw-r--r--arch/sparc/kernel/syscalls/syscallhdr.sh36
-rw-r--r--arch/sparc/kernel/syscalls/syscalltbl.sh36
-rw-r--r--arch/sparc/kernel/systbls_32.S4
-rw-r--r--arch/sparc/kernel/systbls_64.S8
-rw-r--r--arch/sparc/kernel/traps_64.c13
-rw-r--r--arch/sparc/kernel/unaligned_32.c106
-rw-r--r--arch/sparc/lib/atomic32.c24
-rw-r--r--arch/sparc/lib/atomic_64.S42
-rw-r--r--arch/sparc/lib/checksum_32.S64
-rw-r--r--arch/sparc/lib/copy_user.S315
-rw-r--r--arch/sparc/lib/memset.S87
-rw-r--r--arch/sparc/mm/Makefile2
-rw-r--r--arch/sparc/mm/extable.c107
-rw-r--r--arch/sparc/mm/fault_32.c80
-rw-r--r--arch/sparc/mm/hugetlbpage.c8
-rw-r--r--arch/sparc/mm/init_32.c2
-rw-r--r--arch/sparc/mm/init_64.c14
-rw-r--r--arch/sparc/mm/mm_32.h2
-rw-r--r--arch/sparc/mm/tlb.c1
-rw-r--r--arch/um/Kconfig1
-rw-r--r--arch/um/Kconfig.debug1
-rw-r--r--arch/um/configs/kunit_defconfig3
-rw-r--r--arch/um/drivers/cow.h7
-rw-r--r--arch/um/drivers/hostaudio_kern.c4
-rw-r--r--arch/um/drivers/mconsole_kern.c1
-rw-r--r--arch/um/drivers/vector_kern.c1
-rw-r--r--arch/um/include/asm/pgalloc.h1
-rw-r--r--arch/um/include/asm/pgtable-2level.h1
-rw-r--r--arch/um/include/asm/pgtable-3level.h1
-rw-r--r--arch/um/include/asm/pgtable.h2
-rw-r--r--arch/um/include/uapi/asm/Kbuild1
-rw-r--r--arch/um/kernel/Makefile1
-rw-r--r--arch/um/kernel/dyn.lds.S6
-rw-r--r--arch/um/kernel/gmon_syms.c16
-rw-r--r--arch/um/kernel/kmsg_dump.c13
-rw-r--r--arch/um/kernel/mem.c4
-rw-r--r--arch/um/kernel/process.c2
-rw-r--r--arch/um/kernel/skas/syscall.c2
-rw-r--r--arch/um/kernel/um_arch.c1
-rw-r--r--arch/um/kernel/uml.lds.S6
-rw-r--r--arch/x86/Kconfig95
-rw-r--r--arch/x86/Makefile33
-rw-r--r--arch/x86/boot/.gitignore1
-rw-r--r--arch/x86/boot/Makefile44
-rw-r--r--arch/x86/boot/compressed/Makefile8
-rw-r--r--arch/x86/boot/compressed/efi_thunk_64.S2
-rw-r--r--arch/x86/boot/compressed/head_64.S172
-rw-r--r--arch/x86/boot/compressed/idt_64.c14
-rw-r--r--arch/x86/boot/compressed/kaslr.c4
-rw-r--r--arch/x86/boot/compressed/mem_encrypt.S130
-rw-r--r--arch/x86/boot/compressed/misc.c11
-rw-r--r--arch/x86/boot/compressed/misc.h8
-rw-r--r--arch/x86/boot/compressed/sev.c (renamed from arch/x86/boot/compressed/sev-es.c)27
-rw-r--r--arch/x86/boot/genimage.sh303
-rw-r--r--arch/x86/boot/mtools.conf.in3
-rw-r--r--arch/x86/crypto/Makefile2
-rw-r--r--arch/x86/crypto/aesni-intel_avx-x86_64.S28
-rw-r--r--arch/x86/crypto/camellia-aesni-avx2-asm_64.S5
-rw-r--r--arch/x86/crypto/crc32-pclmul_glue.c2
-rw-r--r--arch/x86/crypto/crc32c-pcl-intel-asm_64.S7
-rw-r--r--arch/x86/crypto/curve25519-x86_64.c8
-rw-r--r--arch/x86/crypto/poly1305_glue.c6
-rw-r--r--arch/x86/crypto/sha1_avx2_x86_64_asm.S8
-rw-r--r--arch/x86/crypto/sha1_ni_asm.S8
-rw-r--r--arch/x86/crypto/sha256-avx2-asm.S13
-rw-r--r--arch/x86/crypto/sha512-avx-asm.S41
-rw-r--r--arch/x86/crypto/sha512-avx2-asm.S42
-rw-r--r--arch/x86/crypto/sha512-ssse3-asm.S41
-rw-r--r--arch/x86/crypto/twofish-x86_64-asm_64-3way.S2
-rw-r--r--arch/x86/crypto/twofish_glue_3way.c2
-rw-r--r--arch/x86/entry/Makefile10
-rw-r--r--arch/x86/entry/calling.h45
-rw-r--r--arch/x86/entry/common.c96
-rw-r--r--arch/x86/entry/entry_32.S103
-rw-r--r--arch/x86/entry/entry_64.S13
-rw-r--r--arch/x86/entry/entry_64_compat.S2
-rw-r--r--arch/x86/entry/syscall_32.c20
-rw-r--r--arch/x86/entry/syscall_64.c17
-rw-r--r--arch/x86/entry/syscall_x32.c35
-rw-r--r--arch/x86/entry/syscalls/Makefile38
-rw-r--r--arch/x86/entry/syscalls/syscall_32.tbl4
-rw-r--r--arch/x86/entry/syscalls/syscall_64.tbl4
-rw-r--r--arch/x86/entry/syscalls/syscallhdr.sh35
-rw-r--r--arch/x86/entry/syscalls/syscalltbl.sh46
-rw-r--r--arch/x86/entry/vdso/vdso2c.c2
-rw-r--r--arch/x86/entry/vdso/vdso2c.h2
-rw-r--r--arch/x86/entry/vdso/vdso32/system_call.S4
-rw-r--r--arch/x86/entry/vdso/vma.c2
-rw-r--r--arch/x86/entry/vdso/vsgx.S2
-rw-r--r--arch/x86/events/amd/core.c2
-rw-r--r--arch/x86/events/amd/iommu.c54
-rw-r--r--arch/x86/events/amd/iommu.h21
-rw-r--r--arch/x86/events/amd/uncore.c6
-rw-r--r--arch/x86/events/core.c377
-rw-r--r--arch/x86/events/intel/Makefile2
-rw-r--r--arch/x86/events/intel/bts.c2
-rw-r--r--arch/x86/events/intel/core.c734
-rw-r--r--arch/x86/events/intel/cstate.c39
-rw-r--r--arch/x86/events/intel/ds.c67
-rw-r--r--arch/x86/events/intel/lbr.c48
-rw-r--r--arch/x86/events/intel/p4.c22
-rw-r--r--arch/x86/events/intel/pt.c2
-rw-r--r--arch/x86/events/intel/uncore.c211
-rw-r--r--arch/x86/events/intel/uncore.h21
-rw-r--r--arch/x86/events/intel/uncore_discovery.c622
-rw-r--r--arch/x86/events/intel/uncore_discovery.h131
-rw-r--r--arch/x86/events/intel/uncore_snb.c131
-rw-r--r--arch/x86/events/intel/uncore_snbep.c315
-rw-r--r--arch/x86/events/msr.c2
-rw-r--r--arch/x86/events/perf_event.h126
-rw-r--r--arch/x86/events/rapl.c8
-rw-r--r--arch/x86/events/zhaoxin/core.c2
-rw-r--r--arch/x86/hyperv/hv_apic.c18
-rw-r--r--arch/x86/hyperv/hv_init.c63
-rw-r--r--arch/x86/hyperv/hv_proc.c26
-rw-r--r--arch/x86/hyperv/hv_spinlock.c8
-rw-r--r--arch/x86/hyperv/irqdomain.c6
-rw-r--r--arch/x86/hyperv/mmu.c18
-rw-r--r--arch/x86/hyperv/nested.c8
-rw-r--r--arch/x86/ia32/ia32_aout.c4
-rw-r--r--arch/x86/include/asm/Kbuild1
-rw-r--r--arch/x86/include/asm/agp.h2
-rw-r--r--arch/x86/include/asm/alternative-asm.h114
-rw-r--r--arch/x86/include/asm/alternative.h149
-rw-r--r--arch/x86/include/asm/apic.h1
-rw-r--r--arch/x86/include/asm/asm-prototypes.h13
-rw-r--r--arch/x86/include/asm/asm.h27
-rw-r--r--arch/x86/include/asm/atomic.h2
-rw-r--r--arch/x86/include/asm/barrier.h7
-rw-r--r--arch/x86/include/asm/bug.h9
-rw-r--r--arch/x86/include/asm/cmpxchg.h2
-rw-r--r--arch/x86/include/asm/cpu.h13
-rw-r--r--arch/x86/include/asm/cpufeature.h41
-rw-r--r--arch/x86/include/asm/cpufeatures.h12
-rw-r--r--arch/x86/include/asm/crash.h6
-rw-r--r--arch/x86/include/asm/desc.h25
-rw-r--r--arch/x86/include/asm/disabled-features.h7
-rw-r--r--arch/x86/include/asm/elf.h10
-rw-r--r--arch/x86/include/asm/entry-common.h16
-rw-r--r--arch/x86/include/asm/floppy.h1
-rw-r--r--arch/x86/include/asm/fpu/api.h6
-rw-r--r--arch/x86/include/asm/fpu/internal.h50
-rw-r--r--arch/x86/include/asm/hyperv-tlfs.h148
-rw-r--r--arch/x86/include/asm/idtentry.h50
-rw-r--r--arch/x86/include/asm/inat.h2
-rw-r--r--arch/x86/include/asm/insn-eval.h6
-rw-r--r--arch/x86/include/asm/insn.h46
-rw-r--r--arch/x86/include/asm/intel-family.h53
-rw-r--r--arch/x86/include/asm/intel_pconfig.h2
-rw-r--r--arch/x86/include/asm/intel_pt.h2
-rw-r--r--arch/x86/include/asm/io.h2
-rw-r--r--arch/x86/include/asm/irq_stack.h2
-rw-r--r--arch/x86/include/asm/irq_vectors.h7
-rw-r--r--arch/x86/include/asm/irqflags.h7
-rw-r--r--arch/x86/include/asm/jump_label.h89
-rw-r--r--arch/x86/include/asm/kexec.h5
-rw-r--r--arch/x86/include/asm/kfence.h7
-rw-r--r--arch/x86/include/asm/kprobes.h21
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h10
-rw-r--r--arch/x86/include/asm/kvm_host.h290
-rw-r--r--arch/x86/include/asm/kvm_para.h10
-rw-r--r--arch/x86/include/asm/mce.h13
-rw-r--r--arch/x86/include/asm/mem_encrypt.h1
-rw-r--r--arch/x86/include/asm/mshyperv.h100
-rw-r--r--arch/x86/include/asm/msr-index.h16
-rw-r--r--arch/x86/include/asm/msr.h4
-rw-r--r--arch/x86/include/asm/nops.h184
-rw-r--r--arch/x86/include/asm/nospec-branch.h9
-rw-r--r--arch/x86/include/asm/page.h6
-rw-r--r--arch/x86/include/asm/page_64.h33
-rw-r--r--arch/x86/include/asm/page_64_types.h23
-rw-r--r--arch/x86/include/asm/paravirt.h173
-rw-r--r--arch/x86/include/asm/paravirt_types.h216
-rw-r--r--arch/x86/include/asm/perf_event.h1
-rw-r--r--arch/x86/include/asm/pgalloc.h2
-rw-r--r--arch/x86/include/asm/pgtable.h2
-rw-r--r--arch/x86/include/asm/pgtable_types.h2
-rw-r--r--arch/x86/include/asm/preempt.h2
-rw-r--r--arch/x86/include/asm/processor.h39
-rw-r--r--arch/x86/include/asm/proto.h3
-rw-r--r--arch/x86/include/asm/ptrace.h20
-rw-r--r--arch/x86/include/asm/segment.h30
-rw-r--r--arch/x86/include/asm/set_memory.h4
-rw-r--r--arch/x86/include/asm/setup.h5
-rw-r--r--arch/x86/include/asm/sev-common.h76
-rw-r--r--arch/x86/include/asm/sev.h (renamed from arch/x86/include/asm/sev-es.h)30
-rw-r--r--arch/x86/include/asm/sgx.h (renamed from arch/x86/kernel/cpu/sgx/arch.h)52
-rw-r--r--arch/x86/include/asm/smap.h15
-rw-r--r--arch/x86/include/asm/smp.h1
-rw-r--r--arch/x86/include/asm/special_insns.h4
-rw-r--r--arch/x86/include/asm/stackprotector.h79
-rw-r--r--arch/x86/include/asm/suspend_32.h6
-rw-r--r--arch/x86/include/asm/svm.h13
-rw-r--r--arch/x86/include/asm/switch_to.h7
-rw-r--r--arch/x86/include/asm/syscall.h13
-rw-r--r--arch/x86/include/asm/syscall_wrapper.h11
-rw-r--r--arch/x86/include/asm/thermal.h4
-rw-r--r--arch/x86/include/asm/thread_info.h21
-rw-r--r--arch/x86/include/asm/tlbflush.h48
-rw-r--r--arch/x86/include/asm/trace/hyperv.h2
-rw-r--r--arch/x86/include/asm/unaligned.h15
-rw-r--r--arch/x86/include/asm/unistd.h8
-rw-r--r--arch/x86/include/asm/uv/uv_geo.h2
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h2
-rw-r--r--arch/x86/include/asm/vdso/clocksource.h2
-rw-r--r--arch/x86/include/asm/vdso/gettimeofday.h3
-rw-r--r--arch/x86/include/asm/vmalloc.h20
-rw-r--r--arch/x86/include/asm/vmx.h1
-rw-r--r--arch/x86/include/asm/xen/page.h12
-rw-r--r--arch/x86/include/uapi/asm/bootparam.h4
-rw-r--r--arch/x86/include/uapi/asm/debugreg.h1
-rw-r--r--arch/x86/include/uapi/asm/hwcap2.h6
-rw-r--r--arch/x86/include/uapi/asm/kvm.h15
-rw-r--r--arch/x86/include/uapi/asm/kvm_para.h13
-rw-r--r--arch/x86/include/uapi/asm/msgbuf.h2
-rw-r--r--arch/x86/include/uapi/asm/sgx.h2
-rw-r--r--arch/x86/include/uapi/asm/shmbuf.h2
-rw-r--r--arch/x86/include/uapi/asm/sigcontext.h2
-rw-r--r--arch/x86/include/uapi/asm/svm.h3
-rw-r--r--arch/x86/include/uapi/asm/vmx.h1
-rw-r--r--arch/x86/kernel/Makefile10
-rw-r--r--arch/x86/kernel/acpi/boot.c147
-rw-r--r--arch/x86/kernel/acpi/cstate.c3
-rw-r--r--arch/x86/kernel/acpi/sleep.c2
-rw-r--r--arch/x86/kernel/acpi/wakeup_64.S2
-rw-r--r--arch/x86/kernel/alternative.c341
-rw-r--r--arch/x86/kernel/amd_nb.c5
-rw-r--r--arch/x86/kernel/apic/apic.c16
-rw-r--r--arch/x86/kernel/apic/io_apic.c18
-rw-r--r--arch/x86/kernel/apic/vector.c37
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c33
-rw-r--r--arch/x86/kernel/apm_32.c6
-rw-r--r--arch/x86/kernel/asm-offsets.c7
-rw-r--r--arch/x86/kernel/asm-offsets_32.c5
-rw-r--r--arch/x86/kernel/cpu/amd.c29
-rw-r--r--arch/x86/kernel/cpu/cacheinfo.c2
-rw-r--r--arch/x86/kernel/cpu/common.c57
-rw-r--r--arch/x86/kernel/cpu/cpu.h2
-rw-r--r--arch/x86/kernel/cpu/cpuid-deps.c3
-rw-r--r--arch/x86/kernel/cpu/cyrix.c2
-rw-r--r--arch/x86/kernel/cpu/feat_ctl.c71
-rw-r--r--arch/x86/kernel/cpu/hygon.c8
-rw-r--r--arch/x86/kernel/cpu/intel.c175
-rw-r--r--arch/x86/kernel/cpu/mce/amd.c55
-rw-r--r--arch/x86/kernel/cpu/mce/apei.c3
-rw-r--r--arch/x86/kernel/cpu/mce/core.c15
-rw-r--r--arch/x86/kernel/cpu/mce/inject.c6
-rw-r--r--arch/x86/kernel/cpu/mce/intel.c1
-rw-r--r--arch/x86/kernel/cpu/mce/severity.c14
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c8
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c49
-rw-r--r--arch/x86/kernel/cpu/mtrr/cleanup.c4
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c4
-rw-r--r--arch/x86/kernel/cpu/mtrr/mtrr.c2
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c4
-rw-r--r--arch/x86/kernel/cpu/resctrl/core.c2
-rw-r--r--arch/x86/kernel/cpu/resctrl/internal.h21
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c8
-rw-r--r--arch/x86/kernel/cpu/resctrl/pseudo_lock.c16
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c6
-rw-r--r--arch/x86/kernel/cpu/scattered.c2
-rw-r--r--arch/x86/kernel/cpu/sgx/Makefile1
-rw-r--r--arch/x86/kernel/cpu/sgx/driver.c17
-rw-r--r--arch/x86/kernel/cpu/sgx/encl.c35
-rw-r--r--arch/x86/kernel/cpu/sgx/encl.h5
-rw-r--r--arch/x86/kernel/cpu/sgx/encls.h30
-rw-r--r--arch/x86/kernel/cpu/sgx/ioctl.c43
-rw-r--r--arch/x86/kernel/cpu/sgx/main.c268
-rw-r--r--arch/x86/kernel/cpu/sgx/sgx.h40
-rw-r--r--arch/x86/kernel/cpu/sgx/virt.c377
-rw-r--r--arch/x86/kernel/cpu/topology.c4
-rw-r--r--arch/x86/kernel/cpu/tsx.c37
-rw-r--r--arch/x86/kernel/cpu/vmware.c7
-rw-r--r--arch/x86/kernel/crash.c29
-rw-r--r--arch/x86/kernel/doublefault_32.c4
-rw-r--r--arch/x86/kernel/e820.c6
-rw-r--r--arch/x86/kernel/early-quirks.c3
-rw-r--r--arch/x86/kernel/fpu/signal.c80
-rw-r--r--arch/x86/kernel/fpu/xstate.c100
-rw-r--r--arch/x86/kernel/ftrace.c4
-rw-r--r--arch/x86/kernel/head64.c4
-rw-r--r--arch/x86/kernel/head_32.S18
-rw-r--r--arch/x86/kernel/head_64.S6
-rw-r--r--arch/x86/kernel/idt.c47
-rw-r--r--arch/x86/kernel/irq.c2
-rw-r--r--arch/x86/kernel/jump_label.c101
-rw-r--r--arch/x86/kernel/kexec-bzimage64.c2
-rw-r--r--arch/x86/kernel/kgdb.c4
-rw-r--r--arch/x86/kernel/kprobes/core.c614
-rw-r--r--arch/x86/kernel/kprobes/ftrace.c2
-rw-r--r--arch/x86/kernel/kprobes/opt.c9
-rw-r--r--arch/x86/kernel/kvm.c269
-rw-r--r--arch/x86/kernel/kvmclock.c47
-rw-r--r--arch/x86/kernel/machine_kexec_32.c15
-rw-r--r--arch/x86/kernel/machine_kexec_64.c37
-rw-r--r--arch/x86/kernel/mmconf-fam10h_64.c2
-rw-r--r--arch/x86/kernel/nmi.c12
-rw-r--r--arch/x86/kernel/paravirt-spinlocks.c9
-rw-r--r--arch/x86/kernel/paravirt.c77
-rw-r--r--arch/x86/kernel/paravirt_patch.c99
-rw-r--r--arch/x86/kernel/process.c32
-rw-r--r--arch/x86/kernel/ptrace.c2
-rw-r--r--arch/x86/kernel/pvclock.c2
-rw-r--r--arch/x86/kernel/reboot.c2
-rw-r--r--arch/x86/kernel/relocate_kernel_32.S2
-rw-r--r--arch/x86/kernel/relocate_kernel_64.S2
-rw-r--r--arch/x86/kernel/setup.c159
-rw-r--r--arch/x86/kernel/setup_percpu.c7
-rw-r--r--arch/x86/kernel/sev-shared.c (renamed from arch/x86/kernel/sev-es-shared.c)37
-rw-r--r--arch/x86/kernel/sev.c (renamed from arch/x86/kernel/sev-es.c)446
-rw-r--r--arch/x86/kernel/signal.c30
-rw-r--r--arch/x86/kernel/signal_compat.c10
-rw-r--r--arch/x86/kernel/smp.c4
-rw-r--r--arch/x86/kernel/smpboot.c127
-rw-r--r--arch/x86/kernel/stacktrace.c6
-rw-r--r--arch/x86/kernel/static_call.c4
-rw-r--r--arch/x86/kernel/sysfb_efi.c2
-rw-r--r--arch/x86/kernel/tboot.c44
-rw-r--r--arch/x86/kernel/tls.c8
-rw-r--r--arch/x86/kernel/topology.c2
-rw-r--r--arch/x86/kernel/trace.c234
-rw-r--r--arch/x86/kernel/traps.c32
-rw-r--r--arch/x86/kernel/tsc.c13
-rw-r--r--arch/x86/kernel/tsc_sync.c2
-rw-r--r--arch/x86/kernel/umip.c14
-rw-r--r--arch/x86/kernel/unwind_orc.c14
-rw-r--r--arch/x86/kernel/uprobes.c8
-rw-r--r--arch/x86/kvm/Kconfig18
-rw-r--r--arch/x86/kvm/Makefile15
-rw-r--r--arch/x86/kvm/cpuid.c125
-rw-r--r--arch/x86/kvm/cpuid.h155
-rw-r--r--arch/x86/kvm/debugfs.c11
-rw-r--r--arch/x86/kvm/emulate.c255
-rw-r--r--arch/x86/kvm/fpu.h140
-rw-r--r--arch/x86/kvm/hyperv.c539
-rw-r--r--arch/x86/kvm/hyperv.h2
-rw-r--r--arch/x86/kvm/irq_comm.c2
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h34
-rw-r--r--arch/x86/kvm/kvm_emulate.h14
-rw-r--r--arch/x86/kvm/kvm_onhyperv.c93
-rw-r--r--arch/x86/kvm/kvm_onhyperv.h32
-rw-r--r--arch/x86/kvm/lapic.c70
-rw-r--r--arch/x86/kvm/lapic.h2
-rw-r--r--arch/x86/kvm/mmu.h53
-rw-r--r--arch/x86/kvm/mmu/mmu.c1476
-rw-r--r--arch/x86/kvm/mmu/mmu_audit.c2
-rw-r--r--arch/x86/kvm/mmu/mmu_internal.h64
-rw-r--r--arch/x86/kvm/mmu/mmutrace.h2
-rw-r--r--arch/x86/kvm/mmu/page_track.c2
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h85
-rw-r--r--arch/x86/kvm/mmu/spte.c165
-rw-r--r--arch/x86/kvm/mmu/spte.h173
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.c30
-rw-r--r--arch/x86/kvm/mmu/tdp_iter.h4
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c857
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.h96
-rw-r--r--arch/x86/kvm/pmu.h2
-rw-r--r--arch/x86/kvm/reverse_cpuid.h186
-rw-r--r--arch/x86/kvm/svm/avic.c54
-rw-r--r--arch/x86/kvm/svm/nested.c679
-rw-r--r--arch/x86/kvm/svm/pmu.c8
-rw-r--r--arch/x86/kvm/svm/sev.c1032
-rw-r--r--arch/x86/kvm/svm/svm.c1289
-rw-r--r--arch/x86/kvm/svm/svm.h155
-rw-r--r--arch/x86/kvm/svm/svm_onhyperv.c41
-rw-r--r--arch/x86/kvm/svm/svm_onhyperv.h130
-rw-r--r--arch/x86/kvm/svm/vmenter.S47
-rw-r--r--arch/x86/kvm/trace.h8
-rw-r--r--arch/x86/kvm/vmx/capabilities.h7
-rw-r--r--arch/x86/kvm/vmx/evmcs.c3
-rw-r--r--arch/x86/kvm/vmx/evmcs.h8
-rw-r--r--arch/x86/kvm/vmx/nested.c526
-rw-r--r--arch/x86/kvm/vmx/nested.h16
-rw-r--r--arch/x86/kvm/vmx/posted_intr.c16
-rw-r--r--arch/x86/kvm/vmx/posted_intr.h1
-rw-r--r--arch/x86/kvm/vmx/sgx.c502
-rw-r--r--arch/x86/kvm/vmx/sgx.h34
-rw-r--r--arch/x86/kvm/vmx/vmcs.h13
-rw-r--r--arch/x86/kvm/vmx/vmcs12.c2
-rw-r--r--arch/x86/kvm/vmx/vmcs12.h12
-rw-r--r--arch/x86/kvm/vmx/vmx.c821
-rw-r--r--arch/x86/kvm/vmx/vmx.h56
-rw-r--r--arch/x86/kvm/vmx/vmx_ops.h4
-rw-r--r--arch/x86/kvm/x86.c1523
-rw-r--r--arch/x86/kvm/x86.h74
-rw-r--r--arch/x86/lib/atomic64_386_32.S2
-rw-r--r--arch/x86/lib/atomic64_cx8_32.S2
-rw-r--r--arch/x86/lib/copy_page_64.S2
-rw-r--r--arch/x86/lib/copy_user_64.S2
-rw-r--r--arch/x86/lib/inat.c2
-rw-r--r--arch/x86/lib/insn-eval.c122
-rw-r--r--arch/x86/lib/insn.c230
-rw-r--r--arch/x86/lib/memcpy_64.S2
-rw-r--r--arch/x86/lib/memmove_64.S2
-rw-r--r--arch/x86/lib/memset_64.S2
-rw-r--r--arch/x86/lib/mmx_32.c2
-rw-r--r--arch/x86/lib/msr-smp.c4
-rw-r--r--arch/x86/lib/msr.c4
-rw-r--r--arch/x86/lib/retpoline.S71
-rw-r--r--arch/x86/math-emu/fpu_trig.c11
-rw-r--r--arch/x86/math-emu/reg_ld_str.c2
-rw-r--r--arch/x86/math-emu/reg_round.S2
-rw-r--r--arch/x86/mm/extable.c2
-rw-r--r--arch/x86/mm/fault.c12
-rw-r--r--arch/x86/mm/init.c8
-rw-r--r--arch/x86/mm/init_32.c6
-rw-r--r--arch/x86/mm/init_64.c215
-rw-r--r--arch/x86/mm/ioremap.c23
-rw-r--r--arch/x86/mm/kaslr.c2
-rw-r--r--arch/x86/mm/kmmio.c2
-rw-r--r--arch/x86/mm/mem_encrypt.c18
-rw-r--r--arch/x86/mm/mem_encrypt_boot.S2
-rw-r--r--arch/x86/mm/mem_encrypt_identity.c43
-rw-r--r--arch/x86/mm/numa.c8
-rw-r--r--arch/x86/mm/pat/memtype.c4
-rw-r--r--arch/x86/mm/pat/set_memory.c10
-rw-r--r--arch/x86/mm/pgtable.c47
-rw-r--r--arch/x86/mm/pkeys.c6
-rw-r--r--arch/x86/mm/pti.c11
-rw-r--r--arch/x86/mm/tlb.c192
-rw-r--r--arch/x86/net/bpf_jit_comp.c113
-rw-r--r--arch/x86/net/bpf_jit_comp32.c209
-rw-r--r--arch/x86/pci/amd_bus.c4
-rw-r--r--arch/x86/pci/fixup.c46
-rw-r--r--arch/x86/platform/efi/efi.c2
-rw-r--r--arch/x86/platform/efi/efi_64.c6
-rw-r--r--arch/x86/platform/efi/quirks.c16
-rw-r--r--arch/x86/platform/intel-quark/imr.c4
-rw-r--r--arch/x86/platform/intel-quark/imr_selftest.c2
-rw-r--r--arch/x86/platform/intel/iosf_mbi.c4
-rw-r--r--arch/x86/platform/iris/iris.c1
-rw-r--r--arch/x86/platform/olpc/olpc-xo15-sci.c2
-rw-r--r--arch/x86/platform/olpc/olpc_dt.c2
-rw-r--r--arch/x86/platform/pvh/head.S20
-rw-r--r--arch/x86/platform/uv/uv_nmi.c48
-rw-r--r--arch/x86/power/cpu.c8
-rw-r--r--arch/x86/power/hibernate.c89
-rw-r--r--arch/x86/purgatory/purgatory.c2
-rw-r--r--arch/x86/realmode/Makefile1
-rw-r--r--arch/x86/realmode/init.c18
-rw-r--r--arch/x86/realmode/rm/trampoline_64.S4
-rw-r--r--arch/x86/tools/insn_decoder_test.c10
-rw-r--r--arch/x86/tools/insn_sanity.c8
-rw-r--r--arch/x86/um/Makefile1
-rw-r--r--arch/x86/um/asm/elf.h2
-rw-r--r--arch/x86/um/shared/sysdep/stub_32.h2
-rw-r--r--arch/x86/um/sys_call_table_32.c14
-rw-r--r--arch/x86/um/sys_call_table_64.c15
-rw-r--r--arch/x86/xen/enlighten.c1
-rw-r--r--arch/x86/xen/enlighten_pv.c15
-rw-r--r--arch/x86/xen/mmu_pv.c13
-rw-r--r--arch/x86/xen/p2m.c13
-rw-r--r--arch/x86/xen/pci-swiotlb-xen.c4
-rw-r--r--arch/x86/xen/setup.c16
-rw-r--r--arch/x86/xen/time.c26
-rw-r--r--arch/xtensa/Kconfig31
-rw-r--r--arch/xtensa/Makefile15
-rw-r--r--arch/xtensa/boot/Makefile4
-rw-r--r--arch/xtensa/boot/boot-elf/Makefile11
-rw-r--r--arch/xtensa/boot/boot-redboot/Makefile6
-rw-r--r--arch/xtensa/configs/xip_kc705_defconfig1
-rw-r--r--arch/xtensa/include/asm/atomic.h26
-rw-r--r--arch/xtensa/include/asm/cmpxchg.h14
-rw-r--r--arch/xtensa/include/asm/initialize_mmu.h2
-rw-r--r--arch/xtensa/include/asm/page.h4
-rw-r--r--arch/xtensa/include/asm/pgalloc.h2
-rw-r--r--arch/xtensa/include/asm/pgtable.h5
-rw-r--r--arch/xtensa/include/asm/tlbflush.h4
-rw-r--r--arch/xtensa/include/asm/unaligned.h29
-rw-r--r--arch/xtensa/include/uapi/asm/mman.h3
-rw-r--r--arch/xtensa/kernel/coprocessor.S64
-rw-r--r--arch/xtensa/kernel/head.S4
-rw-r--r--arch/xtensa/kernel/pci.c2
-rw-r--r--arch/xtensa/kernel/process.c2
-rw-r--r--arch/xtensa/kernel/smp.c1
-rw-r--r--arch/xtensa/kernel/syscall.c8
-rw-r--r--arch/xtensa/kernel/syscalls/Makefile14
-rw-r--r--arch/xtensa/kernel/syscalls/syscall.tbl4
-rw-r--r--arch/xtensa/kernel/syscalls/syscallhdr.sh36
-rw-r--r--arch/xtensa/kernel/syscalls/syscalltbl.sh32
-rw-r--r--arch/xtensa/mm/cache.c9
-rw-r--r--arch/xtensa/mm/fault.c5
-rw-r--r--arch/xtensa/mm/init.c1
-rw-r--r--arch/xtensa/mm/misc.S36
-rw-r--r--arch/xtensa/platforms/iss/console.c45
-rw-r--r--arch/xtensa/platforms/iss/include/platform/simcall-gdbio.h34
-rw-r--r--arch/xtensa/platforms/iss/include/platform/simcall-iss.h73
-rw-r--r--arch/xtensa/platforms/iss/include/platform/simcall.h104
-rw-r--r--arch/xtensa/platforms/iss/setup.c1
-rw-r--r--arch/xtensa/platforms/iss/simdisk.c29
2653 files changed, 79150 insertions, 57000 deletions
diff --git a/arch/.gitignore b/arch/.gitignore
index 4191da401dbb..756c19c34f99 100644
--- a/arch/.gitignore
+++ b/arch/.gitignore
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-i386
-x86_64
+/i386/
+/x86_64/
diff --git a/arch/Kconfig b/arch/Kconfig
index 2bb30673d8e6..129df498a8e1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -285,6 +285,13 @@ config ARCH_THREAD_STACK_ALLOCATOR
config ARCH_WANTS_DYNAMIC_TASK_STRUCT
bool
+config ARCH_WANTS_NO_INSTR
+ bool
+ help
+ An architecture should select this if the noinstr macro is used on
+ functions to denote that the toolchain should avoid instrumenting
+ them, and that avoidance is required for correctness.
+
config ARCH_32BIT_OFF_T
bool
depends on !64BIT
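For reference, a minimal sketch of what the noinstr annotation mentioned above does, assuming the simplified definition below (the real macro in include/linux/compiler_types.h also moves the function into the .noinstr.text section and disables the sanitizers; the function name is invented):

/* Simplified sketch, not the kernel's full definition. */
#define noinstr __attribute__((no_instrument_function))

noinstr void early_exception_entry(void)
{
        /* Runs before tracing/RCU state is established, so the
         * compiler must not inject tracer or sanitizer hooks here. */
}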
@@ -631,14 +638,12 @@ config ARCH_SUPPORTS_LTO_CLANG_THIN
config HAS_LTO_CLANG
def_bool y
# Clang >= 11: https://github.com/ClangBuiltLinux/linux/issues/510
- depends on CC_IS_CLANG && CLANG_VERSION >= 110000 && LD_IS_LLD
- depends on $(success,test $(LLVM) -eq 1)
- depends on $(success,test $(LLVM_IAS) -eq 1)
+ depends on CC_IS_CLANG && CLANG_VERSION >= 110000 && LD_IS_LLD && AS_IS_LLVM
depends on $(success,$(NM) --help | head -n 1 | grep -qi llvm)
depends on $(success,$(AR) --help | head -n 1 | grep -qi llvm)
depends on ARCH_SUPPORTS_LTO_CLANG
depends on !FTRACE_MCOUNT_USE_RECORDMCOUNT
- depends on !KASAN
+ depends on !KASAN || KASAN_HW_TAGS
depends on !GCOV_KERNEL
help
The compiler and Kconfig options support building with Clang's
@@ -693,6 +698,51 @@ config LTO_CLANG_THIN
If unsure, say Y.
endchoice
+config ARCH_SUPPORTS_CFI_CLANG
+ bool
+ help
+ An architecture should select this option if it can support Clang's
+ Control-Flow Integrity (CFI) checking.
+
+config CFI_CLANG
+ bool "Use Clang's Control Flow Integrity (CFI)"
+ depends on LTO_CLANG && ARCH_SUPPORTS_CFI_CLANG
+ # Clang >= 12:
+ # - https://bugs.llvm.org/show_bug.cgi?id=46258
+ # - https://bugs.llvm.org/show_bug.cgi?id=47479
+ depends on CLANG_VERSION >= 120000
+ select KALLSYMS
+ help
+ This option enables Clang's forward-edge Control Flow Integrity
+ (CFI) checking, where the compiler injects a runtime check to each
+ indirect function call to ensure the target is a valid function with
+ the correct static type. This restricts possible call targets and
+ makes it more difficult for an attacker to exploit bugs that allow
+ the modification of stored function pointers. More information can be
+ found in Clang's documentation:
+
+ https://clang.llvm.org/docs/ControlFlowIntegrity.html
+
+config CFI_CLANG_SHADOW
+ bool "Use CFI shadow to speed up cross-module checks"
+ default y
+ depends on CFI_CLANG && MODULES
+ help
+ If you select this option, the kernel builds a fast look-up table of
+ CFI check functions in loaded modules to reduce performance overhead.
+
+ If unsure, say Y.
+
+config CFI_PERMISSIVE
+ bool "Use CFI in permissive mode"
+ depends on CFI_CLANG
+ help
+ When selected, Control Flow Integrity (CFI) violations result in a
+ warning instead of a kernel panic. This option should only be used
+ for finding indirect call type mismatches during development.
+
+ If unsure, say N.
+
config HAVE_ARCH_WITHIN_STACK_FRAMES
bool
help
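A hypothetical illustration of the mismatch the injected check traps (all names are invented; the real check is emitted by Clang, not written by hand):

/* An indirect call whose pointer type disagrees with the target's
 * static type fails the CFI check at call time. */
static int inc(int x) { return x + 1; }
static long ident(long x) { return x; }

int call_via_pointer(void)
{
        int (*fp)(int) = inc;           /* valid target: types match */
        fp = (int (*)(int))ident;       /* type confusion, hidden by the cast */
        return fp(2);                   /* CFI violation trapped here */
}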
@@ -786,6 +836,17 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
config HAVE_ARCH_HUGE_VMAP
bool
+#
+# Archs that select this would be capable of PMD-sized vmaps (i.e.,
+# arch_vmap_pmd_supported() returns true), and they must make no assumptions
+# that vmalloc memory is mapped with PAGE_SIZE ptes. The VM_NO_HUGE_VMAP flag
+# can be used to prohibit arch-specific allocations from using hugepages to
+# help with this (e.g., modules may require it).
+#
+config HAVE_ARCH_HUGE_VMALLOC
+ depends on HAVE_ARCH_HUGE_VMAP
+ bool
+
config ARCH_WANT_HUGE_PMD_SHARE
bool
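A sketch of the opt-out path the comment above mentions, assuming the nine-argument __vmalloc_node_range() signature of this series; the wrapper name is invented:

/* Keeps one allocation on PAGE_SIZE ptes even when the arch selects
 * HAVE_ARCH_HUGE_VMALLOC, e.g. for ranges later re-protected per page. */
void *alloc_pte_mapped(unsigned long size)
{
        return __vmalloc_node_range(size, PAGE_SIZE,
                        VMALLOC_START, VMALLOC_END,
                        GFP_KERNEL, PAGE_KERNEL,
                        VM_NO_HUGE_VMAP, NUMA_NO_NODE,
                        __builtin_return_address(0));
}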
@@ -1014,6 +1075,13 @@ config COMPAT_32BIT_TIME
config ARCH_NO_PREEMPT
bool
+config ARCH_EPHEMERAL_INODES
+ def_bool n
+ help
+ An arch should select this symbol if it doesn't keep track of inode
+ instances on its own, but instead relies on something else (e.g. the
+ host kernel for a UML kernel).
+
config ARCH_SUPPORTS_RT
bool
@@ -1055,6 +1123,29 @@ config VMAP_STACK
backing virtual mappings with real shadow memory, and KASAN_VMALLOC
must be enabled.
+config HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+ def_bool n
+ help
+ An arch should select this symbol if it can support kernel stack
+ offset randomization with calls to add_random_kstack_offset()
+ during syscall entry and choose_random_kstack_offset() during
+ syscall exit. Careful removal of -fstack-protector-strong and
+ -fstack-protector should also be applied to the entry code and
+ closely examined, as the artificial stack bump looks like an array
+ to the compiler, so it will attempt to add canary checks regardless
+ of the static branch state.
+
+config RANDOMIZE_KSTACK_OFFSET_DEFAULT
+ bool "Randomize kernel stack offset on syscall entry"
+ depends on HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
+ help
+ The kernel stack offset can be randomized (after pt_regs) by
+ roughly 5 bits of entropy, frustrating memory corruption
+ attacks that depend on stack address determinism or
+ cross-syscall address exposures. This feature is controlled
+ by kernel boot param "randomize_kstack_offset=on/off", and this
+ config chooses the default boot state.
+
config ARCH_OPTIONAL_KERNEL_RWX
def_bool n
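The entry/exit pairing the help text describes, as a minimal sketch (helper names from include/linux/randomize_kstack.h; get_cycles() stands in for whatever cheap per-arch entropy source is actually used, and the dispatcher itself is invented):

static void do_syscall(struct pt_regs *regs, unsigned long nr)
{
        add_random_kstack_offset();     /* apply the stored offset */
        /* ... dispatch the system call using regs and nr ... */
        choose_random_kstack_offset(get_cycles()); /* save the next one */
}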
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 5998106faa60..77d3280dc678 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -40,6 +40,7 @@ config ALPHA
select MMU_GATHER_NO_RANGE
select SET_FS
select SPARSEMEM_EXTREME if SPARSEMEM
+ select ZONE_DMA
help
The Alpha is a 64-bit general-purpose processor designed and
marketed by the Digital Equipment Corporation of blessed memory,
@@ -65,10 +66,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
-config ZONE_DMA
- bool
- default y
-
config GENERIC_ISA_DMA
bool
default y
@@ -549,29 +546,12 @@ config NR_CPUS
MARVEL support can handle a maximum of 32 CPUs, all the others
with working support have a maximum of 4 CPUs.
-config ARCH_DISCONTIGMEM_ENABLE
- bool "Discontiguous Memory Support"
- depends on BROKEN
- help
- Say Y to support efficient handling of discontiguous physical memory,
- for architectures which are either NUMA (Non-Uniform Memory Access)
- or have huge holes in the physical address space for other reasons.
- See <file:Documentation/vm/numa.rst> for more.
-
config ARCH_SPARSEMEM_ENABLE
bool "Sparse Memory Support"
help
Say Y to support efficient handling of discontiguous physical memory,
for systems that have huge holes in the physical address space.
-config NUMA
- bool "NUMA Support (EXPERIMENTAL)"
- depends on DISCONTIGMEM && BROKEN
- help
- Say Y to compile the kernel to support NUMA (Non-Uniform Memory
- Access). This option is for configuring high-end multiprocessor
- server machines. If in doubt, say N.
-
config ALPHA_WTINT
bool "Use WTINT" if ALPHA_SRM || ALPHA_GENERIC
default y if ALPHA_QEMU
@@ -596,11 +576,6 @@ config ALPHA_WTINT
If unsure, say N.
-config NODES_SHIFT
- int
- default "7"
- depends on NEED_MULTIPLE_NODES
-
# LARGE_VMALLOC is racy, if you *really* need it then fix it first
config ALPHA_LARGE_VMALLOC
bool
diff --git a/arch/alpha/configs/defconfig b/arch/alpha/configs/defconfig
index 724c4075df40..dd2dd9f0861f 100644
--- a/arch/alpha/configs/defconfig
+++ b/arch/alpha/configs/defconfig
@@ -25,19 +25,18 @@ CONFIG_PNP=y
CONFIG_ISAPNP=y
CONFIG_BLK_DEV_FD=y
CONFIG_BLK_DEV_LOOP=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_ALI15X3=y
-CONFIG_BLK_DEV_CMD64X=y
-CONFIG_BLK_DEV_CY82C693=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
CONFIG_SCSI_AIC7XXX=m
CONFIG_AIC7XXX_CMDS_PER_DEVICE=253
# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_PATA_ALI=y
+CONFIG_PATA_CMD64X=y
+CONFIG_PATA_CYPRESS=y
+CONFIG_ATA_GENERIC=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m
CONFIG_NET_ETHERNET=y
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index e41c113c6688..f2861a43a61e 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -26,11 +26,11 @@
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
-#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
/*
* To get proper branch prediction for the main line, we must branch
@@ -39,7 +39,7 @@
*/
#define ATOMIC_OP(op, asm_op) \
-static __inline__ void atomic_##op(int i, atomic_t * v) \
+static __inline__ void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned long temp; \
__asm__ __volatile__( \
@@ -55,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
@@ -74,7 +74,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
@@ -92,7 +92,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
}
#define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v) \
{ \
s64 temp; \
__asm__ __volatile__( \
@@ -108,7 +108,8 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
} \
#define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{ \
s64 temp, result; \
__asm__ __volatile__( \
@@ -127,7 +128,8 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
}
#define ATOMIC64_FETCH_OP(op, asm_op) \
-static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{ \
s64 temp, result; \
__asm__ __volatile__( \
@@ -155,18 +157,18 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
-#define atomic_andnot atomic_andnot
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic_andnot arch_atomic_andnot
+#define arch_atomic64_andnot arch_atomic64_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \
@@ -180,15 +182,15 @@ ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
@@ -198,14 +200,18 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, old, new) \
+ (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic64_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, old, new) \
+ (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
/**
- * atomic_fetch_add_unless - add unless the number is a given value
+ * arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -213,7 +219,7 @@ ATOMIC_OPS(xor, xor)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c, new, old;
smp_mb();
@@ -234,10 +240,10 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
smp_mb();
return old;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -245,7 +251,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 c, new, old;
smp_mb();
@@ -266,16 +272,16 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
smp_mb();
return old;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 old, tmp;
smp_mb();
@@ -295,6 +301,6 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
smp_mb();
return old - 1;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif /* _ALPHA_ATOMIC_H */
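The point of the arch_ prefix throughout this file is that the generic instrumented layer can wrap the raw primitives; roughly, paraphrasing include/asm-generic/atomic-instrumented.h:

static __always_inline int atomic_read(const atomic_t *v)
{
        instrument_atomic_read(v, sizeof(*v));  /* KASAN/KCSAN hook */
        return arch_atomic_read(v);             /* raw arch primitive */
}

The same pattern applies to every operation renamed above.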
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 6c7c39452471..6e0a850aa9d3 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -17,7 +17,7 @@
sizeof(*(ptr))); \
})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -26,7 +26,7 @@
sizeof(*(ptr))); \
})
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
@@ -42,7 +42,7 @@
* The leading and the trailing memory barriers guarantee that these
* operations are fully ordered.
*/
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
@@ -53,7 +53,7 @@
__ret; \
})
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _o_ = (o); \
@@ -65,10 +65,10 @@
__ret; \
})
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#undef ____cmpxchg
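A usage sketch of the renamed primitives (the lock word and helper names are illustrative, not from this patch); per the comment above, both operations are fully ordered on Alpha:

static int try_take(unsigned long *lock)
{
        return arch_cmpxchg(lock, 0UL, 1UL) == 0UL;  /* 1 on success */
}

static void release(unsigned long *lock)
{
        arch_xchg(lock, 0UL);   /* full barriers on both sides */
}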
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
index 1f6a909d1fa5..0fab5ac90775 100644
--- a/arch/alpha/include/asm/io.h
+++ b/arch/alpha/include/asm/io.h
@@ -602,11 +602,6 @@ extern void outsl (unsigned long port, const void *src, unsigned long count);
*/
#define xlate_dev_mem_ptr(p) __va(p)
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
#endif /* __KERNEL__ */
#endif /* __ALPHA_IO_H */
diff --git a/arch/alpha/include/asm/machvec.h b/arch/alpha/include/asm/machvec.h
index a4e96e2bec74..e49fabce7b33 100644
--- a/arch/alpha/include/asm/machvec.h
+++ b/arch/alpha/include/asm/machvec.h
@@ -99,12 +99,6 @@ struct alpha_machine_vector
const char *vector_name;
- /* NUMA information */
- int (*pa_to_nid)(unsigned long);
- int (*cpuid_to_nid)(int);
- unsigned long (*node_mem_start)(int);
- unsigned long (*node_mem_size)(int);
-
/* System specific parameters. */
union {
struct {
diff --git a/arch/alpha/include/asm/mmzone.h b/arch/alpha/include/asm/mmzone.h
deleted file mode 100644
index 86644604d977..000000000000
--- a/arch/alpha/include/asm/mmzone.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Written by Kanoj Sarcar (kanoj@sgi.com) Aug 99
- * Adapted for the alpha wildfire architecture Jan 2001.
- */
-#ifndef _ASM_MMZONE_H_
-#define _ASM_MMZONE_H_
-
-#ifdef CONFIG_DISCONTIGMEM
-
-#include <asm/smp.h>
-
-/*
- * Following are macros that are specific to this numa platform.
- */
-
-extern pg_data_t node_data[];
-
-#define alpha_pa_to_nid(pa) \
- (alpha_mv.pa_to_nid \
- ? alpha_mv.pa_to_nid(pa) \
- : (0))
-#define node_mem_start(nid) \
- (alpha_mv.node_mem_start \
- ? alpha_mv.node_mem_start(nid) \
- : (0UL))
-#define node_mem_size(nid) \
- (alpha_mv.node_mem_size \
- ? alpha_mv.node_mem_size(nid) \
- : ((nid) ? (0UL) : (~0UL)))
-
-#define pa_to_nid(pa) alpha_pa_to_nid(pa)
-#define NODE_DATA(nid) (&node_data[(nid)])
-
-#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
-
-#if 1
-#define PLAT_NODE_DATA_LOCALNR(p, n) \
- (((p) >> PAGE_SHIFT) - PLAT_NODE_DATA(n)->gendata.node_start_pfn)
-#else
-static inline unsigned long
-PLAT_NODE_DATA_LOCALNR(unsigned long p, int n)
-{
- unsigned long temp;
- temp = p >> PAGE_SHIFT;
- return temp - PLAT_NODE_DATA(n)->gendata.node_start_pfn;
-}
-#endif
-
-/*
- * Following are macros that each numa implementation must define.
- */
-
-/*
- * Given a kernel address, find the home node of the underlying memory.
- */
-#define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr))
-
-/*
- * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory
- * and returns the kaddr corresponding to first physical page in the
- * node's mem_map.
- */
-#define LOCAL_BASE_ADDR(kaddr) \
- ((unsigned long)__va(NODE_DATA(kvaddr_to_nid(kaddr))->node_start_pfn \
- << PAGE_SHIFT))
-
-/* XXX: FIXME -- nyc */
-#define kern_addr_valid(kaddr) (0)
-
-#define mk_pte(page, pgprot) \
-({ \
- pte_t pte; \
- unsigned long pfn; \
- \
- pfn = page_to_pfn(page) << 32; \
- pte_val(pte) = pfn | pgprot_val(pgprot); \
- \
- pte; \
-})
-
-#define pte_page(x) \
-({ \
- unsigned long kvirt; \
- struct page * __xx; \
- \
- kvirt = (unsigned long)__va(pte_val(x) >> (32-PAGE_SHIFT)); \
- __xx = virt_to_page(kvirt); \
- \
- __xx; \
-})
-
-#define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT))
-#define pfn_valid(pfn) \
- (((pfn) - node_start_pfn(pfn_to_nid(pfn))) < \
- node_spanned_pages(pfn_to_nid(pfn))) \
-
-#endif /* CONFIG_DISCONTIGMEM */
-
-#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h
index 268f99b4602b..18f48a6f2ff6 100644
--- a/arch/alpha/include/asm/page.h
+++ b/arch/alpha/include/asm/page.h
@@ -17,9 +17,9 @@
extern void clear_page(void *page);
#define clear_user_page(page, vaddr, pg) clear_page(page)
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vmaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vmaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
extern void copy_page(void * _to, void * _from);
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index 9c6a24fe493d..68be7adbfe58 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -18,7 +18,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte)
{
pmd_set(pmd, (pte_t *)(page_to_pa(pte) + PAGE_OFFSET));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 8d856c62e22a..ff690846465e 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -46,7 +46,6 @@ struct vm_area_struct;
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
/* Number of pointers that fit on a page: this will go away. */
#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT-3))
@@ -206,7 +205,6 @@ extern unsigned long __zero_page(void);
#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT)
#define pte_pfn(pte) (pte_val(pte) >> 32)
-#ifndef CONFIG_DISCONTIGMEM
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page, pgprot) \
({ \
@@ -215,7 +213,6 @@ extern unsigned long __zero_page(void);
pte_val(pte) = (page_to_pfn(page) << 32) | pgprot_val(pgprot); \
pte; \
})
-#endif
extern inline pte_t pfn_pte(unsigned long physpfn, pgprot_t pgprot)
{ pte_t pte; pte_val(pte) = (PHYS_TWIDDLE(physpfn) << 32) | pgprot_val(pgprot); return pte; }
@@ -330,9 +327,7 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr) (1)
-#endif
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
diff --git a/arch/alpha/include/asm/topology.h b/arch/alpha/include/asm/topology.h
index 5a77a40567fa..7d393036aa8f 100644
--- a/arch/alpha/include/asm/topology.h
+++ b/arch/alpha/include/asm/topology.h
@@ -7,45 +7,6 @@
#include <linux/numa.h>
#include <asm/machvec.h>
-#ifdef CONFIG_NUMA
-static inline int cpu_to_node(int cpu)
-{
- int node;
-
- if (!alpha_mv.cpuid_to_nid)
- return 0;
-
- node = alpha_mv.cpuid_to_nid(cpu);
-
-#ifdef DEBUG_NUMA
- BUG_ON(node < 0);
-#endif
-
- return node;
-}
-
-extern struct cpumask node_to_cpumask_map[];
-/* FIXME: This is dumb, recalculating every time. But simple. */
-static const struct cpumask *cpumask_of_node(int node)
-{
- int cpu;
-
- if (node == NUMA_NO_NODE)
- return cpu_all_mask;
-
- cpumask_clear(&node_to_cpumask_map[node]);
-
- for_each_online_cpu(cpu) {
- if (cpu_to_node(cpu) == node)
- cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
- }
-
- return &node_to_cpumask_map[node];
-}
-
-#define cpumask_of_pcibus(bus) (cpu_online_mask)
-
-#endif /* !CONFIG_NUMA */
# include <asm-generic/topology.h>
#endif /* _ASM_ALPHA_TOPOLOGY_H */
diff --git a/arch/alpha/include/asm/unaligned.h b/arch/alpha/include/asm/unaligned.h
deleted file mode 100644
index 863c807b66f8..000000000000
--- a/arch/alpha/include/asm/unaligned.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ALPHA_UNALIGNED_H
-#define _ASM_ALPHA_UNALIGNED_H
-
-#include <linux/unaligned/le_struct.h>
-#include <linux/unaligned/be_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-
-#endif /* _ASM_ALPHA_UNALIGNED_H */
diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index a18ec7f63888..56b4ee5a6c9e 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -71,6 +71,9 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
/* compatibility flags */
#define MAP_FILE 0
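User-space sketch of the new advice values (22/23 as defined above; the helper name is invented):

#include <sys/mman.h>

void *prefaulted(size_t len)
{
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p != MAP_FAILED)
                madvise(p, len, 23 /* MADV_POPULATE_WRITE */);
        return p;
}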
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index 57420356ce4c..6b3daba60987 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -127,6 +127,8 @@
#define SO_PREFER_BUSY_POLL 69
#define SO_BUSY_POLL_BUDGET 70
+#define SO_NETNS_COOKIE 71
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
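User-space sketch for the new option, assuming it returns the 64-bit network-namespace cookie (the helper name is invented):

#include <stdint.h>
#include <sys/socket.h>

uint64_t netns_cookie_of(int fd)
{
        uint64_t cookie = 0;
        socklen_t len = sizeof(cookie);
        getsockopt(fd, SOL_SOCKET, 71 /* SO_NETNS_COOKIE */, &cookie, &len);
        return cookie;
}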
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index 4485b77f8658..1efca79ac83c 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -287,8 +287,7 @@ io7_init_hose(struct io7 *io7, int port)
/*
* Set up window 0 for scatter-gather 8MB at 8MB.
*/
- hose->sg_isa = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
- hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa = iommu_arena_new_node(0, hose, 0x00800000, 0x00800000, 0);
hose->sg_isa->align_entry = 8; /* cache line boundary */
csrs->POx_WBASE[0].csr =
hose->sg_isa->dma_base | wbase_m_ena | wbase_m_sg;
@@ -305,8 +304,7 @@ io7_init_hose(struct io7 *io7, int port)
/*
* Set up window 2 for scatter-gather (up-to) 1GB at 3GB.
*/
- hose->sg_pci = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
- hose, 0xc0000000, 0x40000000, 0);
+ hose->sg_pci = iommu_arena_new_node(0, hose, 0xc0000000, 0x40000000, 0);
hose->sg_pci->align_entry = 8; /* cache line boundary */
csrs->POx_WBASE[2].csr =
hose->sg_pci->dma_base | wbase_m_ena | wbase_m_sg;
@@ -843,53 +841,8 @@ EXPORT_SYMBOL(marvel_ioportmap);
EXPORT_SYMBOL(marvel_ioread8);
EXPORT_SYMBOL(marvel_iowrite8);
#endif
-
-/*
- * NUMA Support
- */
-/**********
- * FIXME - for now each cpu is a node by itself
- * -- no real support for striped mode
- **********
- */
-int
-marvel_pa_to_nid(unsigned long pa)
-{
- int cpuid;
- if ((pa >> 43) & 1) /* I/O */
- cpuid = (~(pa >> 35) & 0xff);
- else /* mem */
- cpuid = ((pa >> 34) & 0x3) | ((pa >> (37 - 2)) & (0x1f << 2));
-
- return marvel_cpuid_to_nid(cpuid);
-}
-
-int
-marvel_cpuid_to_nid(int cpuid)
-{
- return cpuid;
-}
-
-unsigned long
-marvel_node_mem_start(int nid)
-{
- unsigned long pa;
-
- pa = (nid & 0x3) | ((nid & (0x1f << 2)) << 1);
- pa <<= 34;
-
- return pa;
-}
-
-unsigned long
-marvel_node_mem_size(int nid)
-{
- return 16UL * 1024 * 1024 * 1024; /* 16GB */
-}
-
-
-/*
+/*
* AGP GART Support.
*/
#include <linux/agp_backend.h>
diff --git a/arch/alpha/kernel/core_wildfire.c b/arch/alpha/kernel/core_wildfire.c
index e8d3b033018d..3a804b67f9da 100644
--- a/arch/alpha/kernel/core_wildfire.c
+++ b/arch/alpha/kernel/core_wildfire.c
@@ -434,39 +434,12 @@ wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where,
return PCIBIOS_SUCCESSFUL;
}
-struct pci_ops wildfire_pci_ops =
+struct pci_ops wildfire_pci_ops =
{
.read = wildfire_read_config,
.write = wildfire_write_config,
};
-
-/*
- * NUMA Support
- */
-int wildfire_pa_to_nid(unsigned long pa)
-{
- return pa >> 36;
-}
-
-int wildfire_cpuid_to_nid(int cpuid)
-{
- /* assume 4 CPUs per node */
- return cpuid >> 2;
-}
-
-unsigned long wildfire_node_mem_start(int nid)
-{
- /* 64GB per node */
- return (unsigned long)nid * (64UL * 1024 * 1024 * 1024);
-}
-
-unsigned long wildfire_node_mem_size(int nid)
-{
- /* 64GB per node */
- return 64UL * 1024 * 1024 * 1024;
-}
-
#if DEBUG_DUMP_REGS
static void __init
diff --git a/arch/alpha/kernel/pc873xx.c b/arch/alpha/kernel/pc873xx.c
index 63aee5d86e02..82b19c9e59d6 100644
--- a/arch/alpha/kernel/pc873xx.c
+++ b/arch/alpha/kernel/pc873xx.c
@@ -13,12 +13,12 @@ static char *pc873xx_names[] = {
static unsigned int base, model;
-unsigned int __init pc873xx_get_base()
+unsigned int __init pc873xx_get_base(void)
{
return base;
}
-char *__init pc873xx_get_model()
+char *__init pc873xx_get_model(void)
{
return pc873xx_names[model];
}
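The (void) change above matters because, before C23, an empty parameter list declares a function with unspecified arguments; a hypothetical illustration:

unsigned int get_base();        /* old style: arguments unchecked */

unsigned int misuse(void)
{
        return get_base(42);    /* accepted silently by the compiler */
}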
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
index d84b19aa8e9d..35d7b3096d6e 100644
--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -71,33 +71,6 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
if (align < mem_size)
align = mem_size;
-
-#ifdef CONFIG_DISCONTIGMEM
-
- arena = memblock_alloc_node(sizeof(*arena), align, nid);
- if (!NODE_DATA(nid) || !arena) {
- printk("%s: couldn't allocate arena from node %d\n"
- " falling back to system-wide allocation\n",
- __func__, nid);
- arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
- if (!arena)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(*arena));
- }
-
- arena->ptes = memblock_alloc_node(sizeof(*arena), align, nid);
- if (!NODE_DATA(nid) || !arena->ptes) {
- printk("%s: couldn't allocate arena ptes from node %d\n"
- " falling back to system-wide allocation\n",
- __func__, nid);
- arena->ptes = memblock_alloc(mem_size, align);
- if (!arena->ptes)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, mem_size, align);
- }
-
-#else /* CONFIG_DISCONTIGMEM */
-
arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
if (!arena)
panic("%s: Failed to allocate %zu bytes\n", __func__,
@@ -107,8 +80,6 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, mem_size, align);
-#endif /* CONFIG_DISCONTIGMEM */
-
spin_lock_init(&arena->lock);
arena->hose = hose;
arena->dma_base = base;
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 5112ab996394..ef0c08ed0481 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -380,7 +380,7 @@ get_wchan(struct task_struct *p)
{
unsigned long schedule_frame;
unsigned long pc;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
/*
* This one depends on the frame size of schedule(). Do a
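task_is_running() is the new helper this hunk switches to; roughly, assuming the definition introduced alongside this series in include/linux/sched.h:

#define task_is_running(task)   (READ_ONCE((task)->state) == TASK_RUNNING)

The helper also wraps the ->state access in READ_ONCE(), unlike the plain read it replaces.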
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
index 701a05090141..5816a31c1b38 100644
--- a/arch/alpha/kernel/proto.h
+++ b/arch/alpha/kernel/proto.h
@@ -49,10 +49,6 @@ extern void marvel_init_arch(void);
extern void marvel_kill_arch(int);
extern void marvel_machine_check(unsigned long, unsigned long);
extern void marvel_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
-extern int marvel_pa_to_nid(unsigned long);
-extern int marvel_cpuid_to_nid(int);
-extern unsigned long marvel_node_mem_start(int);
-extern unsigned long marvel_node_mem_size(int);
extern struct _alpha_agp_info *marvel_agp_info(void);
struct io7 *marvel_find_io7(int pe);
struct io7 *marvel_next_io7(struct io7 *prev);
@@ -101,10 +97,6 @@ extern void wildfire_init_arch(void);
extern void wildfire_kill_arch(int);
extern void wildfire_machine_check(unsigned long vector, unsigned long la_ptr);
extern void wildfire_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
-extern int wildfire_pa_to_nid(unsigned long);
-extern int wildfire_cpuid_to_nid(int);
-extern unsigned long wildfire_node_mem_start(int);
-extern unsigned long wildfire_node_mem_size(int);
/* console.c */
#ifdef CONFIG_VGA_HOSE
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 03dda3beb3bd..7d56c217b235 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/ioport.h>
+#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/memblock.h>
#include <linux/pci.h>
@@ -46,7 +47,6 @@
#include <linux/log2.h>
#include <linux/export.h>
-extern struct atomic_notifier_head panic_notifier_list;
static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
static struct notifier_block alpha_panic_block = {
alpha_panic_event,
@@ -79,11 +79,6 @@ int alpha_l3_cacheshape;
unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
#endif
-#ifdef CONFIG_NUMA
-struct cpumask node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_to_cpumask_map);
-#endif
-
/* Which processor we booted from. */
int boot_cpuid;
@@ -305,7 +300,6 @@ move_initrd(unsigned long mem_limit)
}
#endif
-#ifndef CONFIG_DISCONTIGMEM
static void __init
setup_memory(void *kernel_end)
{
@@ -389,9 +383,6 @@ setup_memory(void *kernel_end)
}
#endif /* CONFIG_BLK_DEV_INITRD */
}
-#else
-extern void setup_memory(void *);
-#endif /* !CONFIG_DISCONTIGMEM */
int __init
page_is_ram(unsigned long pfn)
@@ -618,13 +609,6 @@ setup_arch(char **cmdline_p)
"VERBOSE_MCHECK "
#endif
-#ifdef CONFIG_DISCONTIGMEM
- "DISCONTIGMEM "
-#ifdef CONFIG_NUMA
- "NUMA "
-#endif
-#endif
-
#ifdef CONFIG_DEBUG_SPINLOCK
"DEBUG_SPINLOCK "
#endif
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index f4dd9f3f3001..4b2575f936d4 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -166,7 +166,6 @@ smp_callin(void)
DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
cpuid, current, current->active_mm));
- preempt_disable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 83d6c53d6d4d..1f99b03effc2 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -461,10 +461,5 @@ struct alpha_machine_vector marvel_ev7_mv __initmv = {
.kill_arch = marvel_kill_arch,
.pci_map_irq = marvel_map_irq,
.pci_swizzle = common_swizzle,
-
- .pa_to_nid = marvel_pa_to_nid,
- .cpuid_to_nid = marvel_cpuid_to_nid,
- .node_mem_start = marvel_node_mem_start,
- .node_mem_size = marvel_node_mem_size,
};
ALIAS_MV(marvel_ev7)
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index 2c54d707142a..3cee05443f07 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -337,10 +337,5 @@ struct alpha_machine_vector wildfire_mv __initmv = {
.kill_arch = wildfire_kill_arch,
.pci_map_irq = wildfire_map_irq,
.pci_swizzle = common_swizzle,
-
- .pa_to_nid = wildfire_pa_to_nid,
- .cpuid_to_nid = wildfire_cpuid_to_nid,
- .node_mem_start = wildfire_node_mem_start,
- .node_mem_size = wildfire_node_mem_size,
};
ALIAS_MV(wildfire)
diff --git a/arch/alpha/kernel/syscalls/Makefile b/arch/alpha/kernel/syscalls/Makefile
index 285aaba832d9..6713c65a25e1 100644
--- a/arch/alpha/kernel/syscalls/Makefile
+++ b/arch/alpha/kernel/syscalls/Makefile
@@ -6,20 +6,14 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
syscall := $(src)/syscall.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abis_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '$(syshdr_offset_$(basetarget))'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
- '$(systbl_abis_$(basetarget))' \
- '$(systbl_abi_$(basetarget))' \
- '$(systbl_offset_$(basetarget))'
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@
$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index 02f0244e005c..a17687ed4b51 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -482,3 +482,7 @@
550 common process_madvise sys_process_madvise
551 common epoll_pwait2 sys_epoll_pwait2
552 common mount_setattr sys_mount_setattr
+553 common quotactl_fd sys_quotactl_fd
+554 common landlock_create_ruleset sys_landlock_create_ruleset
+555 common landlock_add_rule sys_landlock_add_rule
+556 common landlock_restrict_self sys_landlock_restrict_self
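User-space sketch invoking one of the entries added above by its alpha number (other architectures assign different numbers to the same calls; the wrapper name is invented):

#include <unistd.h>

long restrict_self(int ruleset_fd)
{
        return syscall(556 /* landlock_restrict_self on alpha */,
                       ruleset_fd, 0 /* flags */);
}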
diff --git a/arch/alpha/kernel/syscalls/syscallhdr.sh b/arch/alpha/kernel/syscalls/syscallhdr.sh
deleted file mode 100644
index 1780e861492a..000000000000
--- a/arch/alpha/kernel/syscalls/syscallhdr.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_UAPI_ASM_ALPHA_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- printf "#ifndef %s\n" "${fileguard}"
- printf "#define %s\n" "${fileguard}"
- printf "\n"
-
- nxt=0
- while read nr abi name entry ; do
- if [ -z "$offset" ]; then
- printf "#define __NR_%s%s\t%s\n" \
- "${prefix}" "${name}" "${nr}"
- else
- printf "#define __NR_%s%s\t(%s + %s)\n" \
- "${prefix}" "${name}" "${offset}" "${nr}"
- fi
- nxt=$((nr+1))
- done
-
- printf "\n"
- printf "#ifdef __KERNEL__\n"
- printf "#define __NR_syscalls\t%s\n" "${nxt}"
- printf "#endif\n"
- printf "\n"
- printf "#endif /* %s */\n" "${fileguard}"
-) > "$out"
diff --git a/arch/alpha/kernel/syscalls/syscalltbl.sh b/arch/alpha/kernel/syscalls/syscalltbl.sh
deleted file mode 100644
index 85d78d9309ad..000000000000
--- a/arch/alpha/kernel/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-my_abi="$4"
-offset="$5"
-
-emit() {
- t_nxt="$1"
- t_nr="$2"
- t_entry="$3"
-
- while [ $t_nxt -lt $t_nr ]; do
- printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
- t_nxt=$((t_nxt+1))
- done
- printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
-}
-
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- nxt=0
- if [ -z "$offset" ]; then
- offset=0
- fi
-
- while read nr abi name entry ; do
- emit $((nxt+offset)) $((nr+offset)) $entry
- nxt=$((nr+1))
- done
-) > "$out"
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 9704f22ed5e3..68f3e4f329eb 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -7,10 +7,9 @@
#include <asm/unistd.h>
-#define __SYSCALL(nr, entry, nargs) .quad entry
+#define __SYSCALL(nr, entry) .quad entry
.data
.align 3
.globl sys_call_table
sys_call_table:
#include <asm/syscall_table.h>
-#undef __SYSCALL
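With the generated header now emitting two-argument __SYSCALL() entries, the include above expands to one table slot per line; an illustrative expansion (entry names are examples only, and the real list comes from scripts/syscalltbl.sh):

#define __SYSCALL(nr, entry) .quad entry
__SYSCALL(0, alpha_ni_syscall)  /* assembles to: .quad alpha_ni_syscall */
__SYSCALL(1, sys_exit)          /* assembles to: .quad sys_exit */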
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index dc68efbe9367..1931a04af85a 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/uaccess.h>
+#include <net/checksum.h>
#define ldq_u(x,y) \
diff --git a/arch/alpha/mm/Makefile b/arch/alpha/mm/Makefile
index 08ac6612edad..bd770302eb82 100644
--- a/arch/alpha/mm/Makefile
+++ b/arch/alpha/mm/Makefile
@@ -6,5 +6,3 @@
ccflags-y := -Werror
obj-y := init.o fault.o
-
-obj-$(CONFIG_DISCONTIGMEM) += numa.o
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 3c42b3147fd6..f6114d03357c 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -235,8 +235,6 @@ callback_init(void * kernel_end)
return kernel_end;
}
-
-#ifndef CONFIG_DISCONTIGMEM
/*
* paging_init() sets up the memory map.
*/
@@ -257,7 +255,6 @@ void __init paging_init(void)
/* Initialize the kernel's ZERO_PGE. */
memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}
-#endif /* CONFIG_DISCONTIGMEM */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
void
@@ -282,5 +279,4 @@ mem_init(void)
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
memblock_free_all();
- mem_init_print_info(NULL);
}
diff --git a/arch/alpha/mm/numa.c b/arch/alpha/mm/numa.c
deleted file mode 100644
index 0636e254a22f..000000000000
--- a/arch/alpha/mm/numa.c
+++ /dev/null
@@ -1,223 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/alpha/mm/numa.c
- *
- * DISCONTIGMEM NUMA alpha support.
- *
- * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/memblock.h>
-#include <linux/swap.h>
-#include <linux/initrd.h>
-#include <linux/pfn.h>
-#include <linux/module.h>
-
-#include <asm/hwrpb.h>
-#include <asm/sections.h>
-
-pg_data_t node_data[MAX_NUMNODES];
-EXPORT_SYMBOL(node_data);
-
-#undef DEBUG_DISCONTIG
-#ifdef DEBUG_DISCONTIG
-#define DBGDCONT(args...) printk(args)
-#else
-#define DBGDCONT(args...)
-#endif
-
-#define for_each_mem_cluster(memdesc, _cluster, i) \
- for ((_cluster) = (memdesc)->cluster, (i) = 0; \
- (i) < (memdesc)->numclusters; (i)++, (_cluster)++)
-
-static void __init show_mem_layout(void)
-{
- struct memclust_struct * cluster;
- struct memdesc_struct * memdesc;
- int i;
-
- /* Find free clusters, and init and free the bootmem accordingly. */
- memdesc = (struct memdesc_struct *)
- (hwrpb->mddt_offset + (unsigned long) hwrpb);
-
- printk("Raw memory layout:\n");
- for_each_mem_cluster(memdesc, cluster, i) {
- printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
- i, cluster->usage, cluster->start_pfn,
- cluster->start_pfn + cluster->numpages);
- }
-}
-
-static void __init
-setup_memory_node(int nid, void *kernel_end)
-{
- extern unsigned long mem_size_limit;
- struct memclust_struct * cluster;
- struct memdesc_struct * memdesc;
- unsigned long start_kernel_pfn, end_kernel_pfn;
- unsigned long start, end;
- unsigned long node_pfn_start, node_pfn_end;
- unsigned long node_min_pfn, node_max_pfn;
- int i;
- int show_init = 0;
-
- /* Find the bounds of current node */
- node_pfn_start = (node_mem_start(nid)) >> PAGE_SHIFT;
- node_pfn_end = node_pfn_start + (node_mem_size(nid) >> PAGE_SHIFT);
-
- /* Find free clusters, and init and free the bootmem accordingly. */
- memdesc = (struct memdesc_struct *)
- (hwrpb->mddt_offset + (unsigned long) hwrpb);
-
- /* find the bounds of this node (node_min_pfn/node_max_pfn) */
- node_min_pfn = ~0UL;
- node_max_pfn = 0UL;
- for_each_mem_cluster(memdesc, cluster, i) {
- /* Bit 0 is console/PALcode reserved. Bit 1 is
- non-volatile memory -- we might want to mark
- this for later. */
- if (cluster->usage & 3)
- continue;
-
- start = cluster->start_pfn;
- end = start + cluster->numpages;
-
- if (start >= node_pfn_end || end <= node_pfn_start)
- continue;
-
- if (!show_init) {
- show_init = 1;
- printk("Initializing bootmem allocator on Node ID %d\n", nid);
- }
- printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
- i, cluster->usage, cluster->start_pfn,
- cluster->start_pfn + cluster->numpages);
-
- if (start < node_pfn_start)
- start = node_pfn_start;
- if (end > node_pfn_end)
- end = node_pfn_end;
-
- if (start < node_min_pfn)
- node_min_pfn = start;
- if (end > node_max_pfn)
- node_max_pfn = end;
- }
-
- if (mem_size_limit && node_max_pfn > mem_size_limit) {
- static int msg_shown = 0;
- if (!msg_shown) {
- msg_shown = 1;
- printk("setup: forcing memory size to %ldK (from %ldK).\n",
- mem_size_limit << (PAGE_SHIFT - 10),
- node_max_pfn << (PAGE_SHIFT - 10));
- }
- node_max_pfn = mem_size_limit;
- }
-
- if (node_min_pfn >= node_max_pfn)
- return;
-
- /* Update global {min,max}_low_pfn from node information. */
- if (node_min_pfn < min_low_pfn)
- min_low_pfn = node_min_pfn;
- if (node_max_pfn > max_low_pfn)
- max_pfn = max_low_pfn = node_max_pfn;
-
-#if 0 /* we'll try this one again in a little while */
- /* Cute trick to make sure our local node data is on local memory */
- node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT));
-#endif
- printk(" Detected node memory: start %8lu, end %8lu\n",
- node_min_pfn, node_max_pfn);
-
- DBGDCONT(" DISCONTIG: node_data[%d] is at 0x%p\n", nid, NODE_DATA(nid));
-
- /* Find the bounds of kernel memory. */
- start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
- end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
-
- if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
- panic("kernel loaded out of ram");
-
- memblock_add_node(PFN_PHYS(node_min_pfn),
- (node_max_pfn - node_min_pfn) << PAGE_SHIFT, nid);
-
- /* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
- Note that we round this down, not up - node memory
- has much larger alignment than 8Mb, so it's safe. */
- node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);
-
- NODE_DATA(nid)->node_start_pfn = node_min_pfn;
- NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn;
-
- node_set_online(nid);
-}
-
-void __init
-setup_memory(void *kernel_end)
-{
- unsigned long kernel_size;
- int nid;
-
- show_mem_layout();
-
- nodes_clear(node_online_map);
-
- min_low_pfn = ~0UL;
- max_low_pfn = 0UL;
- for (nid = 0; nid < MAX_NUMNODES; nid++)
- setup_memory_node(nid, kernel_end);
-
- kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS;
- memblock_reserve(KERNEL_START_PHYS, kernel_size);
-
-#ifdef CONFIG_BLK_DEV_INITRD
- initrd_start = INITRD_START;
- if (initrd_start) {
- extern void *move_initrd(unsigned long);
-
- initrd_end = initrd_start+INITRD_SIZE;
- printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
- (void *) initrd_start, INITRD_SIZE);
-
- if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
- if (!move_initrd(PFN_PHYS(max_low_pfn)))
- printk("initrd extends beyond end of memory "
- "(0x%08lx > 0x%p)\ndisabling initrd\n",
- initrd_end,
- phys_to_virt(PFN_PHYS(max_low_pfn)));
- } else {
- nid = kvaddr_to_nid(initrd_start);
- memblock_reserve(virt_to_phys((void *)initrd_start),
- INITRD_SIZE);
- }
- }
-#endif /* CONFIG_BLK_DEV_INITRD */
-}
-
-void __init paging_init(void)
-{
- unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
- unsigned long dma_local_pfn;
-
- /*
- * The old global MAX_DMA_ADDRESS per-arch API doesn't fit
- * in the NUMA model, for now we convert it to a pfn and
- * we interpret this pfn as a local per-node information.
- * This issue isn't very important since none of these machines
- * have legacy ISA slots anyways.
- */
- dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
-
- max_zone_pfn[ZONE_DMA] = dma_local_pfn;
- max_zone_pfn[ZONE_NORMAL] = max_pfn;
-
- free_area_init(max_zone_pfn);
-
- /* Initialize the kernel's ZERO_PGE. */
- memset((void *)ZERO_PGE, 0, PAGE_SIZE);
-}
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index bc8d6aecfbbd..d8f51eb8963b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -6,6 +6,7 @@
config ARC
def_bool y
select ARC_TIMERS
+ select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_PTE_SPECIAL
@@ -28,6 +29,7 @@ config ARC
select GENERIC_SMP_IDLE_THREAD
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DEBUG_KMEMLEAK
select HAVE_FUTEX_CMPXCHG if FUTEX
@@ -48,9 +50,6 @@ config ARC
select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32
select SET_FS
-config ARCH_HAS_CACHE_LINE_SIZE
- def_bool y
-
config TRACE_IRQFLAGS_SUPPORT
def_bool y
@@ -63,10 +62,6 @@ config SCHED_OMIT_FRAME_POINTER
config GENERIC_CSUM
def_bool y
-config ARCH_DISCONTIGMEM_ENABLE
- def_bool n
- depends on BROKEN
-
config ARCH_FLATMEM_ENABLE
def_bool y
@@ -86,10 +81,6 @@ config STACKTRACE_SUPPORT
def_bool y
select STACKTRACE
-config HAVE_ARCH_TRANSPARENT_HUGEPAGE
- def_bool y
- depends on ARC_MMU_V4
-
menu "ARC Architecture Configuration"
menu "ARC Platform/SoC/Board"
@@ -349,15 +340,6 @@ config ARC_HUGEPAGE_16M
endchoice
-config NODES_SHIFT
- int "Maximum NUMA Nodes (as a power of 2)"
- default "0" if !DISCONTIGMEM
- default "1" if DISCONTIGMEM
- depends on NEED_MULTIPLE_NODES
- help
- Accessing memory beyond 1GB (with or w/o PAE) requires 2 memory
- zones.
-
config ARC_COMPACT_IRQ_LEVELS
depends on ISA_ARCOMPACT
bool "Setup Timer IRQ as high Priority"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 4392c9c189c4..e47adc97a89b 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -31,7 +31,7 @@ endif
ifdef CONFIG_ARC_CURR_IN_REG
-# For a global register defintion, make sure it gets passed to every file
+# For a global register definition, make sure it gets passed to every file
# We had a customer reported bug where some code built in kernel was NOT using
# any kernel headers, and missing the r25 global register
# Can't do unconditionally because of recursive include issues
diff --git a/arch/arc/boot/dts/haps_hs.dts b/arch/arc/boot/dts/haps_hs.dts
index 60d578e2781f..76ad527a0847 100644
--- a/arch/arc/boot/dts/haps_hs.dts
+++ b/arch/arc/boot/dts/haps_hs.dts
@@ -16,7 +16,7 @@
memory {
device_type = "memory";
/* CONFIG_LINUX_RAM_BASE needs to match low mem start */
- reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MB low mem */
+ reg = <0x0 0x80000000 0x0 0x40000000 /* 1 GB low mem */
0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */
};
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 5afc79c9b2f5..7a36d79b5b2f 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -14,14 +14,14 @@
#include <asm/barrier.h>
#include <asm/smp.h>
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned int val; \
\
@@ -37,7 +37,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned int val; \
\
@@ -63,7 +63,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned int val, orig; \
\
@@ -94,11 +94,11 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#ifndef CONFIG_SMP
/* violating atomic_xxx API locking protocol in UP for optimization sake */
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#else
-static inline void atomic_set(atomic_t *v, int i)
+static inline void arch_atomic_set(atomic_t *v, int i)
{
/*
* Independent of hardware support, all of the atomic_xxx() APIs need
@@ -116,7 +116,7 @@ static inline void atomic_set(atomic_t *v, int i)
atomic_ops_unlock(flags);
}
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
#endif
@@ -126,7 +126,7 @@ static inline void atomic_set(atomic_t *v, int i)
*/
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -136,7 +136,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
unsigned long temp; \
@@ -154,7 +154,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
unsigned long orig; \
@@ -180,9 +180,6 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-#define atomic_andnot atomic_andnot
-#define atomic_fetch_andnot atomic_fetch_andnot
-
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
@@ -193,6 +190,9 @@ ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
+#define arch_atomic_andnot arch_atomic_andnot
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
+
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
@@ -220,7 +220,7 @@ typedef struct {
#define ATOMIC64_INIT(a) { (a) }
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 val;
@@ -232,7 +232,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return val;
}
-static inline void atomic64_set(atomic64_t *v, s64 a)
+static inline void arch_atomic64_set(atomic64_t *v, s64 a)
{
/*
* This could have been a simple assignment in "C" but would need
@@ -253,7 +253,7 @@ static inline void atomic64_set(atomic64_t *v, s64 a)
}
#define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(s64 a, atomic64_t *v) \
+static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \
{ \
s64 val; \
\
@@ -270,7 +270,7 @@ static inline void atomic64_##op(s64 a, atomic64_t *v) \
} \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
-static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
+static inline s64 arch_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \
s64 val; \
\
@@ -293,7 +293,7 @@ static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
}
#define ATOMIC64_FETCH_OP(op, op1, op2) \
-static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
+static inline s64 arch_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \
s64 val, orig; \
\
@@ -320,9 +320,6 @@ static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
ATOMIC64_OP_RETURN(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
-#define atomic64_andnot atomic64_andnot
-#define atomic64_fetch_andnot atomic64_fetch_andnot
-
ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
@@ -330,13 +327,16 @@ ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)
+#define arch_atomic64_andnot arch_atomic64_andnot
+#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
+
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static inline s64
-atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
+arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{
s64 prev;
@@ -358,7 +358,7 @@ atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
return prev;
}
-static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
+static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
{
s64 prev;
@@ -379,14 +379,14 @@ static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
}
/**
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic64_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 val;
@@ -408,10 +408,10 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
return val;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
/**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -419,7 +419,7 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, if it was not @u.
* Returns the old value of @v
*/
-static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 old, temp;
@@ -443,7 +443,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return old;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
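
The wholesale atomic_* to arch_atomic_* rename follows the tree-wide convention that reserves the unprefixed names for the generic instrumented wrappers, so KASAN/KCSAN see every atomic access. The wrapper generated in include/asm-generic/atomic-instrumented.h is, in essence:

	static __always_inline int atomic_read(const atomic_t *v)
	{
		instrument_atomic_read(v, sizeof(*v));	/* sanitizer hook */
		return arch_atomic_read(v);		/* the arch op renamed above */
	}
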
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index 9b87e162e539..d42917e803e1 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -63,7 +63,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
#endif
-#define cmpxchg(ptr, o, n) ({ \
+#define arch_cmpxchg(ptr, o, n) ({ \
(typeof(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \
(unsigned long)(n)); \
@@ -75,7 +75,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
* !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
* semantics, and this lock also happens to be used by atomic_*()
*/
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
/*
@@ -116,14 +116,14 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
*
* Technically the lock is also needed for UP (boils down to irq save/restore)
* but we can cheat a bit since cmpxchg() atomic_ops_lock() would cause irqs to
- * be disabled thus can't possibly be interrpted/preempted/clobbered by xchg()
+ * be disabled thus can't possibly be interrupted/preempted/clobbered by xchg()
* Other way around, xchg is one instruction anyways, so can't be interrupted
* as such
*/
#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
-#define xchg(ptr, with) \
+#define arch_xchg(ptr, with) \
({ \
unsigned long flags; \
typeof(*(ptr)) old_val; \
@@ -136,14 +136,14 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
#else
-#define xchg(ptr, with) _xchg(ptr, with)
+#define arch_xchg(ptr, with) _xchg(ptr, with)
#endif
/*
* "atomic" variant of xchg()
* REQ: It needs to follow the same serialization rules as other atomic_xxx()
- * Since xchg() doesn't always do that, it would seem that following defintion
+ * Since xchg() doesn't always do that, it would seem that following definition
* is incorrect. But here's the rationale:
* SMP : Even xchg() takes the atomic_ops_lock, so OK.
* LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
@@ -153,6 +153,6 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
* can't be clobbered by others. Thus no serialization required when
* atomic_xchg is involved.
*/
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif
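
For the !LLSC && SMP configuration, arch_xchg() above serializes against the atomics by taking the same hashed atomic_ops_lock; a sketch of what the macro body boils down to:

	/* sketch of the !LLSC && SMP arch_xchg() expansion */
	unsigned long flags;
	typeof(*(ptr)) old_val;

	atomic_ops_lock(flags);		/* spin_lock_irqsave on the shared lock */
	old_val = _xchg(ptr, with);	/* single EX instruction underneath */
	atomic_ops_unlock(flags);	/* spin_unlock_irqrestore */
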
diff --git a/arch/arc/include/asm/mmzone.h b/arch/arc/include/asm/mmzone.h
deleted file mode 100644
index b86b9d1e54dc..000000000000
--- a/arch/arc/include/asm/mmzone.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
- */
-
-#ifndef _ASM_ARC_MMZONE_H
-#define _ASM_ARC_MMZONE_H
-
-#ifdef CONFIG_DISCONTIGMEM
-
-extern struct pglist_data node_data[];
-#define NODE_DATA(nid) (&node_data[nid])
-
-static inline int pfn_to_nid(unsigned long pfn)
-{
- int is_end_low = 1;
-
- if (IS_ENABLED(CONFIG_ARC_HAS_PAE40))
- is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL);
-
- /*
- * node 0: lowmem: 0x8000_0000 to 0xFFFF_FFFF
- * node 1: HIGHMEM w/o PAE40: 0x0 to 0x7FFF_FFFF
- * HIGHMEM with PAE40: 0x1_0000_0000 to ...
- */
- if (pfn >= ARCH_PFN_OFFSET && is_end_low)
- return 0;
-
- return 1;
-}
-
-static inline int pfn_valid(unsigned long pfn)
-{
- int nid = pfn_to_nid(pfn);
-
- return (pfn <= node_end_pfn(nid));
-}
-#endif /* CONFIG_DISCONTIGMEM */
-
-#endif
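
With the per-node bookkeeping gone, the lowmem/highmem hole that pfn_to_nid() used to paper over is handled by an ARC-private pfn_valid() (see the rewritten HIGHMEM comment in mm/init.c later in this series). In sketch form it is just the union of the two ranges:

	/* sketch: valid iff the pfn lies in lowmem or in highmem; the four
	 * bounds are the globals computed in setup_arch_memory()
	 */
	int pfn_valid(unsigned long pfn)
	{
		return (pfn >= min_low_pfn && pfn <= max_low_pfn) ||
		       (pfn >= min_high_pfn && pfn <= max_high_pfn);
	}
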
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index ad9b7fe4dba3..4a9d33372fe2 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -7,6 +7,18 @@
#include <uapi/asm/page.h>
+#ifdef CONFIG_ARC_HAS_PAE40
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 40
+#define PAGE_MASK_PHYS (0xff00000000ull | PAGE_MASK)
+
+#else /* CONFIG_ARC_HAS_PAE40 */
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#define PAGE_MASK_PHYS PAGE_MASK
+
+#endif /* CONFIG_ARC_HAS_PAE40 */
+
#ifndef __ASSEMBLY__
#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
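
The point of PAGE_MASK_PHYS: PAGE_MASK is built from a 32-bit unsigned long, so applying it to a 64-bit phys_addr_t zero-extends and silently clears PAE40 bits 32-39. A self-contained demonstration (8K pages assumed for illustration; uint32_t models ARC's 32-bit long so the demo behaves the same on any host):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT	13
	#define PAGE_MASK	((uint32_t)~((1u << PAGE_SHIFT) - 1))
	#define PAGE_MASK_PHYS	(0xff00000000ull | PAGE_MASK)

	int main(void)
	{
		uint64_t paddr = 0x1234567890ull;	/* a 40-bit PAE40 address */

		/* prints 34566000: bits 32-39 lost */
		printf("%llx\n", (unsigned long long)(paddr & PAGE_MASK));
		/* prints 1234566000: bits 32-39 preserved */
		printf("%llx\n", (unsigned long long)(paddr & PAGE_MASK_PHYS));
		return 0;
	}
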
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index 6147db925248..a32ca3104ced 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -129,6 +129,4 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
-#define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd))
-
#endif /* _ASM_ARC_PGALLOC_H */
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 163641726a2b..320cc0ae8a08 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -107,8 +107,8 @@
#define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE)
/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL)
-
+#define _PAGE_CHG_MASK (PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
+ _PAGE_SPECIAL)
/* More Abbrevaited helpers */
#define PAGE_U_NONE __pgprot(___DEF)
#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
@@ -132,13 +132,7 @@
#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
-#ifdef CONFIG_ARC_HAS_PAE40
-#define PTE_BITS_NON_RWX_IN_PD1 (0xff00000000 | PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS 40
-#else
-#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE)
-#define MAX_POSSIBLE_PHYSMEM_BITS 32
-#endif
+#define PTE_BITS_NON_RWX_IN_PD1 (PAGE_MASK_PHYS | _PAGE_CACHEABLE)
/**************************************************************************
* Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
@@ -228,12 +222,6 @@
*/
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-/*
- * No special requirements for lowest virtual address we permit any user space
- * mapping to be mapped at.
- */
-#define FIRST_USER_ADDRESS 0UL
-
/****************************************************************
* Bucket load of VM Helpers
@@ -362,6 +350,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
#define kern_addr_valid(addr) (1)
+#define pmd_pgtable(pmd) ((pgtable_t) pmd_page_vaddr(pmd))
+
/*
* remap a physical page `pfn' of size `size' with page protection `prot'
* into virtual address `from'
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
index 2a97e2718a21..2a4ad619abfb 100644
--- a/arch/arc/include/uapi/asm/page.h
+++ b/arch/arc/include/uapi/asm/page.h
@@ -33,5 +33,4 @@
#define PAGE_MASK (~(PAGE_SIZE-1))
-
#endif /* _UAPI__ASM_ARC_PAGE_H */
diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
index 95f8a4380e11..7a5449dfcb29 100644
--- a/arch/arc/include/uapi/asm/sigcontext.h
+++ b/arch/arc/include/uapi/asm/sigcontext.h
@@ -18,6 +18,7 @@
*/
struct sigcontext {
struct user_regs_struct regs;
+ struct user_regs_arcv2 v2abi;
};
#endif /* _ASM_ARC_SIGCONTEXT_H */
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 1743506081da..2cb8dfe866b6 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -177,7 +177,7 @@ tracesys:
; Do the Sys Call as we normally would.
; Validate the Sys Call number
- cmp r8, NR_syscalls
+ cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi tracesys_exit
@@ -255,7 +255,7 @@ ENTRY(EV_Trap)
;============ Normal syscall case
; syscall num shd not exceed the total system calls avail
- cmp r8, NR_syscalls
+ cmp r8, NR_syscalls - 1
mov.hi r0, -ENOSYS
bhi .Lret_from_system_call
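
Both hunks fix the same off-by-one: bhi branches when r8 is unsigned-higher than the comparand, so checking against NR_syscalls let syscall number NR_syscalls itself (one past the end of the table) through. The corrected check, as C:

	/* C equivalent of the fixed bounds check in the assembly above */
	long dispatch(unsigned int nr)
	{
		if (nr > NR_syscalls - 1)	/* i.e. nr >= NR_syscalls */
			return -ENOSYS;
		return sys_call_table[nr]();
	}
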
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
index ecfbc42d3a40..345a0000554c 100644
--- a/arch/arc/kernel/kgdb.c
+++ b/arch/arc/kernel/kgdb.c
@@ -140,6 +140,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr))
regs->ret = addr;
+ fallthrough;
case 'D':
case 'k':
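
The bare fall-through from 'c' into 'D' is exactly what -Wimplicit-fallthrough flags; fallthrough is the kernel's portable spelling of the attribute, defined in include/linux/compiler_attributes.h roughly as:

	#if __has_attribute(__fallthrough__)
	# define fallthrough	__attribute__((__fallthrough__))
	#else
	# define fallthrough	do {} while (0)	/* fallthrough */
	#endif
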
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
index cabef45f11df..5f0415fc7328 100644
--- a/arch/arc/kernel/kprobes.c
+++ b/arch/arc/kernel/kprobes.c
@@ -317,22 +317,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
* caused the fault.
*/
- /* We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
/*
* In case the user-specified fault handler returned zero,
* try to fix up.
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index d838d0d57696..3793876f42d9 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -50,14 +50,14 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
int ret;
/*
- * This is only for old cores lacking LLOCK/SCOND, which by defintion
+ * This is only for old cores lacking LLOCK/SCOND, which by definition
* can't possibly be SMP. Thus doesn't need to be SMP safe.
* And this also helps reduce the overhead for serializing in
* the UP case
*/
WARN_ON_ONCE(IS_ENABLED(CONFIG_SMP));
- /* Z indicates to userspace if operation succeded */
+ /* Z indicates to userspace if operation succeeded */
regs->status32 &= ~STATUS_Z_MASK;
ret = access_ok(uaddr, sizeof(*uaddr));
@@ -107,7 +107,7 @@ fail:
void arch_cpu_idle(void)
{
- /* Re-enable interrupts <= default irq priority before commiting SLEEP */
+ /* Re-enable interrupts <= default irq priority before committing SLEEP */
const unsigned int arg = 0x10 | ARCV2_IRQ_DEF_PRIO;
__asm__ __volatile__(
@@ -120,7 +120,7 @@ void arch_cpu_idle(void)
void arch_cpu_idle(void)
{
- /* sleep, but enable both set E1/E2 (levels of interrutps) before committing */
+ /* sleep, but enable both set E1/E2 (levels of interrupts) before committing */
__asm__ __volatile__("sleep 0x3 \n");
}
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index a78d8f745a67..cb2f88502baf 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -61,6 +61,41 @@ struct rt_sigframe {
unsigned int sigret_magic;
};
+static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+ int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+ struct user_regs_arcv2 v2abi;
+
+ v2abi.r30 = regs->r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ v2abi.r58 = regs->r58;
+ v2abi.r59 = regs->r59;
+#else
+ v2abi.r58 = v2abi.r59 = 0;
+#endif
+ err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
+#endif
+ return err;
+}
+
+static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
+{
+ int err = 0;
+#ifndef CONFIG_ISA_ARCOMPACT
+ struct user_regs_arcv2 v2abi;
+
+ err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));
+
+ regs->r30 = v2abi.r30;
+#ifdef CONFIG_ARC_HAS_ACCL_REGS
+ regs->r58 = v2abi.r58;
+ regs->r59 = v2abi.r59;
+#endif
+#endif
+ return err;
+}
+
static int
stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
sigset_t *set)
@@ -94,9 +129,13 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ if (is_isa_arcv2())
+ err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
- return err;
+ return err ? -EFAULT : 0;
}
static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
@@ -109,8 +148,12 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
err |= __copy_from_user(&uregs.scratch,
&(sf->uc.uc_mcontext.regs.scratch),
sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ if (is_isa_arcv2())
+ err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);
+
if (err)
- return err;
+ return -EFAULT;
set_current_blocked(&set);
regs->bta = uregs.scratch.bta;
@@ -259,7 +302,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
regs->r2 = (unsigned long)&sf->uc;
/*
- * small optim to avoid unconditonally calling do_sigaltstack
+ * small optim to avoid unconditionally calling do_sigaltstack
* in sigreturn path, now that we only have rt_sigreturn
*/
magic = MAGIC_SIGALTSTK;
@@ -391,7 +434,7 @@ void do_signal(struct pt_regs *regs)
void do_notify_resume(struct pt_regs *regs)
{
/*
- * ASM glue gaurantees that this is only called when returning to
+ * ASM glue guarantees that this is only called when returning to
* user mode
*/
if (test_thread_flag(TIF_NOTIFY_RESUME))
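
Besides stashing the ARCv2-only registers, both hunks stop leaking the raw __copy_{to,from}_user() return value (the count of bytes not copied) to the caller; accumulated failures are collapsed into the -EFAULT the signal core expects. The pattern in isolation (dst/src/len are placeholders):

	/* sketch: OR together copy results, normalize to an errno at the end */
	int err = 0;

	err |= __copy_to_user(dst1, src1, len1);
	err |= __copy_to_user(dst2, src2, len2);

	return err ? -EFAULT : 0;	/* never the raw byte count */
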
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 52906d314537..db0e104d6835 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -189,7 +189,6 @@ void start_kernel_secondary(void)
pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
local_irq_enable();
- preempt_disable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
index f73da203b170..1b9576d21e24 100644
--- a/arch/arc/kernel/stacktrace.c
+++ b/arch/arc/kernel/stacktrace.c
@@ -83,7 +83,7 @@ seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
* is safe-kept and BLINK at a well known location in there
*/
- if (tsk->state == TASK_RUNNING)
+ if (task_is_running(tsk))
return -1;
frame_info->task = tsk;
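
task_is_running() is the tree-wide replacement for open-coded ->state comparisons; at the time of this series it is approximately the following, with the single READ_ONCE() documenting that the peek at another task's state is lockless:

	/* include/linux/sched.h, approximately */
	#define task_is_running(task)	(READ_ONCE((task)->state) == TASK_RUNNING)
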
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index a331bb5d8319..7654c2e42dc0 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -83,12 +83,12 @@ static void show_faulting_vma(unsigned long address)
* non-inclusive vma
*/
mmap_read_lock(active_mm);
- vma = find_vma(active_mm, address);
+ vma = vma_lookup(active_mm, address);
- /* check against the find_vma( ) behaviour which returns the next VMA
- * if the container VMA is not found
+	/* Look up the VMA containing the address; vma_lookup() returns NULL
+	 * if no VMA contains it
*/
- if (vma && (vma->vm_start <= address)) {
+ if (vma) {
char buf[ARC_PATH_MAX];
char *nm = "?";
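
vma_lookup() folds the old "vma && vma->vm_start <= address" dance into the lookup itself, returning NULL unless the address is actually inside a VMA. The generic helper is essentially:

	static inline
	struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma = find_vma(mm, addr);

		if (vma && addr < vma->vm_start)
			vma = NULL;	/* find_vma() gave us the *next* VMA */

		return vma;
	}
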
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 74ad4256022e..47bab67f8649 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -187,25 +187,26 @@ static void init_unwind_table(struct unwind_table *table, const char *name,
const void *table_start, unsigned long table_size,
const u8 *header_start, unsigned long header_size)
{
- const u8 *ptr = header_start + 4;
- const u8 *end = header_start + header_size;
-
table->core.pc = (unsigned long)core_start;
table->core.range = core_size;
table->init.pc = (unsigned long)init_start;
table->init.range = init_size;
table->address = table_start;
table->size = table_size;
-
- /* See if the linker provided table looks valid. */
- if (header_size <= 4
- || header_start[0] != 1
- || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
- || header_start[2] == DW_EH_PE_omit
- || read_pointer(&ptr, end, header_start[2]) <= 0
- || header_start[3] == DW_EH_PE_omit)
- header_start = NULL;
-
+	/* Avoid pointer arithmetic on a NULL header pointer. */
+ if (header_start != NULL) {
+ const u8 *ptr = header_start + 4;
+ const u8 *end = header_start + header_size;
+ /* See if the linker provided table looks valid. */
+ if (header_size <= 4
+ || header_start[0] != 1
+ || (void *)read_pointer(&ptr, end, header_start[1])
+ != table_start
+ || header_start[2] == DW_EH_PE_omit
+ || read_pointer(&ptr, end, header_start[2]) <= 0
+ || header_start[3] == DW_EH_PE_omit)
+ header_start = NULL;
+ }
table->hdrsz = header_size;
smp_wmb();
table->header = header_start;
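
The motivation for the reshuffle: the old code computed header_start + 4 before any of the checks ran, and pointer arithmetic on NULL is undefined behaviour that UBSAN reports even when the result is never dereferenced. The guard pattern in general form:

	/* derive pointers only once the base is known to be non-NULL */
	if (base != NULL) {
		const u8 *ptr = base + 4;	/* well-defined now */
		/* ... validate; on failure the caller-visible base goes NULL ... */
	}
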
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 33ce59d91461..e2146a8da195 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -57,7 +57,6 @@ SECTIONS
.init.ramfs : { INIT_RAM_FS }
. = ALIGN(PAGE_SIZE);
- _stext = .;
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(L1_CACHE_BYTES)
@@ -83,6 +82,7 @@ SECTIONS
.text : {
_text = .;
+ _stext = .;
TEXT_TEXT
SCHED_TEXT
CPUIDLE_TEXT
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index ce07e697916c..abfeef7bf6f8 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -32,11 +32,6 @@ unsigned long arch_pfn_offset;
EXPORT_SYMBOL(arch_pfn_offset);
#endif
-#ifdef CONFIG_DISCONTIGMEM
-struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
-EXPORT_SYMBOL(node_data);
-#endif
-
long __init arc_get_mem_sz(void)
{
return low_mem_sz;
@@ -139,25 +134,28 @@ void __init setup_arch_memory(void)
#ifdef CONFIG_HIGHMEM
/*
- * Populate a new node with highmem
- *
* On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
- * than addresses in normal ala low memory (0x8000_0000 based).
+ * than addresses in normal aka low memory (0x8000_0000 based).
* Even with PAE, the huge peripheral space hole would waste a lot of
- * mem with single mem_map[]. This warrants a mem_map per region design.
- * Thus HIGHMEM on ARC is imlemented with DISCONTIGMEM.
- *
- * DISCONTIGMEM in turns requires multiple nodes. node 0 above is
- * populated with normal memory zone while node 1 only has highmem
+ * mem with single contiguous mem_map[].
+ * Thus when HIGHMEM on ARC is enabled the memory map corresponding
+ * to the hole is freed and ARC specific version of pfn_valid()
+ * handles the hole in the memory map.
*/
-#ifdef CONFIG_DISCONTIGMEM
- node_set_online(1);
-#endif
min_high_pfn = PFN_DOWN(high_mem_start);
max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
- max_zone_pfn[ZONE_HIGHMEM] = min_low_pfn;
+ /*
+ * max_high_pfn should be ok here for both HIGHMEM and HIGHMEM+PAE.
+ * For HIGHMEM without PAE max_high_pfn should be less than
+ * min_low_pfn to guarantee that these two regions don't overlap.
+ * For PAE case highmem is greater than lowmem, so it is natural
+ * to use max_high_pfn.
+ *
+ * In both cases, holes should be handled by pfn_valid().
+ */
+ max_zone_pfn[ZONE_HIGHMEM] = max_high_pfn;
high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
@@ -194,7 +192,6 @@ void __init mem_init(void)
{
memblock_free_all();
highmem_init();
- mem_init_print_info(NULL);
}
#ifdef CONFIG_HIGHMEM
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
index fac4adc90204..95c649fbc95a 100644
--- a/arch/arc/mm/ioremap.c
+++ b/arch/arc/mm/ioremap.c
@@ -53,9 +53,10 @@ EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
unsigned long flags)
{
+ unsigned int off;
unsigned long vaddr;
struct vm_struct *area;
- phys_addr_t off, end;
+ phys_addr_t end;
pgprot_t prot = __pgprot(flags);
/* Don't allow wraparound, zero size */
@@ -72,7 +73,7 @@ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
/* Mappings have to be page-aligned */
off = paddr & ~PAGE_MASK;
- paddr &= PAGE_MASK;
+ paddr &= PAGE_MASK_PHYS;
size = PAGE_ALIGN(end + 1) - paddr;
/*
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 9bb3c24f3677..9c7c68247289 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -576,7 +576,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
pte_t *ptep)
{
unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
- phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
+ phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
struct page *page = pfn_to_page(pte_pfn(*ptep));
create_tlb(vma, vaddr, ptep);
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 853aab5ab327..06b6187b67af 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -31,8 +31,10 @@ config ARM
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_USE_MEMTEST
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_LD_ORPHAN_WARN
@@ -76,6 +78,7 @@ config ARM
select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARM_LPAE
select HAVE_ARM_SMCCC if CPU_V7
select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
select HAVE_CONTEXT_TRACKING
@@ -215,9 +218,6 @@ config GENERIC_CALIBRATE_DELAY
config ARCH_MAY_HAVE_PC_FDC
bool
-config ZONE_DMA
- bool
-
config ARCH_SUPPORTS_UPROBES
def_bool y
@@ -348,8 +348,8 @@ config ARCH_EP93XX
select ARM_AMBA
imply ARM_PATCH_PHYS_VIRT
select ARM_VIC
+ select GENERIC_IRQ_MULTI_HANDLER
select AUTO_ZRELADDR
- select CLKDEV_LOOKUP
select CLKSRC_MMIO
select CPU_ARM920T
select GPIOLIB
@@ -500,7 +500,6 @@ config ARCH_OMAP1
bool "TI OMAP1"
depends on MMU
select ARCH_OMAP
- select CLKDEV_LOOKUP
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_MULTI_HANDLER
@@ -1292,9 +1291,15 @@ config KASAN_SHADOW_OFFSET
config NR_CPUS
int "Maximum number of CPUs (2-32)"
- range 2 32
+ range 2 16 if DEBUG_KMAP_LOCAL
+ range 2 32 if !DEBUG_KMAP_LOCAL
depends on SMP
default "4"
+ help
+ The maximum number of CPUs that the kernel can support.
+ Up to 32 CPUs can be supported, or up to 16 if kmap_local()
+ debugging is enabled, which uses half of the per-CPU fixmap
+ slots as guard regions.
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
@@ -1320,7 +1325,7 @@ config ARM_PSCI
# selected platforms.
config ARCH_NR_GPIO
int
- default 2048 if ARCH_SOCFPGA
+ default 2048 if ARCH_INTEL_SOCFPGA
default 1024 if ARCH_BRCMSTB || ARCH_RENESAS || ARCH_TEGRA || \
ARCH_ZYNQ || ARCH_ASPEED
default 512 if ARCH_EXYNOS || ARCH_KEYSTONE || SOC_OMAP5 || \
@@ -1503,14 +1508,6 @@ config HW_PERF_EVENTS
def_bool y
depends on ARM_PMU
-config SYS_SUPPORTS_HUGETLBFS
- def_bool y
- depends on ARM_LPAE
-
-config HAVE_ARCH_TRANSPARENT_HUGEPAGE
- def_bool y
- depends on ARM_LPAE
-
config ARCH_WANT_GENERAL_HUGETLB
def_bool y
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 9e0b5e7f12af..36016497b1b3 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1087,7 +1087,7 @@ choice
on SD5203 UART.
config DEBUG_SOCFPGA_UART0
- depends on ARCH_SOCFPGA
+ depends on ARCH_INTEL_SOCFPGA
bool "Use SOCFPGA UART0 for low-level debug"
select DEBUG_UART_8250
help
@@ -1095,7 +1095,7 @@ choice
on SOCFPGA(Cyclone 5 and Arria 5) based platforms.
config DEBUG_SOCFPGA_ARRIA10_UART1
- depends on ARCH_SOCFPGA
+ depends on ARCH_INTEL_SOCFPGA
bool "Use SOCFPGA Arria10 UART1 for low-level debug"
select DEBUG_UART_8250
help
@@ -1103,7 +1103,7 @@ choice
on SOCFPGA(Arria 10) based platforms.
config DEBUG_SOCFPGA_CYCLONE5_UART1
- depends on ARCH_SOCFPGA
+ depends on ARCH_INTEL_SOCFPGA
bool "Use SOCFPGA Cyclone 5 UART1 for low-level debug"
select DEBUG_UART_8250
help
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index dad5502ecc28..415c3514573a 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -209,7 +209,7 @@ machine-$(CONFIG_PLAT_SAMSUNG) += s3c
machine-$(CONFIG_ARCH_S5PV210) += s5pv210
machine-$(CONFIG_ARCH_SA1100) += sa1100
machine-$(CONFIG_ARCH_RENESAS) += shmobile
-machine-$(CONFIG_ARCH_SOCFPGA) += socfpga
+machine-$(CONFIG_ARCH_INTEL_SOCFPGA) += socfpga
machine-$(CONFIG_ARCH_STI) += sti
machine-$(CONFIG_ARCH_STM32) += stm32
machine-$(CONFIG_ARCH_SUNXI) += sunxi
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index fd94e27ba4fa..8eb70c1febce 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -96,13 +96,6 @@ endif
$(foreach o, $(libfdt_objs) atags_to_fdt.o fdt_check_mem_start.o, \
$(eval CFLAGS_$(o) := -I $(srctree)/scripts/dtc/libfdt -fno-stack-protector))
-# These were previously generated C files. When you are building the kernel
-# with O=, make sure to remove the stale files in the output tree. Otherwise,
-# the build system wrongly compiles the stale ones.
-ifdef building_out_of_srctree
-$(shell rm -f $(addprefix $(obj)/, fdt_rw.c fdt_ro.c fdt_wip.c fdt.c))
-endif
-
targets := vmlinux vmlinux.lds piggy_data piggy.o \
lib1funcs.o ashldi3.o bswapsdi2.o \
head.o $(OBJS)
@@ -118,8 +111,8 @@ asflags-y := -DZIMAGE
# Supply kernel BSS size to the decompressor via a linker symbol.
KBSS_SZ = $(shell echo $$(($$($(NM) $(obj)/../../../../vmlinux | \
- sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \
- -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) )
+ sed -n -e 's/^\([^ ]*\) [ABD] __bss_start$$/-0x\1/p' \
+ -e 's/^\([^ ]*\) [ABD] __bss_stop$$/+0x\1/p') )) )
LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
# Supply ZRELADDR to the decompressor via a linker symbol.
ifneq ($(CONFIG_AUTO_ZRELADDR),y)
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 8e5d4ab4e75e..f8f09c5066e7 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -333,8 +333,11 @@ dtb-$(CONFIG_ARCH_LPC18XX) += \
dtb-$(CONFIG_ARCH_LPC32XX) += \
lpc3250-ea3250.dtb \
lpc3250-phy3250.dtb
+dtb-$(CONFIG_ARCH_WPCM450) += \
+ nuvoton-wpcm450-supermicro-x9sci-ln4f.dtb
dtb-$(CONFIG_ARCH_NPCM7XX) += \
nuvoton-npcm730-gsj.dtb \
+ nuvoton-npcm730-gbs.dtb \
nuvoton-npcm730-kudo.dtb \
nuvoton-npcm750-evb.dtb \
nuvoton-npcm750-runbmc-olympus.dtb
@@ -660,6 +663,7 @@ dtb-$(CONFIG_SOC_IMX7D) += \
imx7d-pico-hobbit.dtb \
imx7d-pico-nymph.dtb \
imx7d-pico-pi.dtb \
+ imx7d-remarkable2.dtb \
imx7d-sbc-imx7.dtb \
imx7d-sdb.dtb \
imx7d-sdb-reva.dtb \
@@ -929,7 +933,9 @@ dtb-$(CONFIG_ARCH_QCOM) += \
qcom-msm8974-sony-xperia-castor.dtb \
qcom-msm8974-sony-xperia-honami.dtb \
qcom-mdm9615-wp8548-mangoh-green.dtb \
- qcom-sdx55-mtp.dtb
+ qcom-sdx55-mtp.dtb \
+ qcom-sdx55-t55.dtb \
+ qcom-sdx55-telit-fn980-tlb.dtb
dtb-$(CONFIG_ARCH_RDA) += \
rda8810pl-orangepi-2g-iot.dtb \
rda8810pl-orangepi-i96.dtb
@@ -1033,7 +1039,7 @@ dtb-$(CONFIG_ARCH_S5PV210) += \
s5pv210-smdkc110.dtb \
s5pv210-smdkv210.dtb \
s5pv210-torbreck.dtb
-dtb-$(CONFIG_ARCH_SOCFPGA) += \
+dtb-$(CONFIG_ARCH_INTEL_SOCFPGA) += \
socfpga_arria5_socdk.dtb \
socfpga_arria10_socdk_nand.dtb \
socfpga_arria10_socdk_qspi.dtb \
@@ -1071,11 +1077,16 @@ dtb-$(CONFIG_ARCH_STM32) += \
stm32746g-eval.dtb \
stm32h743i-eval.dtb \
stm32h743i-disco.dtb \
+ stm32h750i-art-pi.dtb \
stm32mp153c-dhcom-drc02.dtb \
stm32mp157a-avenger96.dtb \
stm32mp157a-dhcor-avenger96.dtb \
stm32mp157a-dk1.dtb \
stm32mp157a-iot-box.dtb \
+ stm32mp157a-microgea-stm32mp1-microdev2.0.dtb \
+ stm32mp157a-microgea-stm32mp1-microdev2.0-of7.dtb \
+ stm32mp157a-icore-stm32mp1-ctouch2.dtb \
+ stm32mp157a-icore-stm32mp1-edimm2.2.dtb \
stm32mp157a-stinger96.dtb \
stm32mp157c-dhcom-pdk2.dtb \
stm32mp157c-dhcom-picoitx.dtb \
@@ -1105,7 +1116,8 @@ dtb-$(CONFIG_MACH_SUN4I) += \
sun4i-a10-olinuxino-lime.dtb \
sun4i-a10-pcduino.dtb \
sun4i-a10-pcduino2.dtb \
- sun4i-a10-pov-protab2-ips9.dtb
+ sun4i-a10-pov-protab2-ips9.dtb \
+ sun4i-a10-topwise-a721.dtb
dtb-$(CONFIG_MACH_SUN5I) += \
sun5i-a10s-auxtek-t003.dtb \
sun5i-a10s-auxtek-t004.dtb \
@@ -1337,6 +1349,7 @@ dtb-$(CONFIG_MACH_ARMADA_375) += \
armada-375-db.dtb
dtb-$(CONFIG_MACH_ARMADA_38X) += \
armada-382-rd-ac3x-48g4x2xl.dtb \
+	armada-385-atl-x530.dtb \
armada-385-clearfog-gtr-s4.dtb \
armada-385-clearfog-gtr-l8.dtb \
armada-385-db-88f6820-amc.dtb \
@@ -1397,6 +1410,7 @@ dtb-$(CONFIG_ARCH_MILBEAUT) += milbeaut-m10v-evb.dtb
dtb-$(CONFIG_ARCH_MSTARV7) += \
mstar-infinity-msc313-breadbee_crust.dtb \
mstar-infinity2m-ssd202d-ssd201htv2.dtb \
+ mstar-infinity2m-ssd202d-unitv2.dtb \
mstar-infinity3-msc313e-breadbee.dtb \
mstar-mercury5-ssc8336n-midrived08.dtb
dtb-$(CONFIG_ARCH_ASPEED) += \
@@ -1406,6 +1420,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-bmc-ampere-mtjade.dtb \
aspeed-bmc-arm-centriq2400-rep.dtb \
aspeed-bmc-arm-stardragon4800-rep2.dtb \
+ aspeed-bmc-asrock-e3c246d4i.dtb \
aspeed-bmc-bytedance-g220a.dtb \
aspeed-bmc-facebook-cmm.dtb \
aspeed-bmc-facebook-galaxy100.dtb \
@@ -1418,6 +1433,7 @@ dtb-$(CONFIG_ARCH_ASPEED) += \
aspeed-bmc-facebook-yosemitev2.dtb \
aspeed-bmc-ibm-everest.dtb \
aspeed-bmc-ibm-rainier.dtb \
+ aspeed-bmc-ibm-rainier-1s4u.dtb \
aspeed-bmc-ibm-rainier-4u.dtb \
aspeed-bmc-intel-s2600wf.dtb \
aspeed-bmc-inspur-fp5280g2.dtb \
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index b4feb85e171a..e2ee8b8c07bc 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -26,54 +26,54 @@
&gpio0 {
gpio-line-names =
- "[ethernet]",
- "[ethernet]",
+ "[mdio_data]",
+ "[mdio_clk]",
"P9_22 [spi0_sclk]",
"P9_21 [spi0_d0]",
"P9_18 [spi0_d1]",
"P9_17 [spi0_cs0]",
- "[sd card]",
- "P9_42A [ecappwm0]",
- "P8_35 [hdmi]",
- "P8_33 [hdmi]",
- "P8_31 [hdmi]",
- "P8_32 [hdmi]",
+ "[mmc0_cd]",
+ "P8_42A [ecappwm0]",
+ "P8_35 [lcd d12]",
+ "P8_33 [lcd d13]",
+ "P8_31 [lcd d14]",
+ "P8_32 [lcd d15]",
"P9_20 [i2c2_sda]",
"P9_19 [i2c2_scl]",
"P9_26 [uart1_rxd]",
"P9_24 [uart1_txd]",
- "[ethernet]",
- "[ethernet]",
- "[usb]",
- "[hdmi]",
+ "[rmii1_txd3]",
+ "[rmii1_txd2]",
+ "[usb0_drvvbus]",
+ "[hdmi cec]",
"P9_41B",
- "[ethernet]",
+ "[rmii1_txd1]",
"P8_19 [ehrpwm2a]",
"P8_13 [ehrpwm2b]",
- "[NC]",
- "[NC]",
+ "NC",
+ "NC",
"P8_14",
"P8_17",
- "[ethernet]",
- "[ethernet]",
+ "[rmii1_txd0]",
+ "[rmii1_refclk]",
"P9_11 [uart4_rxd]",
"P9_13 [uart4_txd]";
};
&gpio1 {
gpio-line-names =
- "P8_25 [emmc]",
- "[emmc]",
- "P8_5 [emmc]",
- "P8_6 [emmc]",
- "P8_23 [emmc]",
- "P8_22 [emmc]",
- "P8_3 [emmc]",
- "P8_4 [emmc]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
+ "P8_25 [mmc1_dat0]",
+ "[mmc1_dat1]",
+ "P8_5 [mmc1_dat2]",
+ "P8_6 [mmc1_dat3]",
+ "P8_23 [mmc1_dat4]",
+ "P8_22 [mmc1_dat5]",
+ "P8_3 [mmc1_dat6]",
+ "P8_4 [mmc1_dat7]",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
"P8_12",
"P8_11",
"P8_16",
@@ -82,13 +82,13 @@
"P9_23",
"P9_14 [ehrpwm1a]",
"P9_16 [ehrpwm1b]",
- "[emmc]",
+ "[emmc rst]",
"[usr0 led]",
"[usr1 led]",
"[usr2 led]",
"[usr3 led]",
- "[hdmi]",
- "[usb]",
+ "[hdmi irq]",
+ "[usb vbus oc]",
"[hdmi audio]",
"P9_12",
"P8_26",
@@ -116,38 +116,38 @@
"P8_38 [hdmi]",
"P8_36 [hdmi]",
"P8_34 [hdmi]",
- "[ethernet]",
- "[ethernet]",
- "[ethernet]",
- "[ethernet]",
+ "[rmii1_rxd3]",
+ "[rmii1_rxd2]",
+ "[rmii1_rxd1]",
+ "[rmii1_rxd0]",
"P8_27 [hdmi]",
"P8_29 [hdmi]",
"P8_28 [hdmi]",
"P8_30 [hdmi]",
- "[emmc]",
- "[emmc]",
- "[emmc]",
- "[emmc]",
- "[emmc]",
- "[emmc]";
+ "[mmc0_dat3]",
+ "[mmc0_dat2]",
+ "[mmc0_dat1]",
+ "[mmc0_dat0]",
+ "[mmc0_clk]",
+ "[mmc0_cmd]";
};
&gpio3 {
gpio-line-names =
- "[ethernet]",
- "[ethernet]",
- "[ethernet]",
- "[ethernet]",
- "[ethernet]",
- "[i2c0]",
- "[i2c0]",
- "[emu]",
- "[emu]",
- "[ethernet]",
- "[ethernet]",
- "[NC]",
- "[NC]",
- "[usb]",
+ "[mii col]",
+ "[mii crs]",
+ "[mii rx err]",
+ "[mii tx en]",
+ "[mii rx dv]",
+ "[i2c0 sda]",
+ "[i2c0 scl]",
+ "[jtag emu0]",
+ "[jtag emu1]",
+ "[mii tx clk]",
+ "[mii rx clk]",
+ "NC",
+ "NC",
+ "[usb vbus en]",
"P9_31 [spi1_sclk]",
"P9_29 [spi1_d0]",
"P9_30 [spi1_d1]",
@@ -156,14 +156,14 @@
"P9_27",
"P9_41A",
"P9_25",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]";
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC";
};
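
The payoff of naming lines after their schematic signals instead of generic "[ethernet]"/"[emmc]" placeholders is that userspace can identify a line by what it actually is. A sketch against the libgpiod v1 API (chip name and offset are illustrative, not taken from this board):

	#include <stdio.h>
	#include <gpiod.h>	/* libgpiod v1 */

	int main(void)
	{
		struct gpiod_chip *chip = gpiod_chip_open_by_name("gpiochip0");
		struct gpiod_line *line;

		if (!chip)
			return 1;

		line = gpiod_chip_get_line(chip, 16);	/* e.g. "[rmii1_txd3]" */
		if (line)
			printf("line 16: %s\n", gpiod_line_name(line));

		gpiod_chip_close(chip);
		return 0;
	}
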
diff --git a/arch/arm/boot/dts/am335x-pocketbeagle.dts b/arch/arm/boot/dts/am335x-pocketbeagle.dts
index d526c5941c9b..209cdd17dc1e 100644
--- a/arch/arm/boot/dts/am335x-pocketbeagle.dts
+++ b/arch/arm/boot/dts/am335x-pocketbeagle.dts
@@ -61,51 +61,51 @@
&gpio0 {
gpio-line-names =
- "[NC]",
- "[NC]",
+ "NC",
+ "NC",
"P1.08 [SPI0_CLK]",
"P1.10 [SPI0_MISO]",
"P1.12 [SPI0_MOSI]",
"P1.06 [SPI0_CS]",
"[MMC0_CD]",
"P2.29 [SPI1_CLK]",
- "[SYSBOOT]",
- "[SYSBOOT]",
- "[SYSBOOT]",
- "[SYSBOOT]",
+ "[SYSBOOT 12]",
+ "[SYSBOOT 13]",
+ "[SYSBOOT 14]",
+ "[SYSBOOT 15]",
"P1.26 [I2C2_SDA]",
"P1.28 [I2C2_SCL]",
"P2.11 [I2C1_SDA]",
"P2.09 [I2C1_SCL]",
- "[NC]",
- "[NC]",
- "[NC]",
+ "NC",
+ "NC",
+ "NC",
"P2.31 [SPI1_CS]",
"P1.20 [PRU0.16]",
- "[NC]",
- "[NC]",
+ "NC",
+ "NC",
"P2.03",
- "[NC]",
- "[NC]",
+ "NC",
+ "NC",
"P1.34",
"P2.19",
- "[NC]",
- "[NC]",
+ "NC",
+ "NC",
"P2.05 [UART4_RX]",
"P2.07 [UART4_TX]";
};
&gpio1 {
gpio-line-names =
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
"P2.25 [SPI1_MOSI]",
"P1.32 [UART0_RX]",
"P1.30 [UART0_TX]",
@@ -113,10 +113,10 @@
"P2.33",
"P2.22",
"P2.18",
- "[NC]",
- "[NC]",
+ "NC",
+ "NC",
"P2.01 [PWM1A]",
- "[NC]",
+ "NC",
"P2.10",
"[USR LED 0]",
"[USR LED 1]",
@@ -126,35 +126,35 @@
"P2.04",
"P2.02",
"P2.08",
- "[NC]",
- "[NC]",
- "[NC]";
+ "NC",
+ "NC",
+ "NC";
};
&gpio2 {
gpio-line-names =
"P2.20",
"P2.17",
- "[NC]",
- "[NC]",
- "[NC]",
+ "NC",
+ "NC",
+ "NC",
"[EEPROM_WP]",
- "[SYSBOOT]",
- "[SYSBOOT]",
- "[SYSBOOT]",
- "[SYSBOOT]",
- "[SYSBOOT]",
- "[SYSBOOT]",
- "[SYSBOOT]",
- "[SYSBOOT]",
- "[SYSBOOT]",
- "[SYSBOOT]",
- "[SYSBOOT]",
- "[SYSBOOT]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
+ "[SYSBOOT 0]",
+ "[SYSBOOT 1]",
+ "[SYSBOOT 2]",
+ "[SYSBOOT 3]",
+ "[SYSBOOT 4]",
+ "[SYSBOOT 5]",
+ "[SYSBOOT 6]",
+ "[SYSBOOT 7]",
+ "[SYSBOOT 8]",
+ "[SYSBOOT 9]",
+ "[SYSBOOT 10]",
+ "[SYSBOOT 11]",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
"P2.35 [AIN5]",
"P1.02 [AIN6]",
"P1.35 [PRU1.10]",
@@ -169,19 +169,19 @@
&gpio3 {
gpio-line-names =
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
"[I2C0_SDA]",
"[I2C0_SCL]",
- "[JTAG]",
- "[JTAG]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
+ "[JTAG EMU0]",
+ "[JTAG EMU1]",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
"P1.03 [USB1]",
"P1.36 [PWM0A]",
"P1.33 [PRU0.1]",
@@ -191,16 +191,16 @@
"P2.34 [PRU0.5]",
"P2.28 [PRU0.6]",
"P1.29 [PRU0.7]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]",
- "[NC]";
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC",
+ "NC";
};
&am33xx_pinmux {
diff --git a/arch/arm/boot/dts/am33xx-clocks.dtsi b/arch/arm/boot/dts/am33xx-clocks.dtsi
index dced92a8970e..b7b7106f2dee 100644
--- a/arch/arm/boot/dts/am33xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am33xx-clocks.dtsi
@@ -164,7 +164,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-core-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x0490>, <0x045c>, <0x0468>;
+ reg = <0x0490>, <0x045c>, <0x0468>, <0x0460>, <0x0464>;
};
dpll_core_x2_ck: dpll_core_x2_ck {
@@ -204,7 +204,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x0488>, <0x0420>, <0x042c>;
+ reg = <0x0488>, <0x0420>, <0x042c>, <0x0424>, <0x0428>;
};
dpll_mpu_m2_ck: dpll_mpu_m2_ck@4a8 {
@@ -220,7 +220,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-no-gate-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x0494>, <0x0434>, <0x0440>;
+ reg = <0x0494>, <0x0434>, <0x0440>, <0x0438>, <0x043c>;
};
dpll_ddr_m2_ck: dpll_ddr_m2_ck@4a0 {
@@ -244,7 +244,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-no-gate-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x0498>, <0x0448>, <0x0454>;
+ reg = <0x0498>, <0x0448>, <0x0454>, <0x044c>, <0x0450>;
};
dpll_disp_m2_ck: dpll_disp_m2_ck@4a4 {
@@ -261,7 +261,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-no-gate-j-type-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x048c>, <0x0470>, <0x049c>;
+ reg = <0x048c>, <0x0470>, <0x049c>, <0x0474>, <0x0478>;
};
dpll_per_m2_ck: dpll_per_m2_ck@4ac {
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index 1fb22088caeb..039a9ab4c7ea 100644
--- a/arch/arm/boot/dts/am33xx-l4.dtsi
+++ b/arch/arm/boot/dts/am33xx-l4.dtsi
@@ -259,22 +259,22 @@
ranges = <0x00000000 0x0000d000 0x00001000>,
<0x00001000 0x0000e000 0x00001000>;
- tscadc: tscadc@0 {
- compatible = "ti,am3359-tscadc";
- reg = <0x0 0x1000>;
- interrupts = <16>;
- status = "disabled";
- dmas = <&edma 53 0>, <&edma 57 0>;
- dma-names = "fifo0", "fifo1";
+ tscadc: tscadc@0 {
+ compatible = "ti,am3359-tscadc";
+ reg = <0x0 0x1000>;
+ interrupts = <16>;
+ status = "disabled";
+ dmas = <&edma 53 0>, <&edma 57 0>;
+ dma-names = "fifo0", "fifo1";
- tsc {
- compatible = "ti,am3359-tsc";
- };
- am335x_adc: adc {
- #io-channel-cells = <1>;
- compatible = "ti,am3359-adc";
- };
+ tsc {
+ compatible = "ti,am3359-tsc";
+ };
+ am335x_adc: adc {
+ #io-channel-cells = <1>;
+ compatible = "ti,am3359-adc";
};
+ };
};
target-module@10000 { /* 0x44e10000, ap 22 0c.0 */
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 5b213a1e68bb..5e33d0e88f5b 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -40,6 +40,9 @@
ethernet1 = &cpsw_emac1;
spi0 = &spi0;
spi1 = &spi1;
+ mmc0 = &mmc1;
+ mmc1 = &mmc2;
+ mmc2 = &mmc3;
};
cpus {
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index c726cd8dbdf1..314fc5975acb 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -204,7 +204,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-core-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x2d20>, <0x2d24>, <0x2d2c>;
+ reg = <0x2d20>, <0x2d24>, <0x2d2c>, <0x2d48>, <0x2d4c>;
};
dpll_core_x2_ck: dpll_core_x2_ck {
@@ -250,7 +250,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x2d60>, <0x2d64>, <0x2d6c>;
+ reg = <0x2d60>, <0x2d64>, <0x2d6c>, <0x2d88>, <0x2d8c>;
};
dpll_mpu_m2_ck: dpll_mpu_m2_ck@2d70 {
@@ -276,7 +276,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x2da0>, <0x2da4>, <0x2dac>;
+ reg = <0x2da0>, <0x2da4>, <0x2dac>, <0x2dc8>, <0x2dcc>;
};
dpll_ddr_m2_ck: dpll_ddr_m2_ck@2db0 {
@@ -294,7 +294,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x2e20>, <0x2e24>, <0x2e2c>;
+ reg = <0x2e20>, <0x2e24>, <0x2e2c>, <0x2e48>, <0x2e4c>;
};
dpll_disp_m2_ck: dpll_disp_m2_ck@2e30 {
@@ -313,7 +313,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-j-type-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x2de0>, <0x2de4>, <0x2dec>;
+ reg = <0x2de0>, <0x2de4>, <0x2dec>, <0x2e08>, <0x2e0c>;
};
dpll_per_m2_ck: dpll_per_m2_ck@2df0 {
@@ -557,7 +557,7 @@
#clock-cells = <0>;
compatible = "ti,am3-dpll-clock";
clocks = <&sys_clkin_ck>, <&sys_clkin_ck>;
- reg = <0x2e60>, <0x2e64>, <0x2e6c>;
+ reg = <0x2e60>, <0x2e64>, <0x2e6c>, <0x2e88>, <0x2e8c>;
};
dpll_extdev_m2_ck: dpll_extdev_m2_ck@2e70 {
diff --git a/arch/arm/boot/dts/armada-385-atl-x530.dts b/arch/arm/boot/dts/armada-385-atl-x530.dts
new file mode 100644
index 000000000000..ed3f41c7df71
--- /dev/null
+++ b/arch/arm/boot/dts/armada-385-atl-x530.dts
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Device Tree file for Armada 385 Allied Telesis x530/GS980MX Board.
+ * (x530/AT-GS980MX)
+ *
+ * Copyright (C) 2020 Allied Telesis Labs
+ */
+
+/dts-v1/;
+#include "armada-385.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "x530/AT-GS980MX";
+ compatible = "alliedtelesis,gs980mx", "alliedtelesis,x530", "marvell,armada385", "marvell,armada380";
+
+ chosen {
+ stdout-path = "serial1:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x40000000>; /* 1GB */
+ };
+
+ soc {
+ ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+ MBUS_ID(0x01, 0x3d) 0 0xf4800000 0x80000
+ MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000>;
+
+ internal-regs {
+ i2c0: i2c@11000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins>;
+ status = "okay";
+ };
+
+ uart0: serial@12000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pins>;
+ status = "okay";
+ };
+ };
+ };
+};
+
+&pciec {
+ status = "okay";
+};
+
+&pcie1 {
+ status = "okay";
+ reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>;
+ reset-delay-us = <400000>;
+};
+
+&pcie2 {
+ status = "okay";
+};
+
+&devbus_cs1 {
+ compatible = "marvell,mvebu-devbus";
+ status = "okay";
+
+ devbus,bus-width = <8>;
+ devbus,turn-off-ps = <60000>;
+ devbus,badr-skew-ps = <0>;
+ devbus,acc-first-ps = <124000>;
+ devbus,acc-next-ps = <248000>;
+ devbus,rd-setup-ps = <0>;
+ devbus,rd-hold-ps = <0>;
+
+ /* Write parameters */
+ devbus,sync-enable = <0>;
+ devbus,wr-high-ps = <60000>;
+ devbus,wr-low-ps = <60000>;
+ devbus,ale-wr-ps = <60000>;
+
+ nvs@0 {
+ status = "okay";
+
+ compatible = "mtd-ram";
+ reg = <0 0x00080000>;
+ bank-width = <1>;
+ label = "nvs";
+ };
+};
+
+&pinctrl {
+ i2c0_gpio_pins: i2c-gpio-pins-0 {
+ marvell,pins = "mpp2", "mpp3";
+ marvell,function = "gpio";
+ };
+};
+
+&i2c0 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ pinctrl-names = "default", "gpio";
+ pinctrl-0 = <&i2c0_pins>;
+ pinctrl-1 = <&i2c0_gpio_pins>;
+ scl-gpio = <&gpio0 2 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+ sda-gpio = <&gpio0 3 (GPIO_ACTIVE_HIGH|GPIO_OPEN_DRAIN)>;
+
+ i2c0mux: mux@71 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "nxp,pca9544";
+ reg = <0x71>;
+ i2c-mux-idle-disconnect;
+
+ i2c@0 { /* POE devices MUX */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ adt7476_2e: hwmon@2e {
+ compatible = "adi,adt7476";
+ reg = <0x2e>;
+ };
+
+ adt7476_2d: hwmon@2d {
+ compatible = "adi,adt7476";
+ reg = <0x2d>;
+ };
+ };
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ rtc@68 {
+ compatible = "dallas,ds1340";
+ reg = <0x68>;
+ };
+ };
+
+ i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ gpio@20 {
+ compatible = "nxp,pca9554";
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x20>;
+ };
+ };
+ };
+};
+
+&usb0 {
+ status = "okay";
+};
+
+&spi1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1_pins>;
+ status = "okay";
+
+ spi-flash@1 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "jedec,spi-nor";
+ reg = <1>; /* Chip select 1 */
+ spi-max-frequency = <54000000>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@u-boot {
+ reg = <0x00000000 0x00100000>;
+ label = "u-boot";
+ };
+ partition@u-boot-env {
+ reg = <0x00100000 0x00040000>;
+ label = "u-boot-env";
+ };
+ partition@unused {
+ reg = <0x00140000 0x00e80000>;
+ label = "unused";
+ };
+ partition@idprom {
+ reg = <0x00fc0000 0x00040000>;
+ label = "idprom";
+ };
+ };
+ };
+};
+
+&nand_controller {
+ status = "okay";
+
+ nand@0 {
+ reg = <0>;
+ label = "pxa3xx_nand-0";
+ nand-rb = <0>;
+ nand-on-flash-bbt;
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+
+ marvell,nand-enable-arbiter;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ partition@user {
+ reg = <0x00000000 0x0f000000>;
+ label = "user";
+ };
+ partition@errlog {
+ /* Maximum mtdoops size is 8MB, so set to that. */
+ reg = <0x0f000000 0x00800000>;
+ label = "errlog";
+ };
+ partition@nand-bbt {
+ reg = <0x0f800000 0x00800000>;
+ label = "nand-bbt";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts
index 646a06420c77..5bd6a66d2c2b 100644
--- a/arch/arm/boot/dts/armada-385-turris-omnia.dts
+++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts
@@ -32,7 +32,8 @@
ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
- MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
+ MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000
+ MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>;
internal-regs {
@@ -389,6 +390,7 @@
phy1: ethernet-phy@1 {
compatible = "ethernet-phy-ieee802.3-c22";
reg = <1>;
+ marvell,reg-init = <3 18 0 0x4985>;
/* irq is connected to &pcawan pin 7 */
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts b/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts
index ac2d04cfaf2f..6aeb47c44eba 100644
--- a/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-amd-ethanolx.dts
@@ -151,6 +151,31 @@
status = "okay";
};
+//FPGA
+&i2c2 {
+ status = "okay";
+};
+
+//24LC128 EEPROM
+&i2c3 {
+ status = "okay";
+};
+
+//P0 Power regulators
+&i2c4 {
+ status = "okay";
+};
+
+//P1 Power regulators
+&i2c5 {
+ status = "okay";
+};
+
+//P0/P1 Thermal diode
+&i2c6 {
+ status = "okay";
+};
+
// Thermal Sensors
&i2c7 {
status = "okay";
@@ -196,6 +221,11 @@
};
};
+//BMC I2C
+&i2c8 {
+ status = "okay";
+};
+
&kcs1 {
status = "okay";
aspeed,lpc-io-reg = <0x60>;
diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
new file mode 100644
index 000000000000..dcab6e78dfa4
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0+
+/dts-v1/;
+
+#include "aspeed-g5.dtsi"
+#include <dt-bindings/gpio/aspeed-gpio.h>
+#include <dt-bindings/i2c/i2c.h>
+
+/{
+ model = "ASRock E3C246D4I BMC";
+ compatible = "asrock,e3c246d4i-bmc", "aspeed,ast2500";
+
+ aliases {
+ serial4 = &uart5;
+ };
+
+ chosen {
+ stdout-path = &uart5;
+ bootargs = "console=tty0 console=ttyS4,115200 earlyprintk";
+ };
+
+ memory@80000000 {
+ reg = <0x80000000 0x20000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ heartbeat {
+ /* BMC_HB_LED_N */
+ gpios = <&gpio ASPEED_GPIO(H, 6) GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "timer";
+ };
+
+ system-fault {
+ /* SYSTEM_FAULT_LED_N */
+ gpios = <&gpio ASPEED_GPIO(Z, 2) GPIO_ACTIVE_LOW>;
+ panic-indicator;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+
+ uid-button {
+ label = "uid-button";
+ gpios = <&gpio ASPEED_GPIO(F, 1) GPIO_ACTIVE_LOW>;
+ linux,code = <ASPEED_GPIO(F, 1)>;
+ };
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>, <&adc 4>,
+ <&adc 5>, <&adc 6>, <&adc 7>, <&adc 8>, <&adc 9>,
+ <&adc 10>, <&adc 11>, <&adc 12>;
+ };
+};
+
+&fmc {
+ status = "okay";
+ flash@0 {
+ status = "okay";
+ m25p,fast-read;
+ label = "bmc";
+ spi-max-frequency = <100000000>; /* 100 MHz */
+#include "openbmc-flash-layout.dtsi"
+ };
+};
+
+&uart5 {
+ status = "okay";
+};
+
+&vuart {
+ status = "okay";
+ aspeed,sirq-active-high;
+};
+
+&mac0 {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii1_default &pinctrl_mdio1_default>;
+};
+
+&i2c1 {
+ status = "okay";
+
+ /* thermal sensor, one diode run to a disconnected header */
+ w83773g@4c {
+ compatible = "nuvoton,w83773g";
+ reg = <0x4c>;
+ };
+};
+
+&i2c3 {
+ status = "okay";
+
+ /* FRU EEPROM */
+ eeprom@57 {
+ compatible = "st,24c128", "atmel,24c128";
+ reg = <0x57>;
+ pagesize = <16>;
+ };
+};
+
+&video {
+ status = "okay";
+};
+
+&vhub {
+ status = "okay";
+};
+
+&lpc_ctrl {
+ status = "okay";
+};
+
+&lpc_snoop {
+ status = "okay";
+ snoop-ports = <0x80>;
+};
+
+&gpio {
+ status = "okay";
+ gpio-line-names =
+ /* A */ "BMC_MAC1_INTB", "BMC_MAC2_INTB", "NMI_BTN_N", "BMC_NMI",
+ "", "", "", "",
+ /* B */ "", "", "", "", "", "IRQ_BMC_PCH_SMI_LPC_N", "", "",
+ /* C */ "", "", "", "", "", "", "", "",
+ /* D */ "BMC_PSIN", "BMC_PSOUT", "BMC_RESETCON", "RESETCON",
+ "", "", "", "",
+ /* E */ "", "", "", "", "", "", "", "",
+ /* F */ "LOCATORLED_STATUS_N", "LOCATORBTN", "", "",
+ "", "", "BMC_PCH_SCI_LPC", "BMC_NCSI_MUX_CTL",
+ /* G */ "HWM_BAT_EN", "CHASSIS_ID0", "CHASSIS_ID1", "CHASSIS_ID2",
+ "BMC_ALERT1_N_R", "BMC_ALERT2_N_R", "BMC_ALERT3_N", "SML0ALERT",
+ /* H */ "FM_ME_RCVR_N", "O_PWROK", "SKL_CNL_R", "D4_DIMM_EVENT_3V_N",
+ "MFG_MODE_N", "BMC_RTCRST", "BMC_HB_LED_N", "BMC_CASEOPEN",
+ /* I */ "", "", "", "", "", "", "", "",
+ /* J */ "BMC_READY", "BMC_PCH_BIOS_CS_N", "BMC_SMI", "",
+ "", "", "", "",
+ /* K */ "", "", "", "", "", "", "", "",
+ /* L */ "BMC_CTS1", "BMC_DCD1", "BMC_DSR1", "BMC_RI1",
+ "BMC_DTR1", "BMC_RTS1", "BMC_TXD1", "BMC_RXD1",
+ /* M */ "BMC_LAN0_DIS_N", "BMC_LAN1_DIS_N", "", "",
+ "", "", "", "",
+ /* N */ "", "", "", "", "", "", "", "",
+ /* O */ "", "", "", "", "", "", "", "",
+ /* P */ "", "", "", "", "", "", "", "",
+ /* Q */ "", "", "", "",
+ "BMC_SBM_PRESENT_1_N", "BMC_SBM_PRESENT_2_N",
+ "BMC_SBM_PRESENT_3_N", "BMC_PCIE_WAKE_N",
+ /* R */ "", "", "", "", "", "", "", "",
+ /* S */ "PCHHOT_BMC_N", "", "RSMRST",
+ "", "", "", "", "",
+ /* T */ "", "", "", "", "", "", "", "",
+ /* U */ "", "", "", "", "", "", "", "",
+ /* V */ "", "", "", "", "", "", "", "",
+ /* W */ "PS_PWROK", /* dummy always-high signal */
+ "", "", "", "", "", "", "",
+ /* X */ "", "", "", "", "", "", "", "",
+ /* Y */ "SLP_S3", "SLP_S5", "", "", "", "", "", "",
+ /* Z */ "CPU_CATERR_BMC_PCH_N", "", "SYSTEM_FAULT_LED_N", "BMC_THROTTLE_N",
+ "", "", "", "",
+ /* AA */ "CPU1_THERMTRIP_LATCH_N", "", "CPU1_PROCHOT_N", "",
+ "", "", "IRQ_SMI_ACTIVE_N", "FM_BIOS_POST_CMPLT_N",
+ /* AB */ "", "", "ME_OVERRIDE", "BMC_DMI_MODIFY",
+ "", "", "", "",
+ /* AC */ "LAD0", "LAD1", "LAD2", "LAD3",
+ "CK_33M_BMC", "LFRAME", "SERIRQ", "S_PLTRST";
+
+ /* Assert BMC_READY so BIOS doesn't sit around waiting for it */
+ bmc-ready {
+ gpio-hog;
+ gpios = <ASPEED_GPIO(J, 0) GPIO_ACTIVE_LOW>;
+ output-high;
+ };
+};
+
+&adc {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_adc0_default
+ &pinctrl_adc1_default
+ &pinctrl_adc2_default
+ &pinctrl_adc3_default
+ &pinctrl_adc4_default
+ &pinctrl_adc5_default
+ &pinctrl_adc6_default
+ &pinctrl_adc7_default
+ &pinctrl_adc8_default
+ &pinctrl_adc9_default
+ &pinctrl_adc10_default
+ &pinctrl_adc11_default
+ &pinctrl_adc12_default>;
+};
+
+&kcs3 {
+ status = "okay";
+ aspeed,lpc-io-reg = <0xca2>;
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
index cd18641d5c23..7b4b2b126ad8 100644
--- a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
@@ -507,6 +507,11 @@
&i2c7 {
status = "okay";
//HSC, AirMax Conn A
+ adm1278@45 {
+ compatible = "adm1275";
+ reg = <0x45>;
+ shunt-resistor-micro-ohms = <250>;
+ };
};
&i2c8 {
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
index 6bd876657bb8..3295c8c7c05c 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
@@ -44,6 +44,58 @@
i2c415 = &cfam3_i2c15;
i2c416 = &cfam3_i2c16;
i2c417 = &cfam3_i2c17;
+ i2c500 = &cfam4_i2c0;
+ i2c501 = &cfam4_i2c1;
+ i2c510 = &cfam4_i2c10;
+ i2c511 = &cfam4_i2c11;
+ i2c512 = &cfam4_i2c12;
+ i2c513 = &cfam4_i2c13;
+ i2c514 = &cfam4_i2c14;
+ i2c515 = &cfam4_i2c15;
+ i2c602 = &cfam5_i2c2;
+ i2c603 = &cfam5_i2c3;
+ i2c610 = &cfam5_i2c10;
+ i2c611 = &cfam5_i2c11;
+ i2c614 = &cfam5_i2c14;
+ i2c615 = &cfam5_i2c15;
+ i2c616 = &cfam5_i2c16;
+ i2c617 = &cfam5_i2c17;
+ i2c700 = &cfam6_i2c0;
+ i2c701 = &cfam6_i2c1;
+ i2c710 = &cfam6_i2c10;
+ i2c711 = &cfam6_i2c11;
+ i2c712 = &cfam6_i2c12;
+ i2c713 = &cfam6_i2c13;
+ i2c714 = &cfam6_i2c14;
+ i2c715 = &cfam6_i2c15;
+ i2c802 = &cfam7_i2c2;
+ i2c803 = &cfam7_i2c3;
+ i2c810 = &cfam7_i2c10;
+ i2c811 = &cfam7_i2c11;
+ i2c814 = &cfam7_i2c14;
+ i2c815 = &cfam7_i2c15;
+ i2c816 = &cfam7_i2c16;
+ i2c817 = &cfam7_i2c17;
+
+ i2c16 = &i2c4mux0chn0;
+ i2c17 = &i2c4mux0chn1;
+ i2c18 = &i2c4mux0chn2;
+ i2c19 = &i2c5mux0chn0;
+ i2c20 = &i2c5mux0chn1;
+ i2c21 = &i2c5mux0chn2;
+ i2c22 = &i2c5mux0chn3;
+ i2c23 = &i2c6mux0chn0;
+ i2c24 = &i2c6mux0chn1;
+ i2c25 = &i2c6mux0chn2;
+ i2c26 = &i2c6mux0chn3;
+ i2c27 = &i2c14mux0chn0;
+ i2c28 = &i2c14mux0chn1;
+ i2c29 = &i2c14mux0chn2;
+ i2c30 = &i2c14mux0chn3;
+ i2c31 = &i2c14mux1chn0;
+ i2c32 = &i2c14mux1chn1;
+ i2c33 = &i2c14mux1chn2;
+ i2c34 = &i2c14mux1chn3;
serial4 = &uart5;
@@ -63,6 +115,22 @@
spi41 = &cfam3_spi1;
spi42 = &cfam3_spi2;
spi43 = &cfam3_spi3;
+ spi50 = &cfam4_spi0;
+ spi51 = &cfam4_spi1;
+ spi52 = &cfam4_spi2;
+ spi53 = &cfam4_spi3;
+ spi60 = &cfam5_spi0;
+ spi61 = &cfam5_spi1;
+ spi62 = &cfam5_spi2;
+ spi63 = &cfam5_spi3;
+ spi70 = &cfam6_spi0;
+ spi71 = &cfam6_spi1;
+ spi72 = &cfam6_spi2;
+ spi73 = &cfam6_spi3;
+ spi80 = &cfam7_spi0;
+ spi81 = &cfam7_spi1;
+ spi82 = &cfam7_spi2;
+ spi83 = &cfam7_spi3;
};
chosen {
@@ -103,6 +171,889 @@
reg = <0xbf000000 0x01000000>; /* 16M */
};
};
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <1000>;
+
+ fan0-presence {
+ label = "fan0-presence";
+ gpios = <&pca0 15 GPIO_ACTIVE_LOW>;
+ linux,code = <15>;
+ };
+
+ fan1-presence {
+ label = "fan1-presence";
+ gpios = <&pca0 14 GPIO_ACTIVE_LOW>;
+ linux,code = <14>;
+ };
+
+ fan2-presence {
+ label = "fan2-presence";
+ gpios = <&pca0 13 GPIO_ACTIVE_LOW>;
+ linux,code = <13>;
+ };
+
+ fan3-presence {
+ label = "fan3-presence";
+ gpios = <&pca0 12 GPIO_ACTIVE_LOW>;
+ linux,code = <12>;
+ };
+ };
+};
+
+&gpio0 {
+ gpio-line-names =
+ /*A0-A7*/ "","","","","","","","",
+ /*B0-B7*/ "USERSPACE_RSTIND_BUFF","","","","","","","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "","","","","","","","",
+ /*E0-E7*/ "","","","","","","","",
+ /*F0-F7*/ "PIN_HOLE_RESET_IN_N","","",
+ "PIN_HOLE_RESET_OUT_N","","","","",
+ /*G0-G7*/ "","","","","","","","",
+ /*H0-H7*/ "","","","","","","","",
+ /*I0-I7*/ "","","","","","","","",
+ /*J0-J7*/ "","","","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "","","","","","","","",
+ /*O0-O7*/ "","","","","","","","",
+ /*P0-P7*/ "","","","","","","","",
+ /*Q0-Q7*/ "","","","","","","","",
+ /*R0-R7*/ "","","","","","I2C_FLASH_MICRO_N","","",
+ /*S0-S7*/ "","","","","","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","BMC_3RESTART_ATTEMPT_P","","","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "","","","","","","","";
+};
+
+&i2c0 {
+ status = "okay";
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+
+ pca1: pca9552@62 {
+ compatible = "nxp,pca9552";
+ reg = <0x62>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "presence-ps0",
+ "presence-ps1",
+ "presence-ps2",
+ "presence-ps3",
+ "presence-pdb",
+ "presence-tpm",
+ "", "",
+ "presence-cp0",
+ "presence-cp1",
+ "presence-cp2",
+ "presence-cp3",
+ "presence-dasd",
+ "presence-lcd-op",
+ "presence-base-op",
+ "";
+
+ gpio@0 {
+ reg = <0>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@1 {
+ reg = <1>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@8 {
+ reg = <8>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@9 {
+ reg = <9>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@10 {
+ reg = <10>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@11 {
+ reg = <11>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@12 {
+ reg = <12>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@13 {
+ reg = <13>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@14 {
+ reg = <14>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@15 {
+ reg = <15>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
+};
+
+&i2c1 {
+ status = "okay";
+
+ pca2: pca9552@61 {
+ compatible = "nxp,pca9552";
+ reg = <0x61>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "presence-cable-card1",
+ "presence-cable-card2",
+ "presence-cable-card3",
+ "presence-cable-card4",
+ "presence-cable-card5",
+ "expander-cable-card1",
+ "expander-cable-card2",
+ "expander-cable-card3",
+ "expander-cable-card4",
+ "expander-cable-card5";
+
+ gpio@0 {
+ reg = <0>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@1 {
+ reg = <1>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@8 {
+ reg = <8>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@9 {
+ reg = <9>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
+
+ pca3: pca9552@62 {
+ compatible = "nxp,pca9552";
+ reg = <0x62>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "presence-cable-card6",
+ "presence-cable-card7",
+ "presence-cable-card8",
+ "presence-cable-card9",
+ "presence-cable-card10",
+ "presence-cable-card11",
+ "expander-cable-card6",
+ "expander-cable-card7",
+ "expander-cable-card8",
+ "expander-cable-card9",
+ "expander-cable-card10",
+ "expander-cable-card11";
+
+ gpio@0 {
+ reg = <0>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@1 {
+ reg = <1>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@8 {
+ reg = <8>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@9 {
+ reg = <9>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@10 {
+ reg = <10>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@11 {
+ reg = <11>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
+};
+
+&i2c2 {
+ status = "okay";
+};
+
+&i2c3 {
+ status = "okay";
+
+ eeprom@54 {
+ compatible = "atmel,24c128";
+ reg = <0x54>;
+ };
+
+ power-supply@68 {
+ compatible = "ibm,cffps";
+ reg = <0x68>;
+ };
+
+ power-supply@69 {
+ compatible = "ibm,cffps";
+ reg = <0x69>;
+ };
+
+ power-supply@6a {
+ compatible = "ibm,cffps";
+ reg = <0x6a>;
+ };
+
+ power-supply@6b {
+ compatible = "ibm,cffps";
+ reg = <0x6b>;
+ };
+};
+
+&i2c4 {
+ status = "okay";
+
+ i2c-switch@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+ i2c-mux-idle-disconnect;
+
+ i2c4mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+ };
+
+ i2c4mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ i2c4mux0chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+ };
+ };
+};
+
+&i2c5 {
+ status = "okay";
+
+ i2c-switch@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+ i2c-mux-idle-disconnect;
+
+ i2c5mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ i2c5mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+ };
+
+ i2c5mux0chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+ };
+
+ i2c5mux0chn3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+ };
+ };
+};
+
+&i2c6 {
+ status = "okay";
+
+ i2c-switch@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+ i2c-mux-idle-disconnect;
+
+ i2c6mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ i2c6mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+ };
+
+ i2c6mux0chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+ };
+
+ i2c6mux0chn3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+ };
+ };
+};
+
+&i2c7 {
+ status = "okay";
+};
+
+&i2c8 {
+ status = "okay";
+
+ ucd90320@11 {
+ compatible = "ti,ucd90320";
+ reg = <0x11>;
+ };
+
+ rtc@32 {
+ compatible = "epson,rx8900";
+ reg = <0x32>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+};
+
+&i2c9 {
+ status = "okay";
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@53 {
+ compatible = "atmel,24c128";
+ reg = <0x53>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c128";
+ reg = <0x52>;
+ };
+};
+
+&i2c10 {
+ status = "okay";
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@53 {
+ compatible = "atmel,24c128";
+ reg = <0x53>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c128";
+ reg = <0x52>;
+ };
+};
+
+&i2c11 {
+ status = "okay";
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@53 {
+ compatible = "atmel,24c128";
+ reg = <0x53>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c128";
+ reg = <0x52>;
+ };
+};
+
+&i2c12 {
+ status = "okay";
+};
+
+&i2c13 {
+ status = "okay";
+
+ eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ };
+
+ eeprom@50 {
+ compatible = "atmel,24c128";
+ reg = <0x50>;
+ };
+
+ eeprom@53 {
+ compatible = "atmel,24c128";
+ reg = <0x53>;
+ };
+
+ eeprom@52 {
+ compatible = "atmel,24c128";
+ reg = <0x52>;
+ };
+};
+
+&i2c14 {
+ status = "okay";
+
+ i2c-switch@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+ i2c-mux-idle-disconnect;
+
+ i2c14mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ i2c14mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ eeprom@51 {
+ compatible = "atmel,24c32";
+ reg = <0x51>;
+ };
+ };
+
+ i2c14mux0chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+ };
+
+ i2c14mux0chn3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ max31785@52 {
+ compatible = "maxim,max31785a";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x52>;
+
+ fan@0 {
+ compatible = "pmbus-fan";
+ reg = <0>;
+ tach-pulses = <2>;
+ };
+
+ fan@1 {
+ compatible = "pmbus-fan";
+ reg = <1>;
+ tach-pulses = <2>;
+ };
+
+ fan@2 {
+ compatible = "pmbus-fan";
+ reg = <2>;
+ tach-pulses = <2>;
+ };
+
+ fan@3 {
+ compatible = "pmbus-fan";
+ reg = <3>;
+ tach-pulses = <2>;
+ };
+ };
+
+ pca0: pca9552@61 {
+ compatible = "nxp,pca9552";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x61>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "","","","",
+ "","","","",
+ "","","","",
+ "presence-fan3",
+ "presence-fan2",
+ "presence-fan1",
+ "presence-fan0";
+
+ gpio@0 {
+ reg = <0>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@1 {
+ reg = <1>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@8 {
+ reg = <8>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@9 {
+ reg = <9>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@10 {
+ reg = <10>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@11 {
+ reg = <11>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@12 {
+ reg = <12>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@13 {
+ reg = <13>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@14 {
+ reg = <14>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@15 {
+ reg = <15>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
+ };
+ };
+
+ i2c-switch@71 {
+ compatible = "nxp,pca9546";
+ reg = <0x71>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+ i2c-mux-idle-disconnect;
+
+ i2c14mux1chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+ };
+
+ i2c14mux1chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+ };
+
+ i2c14mux1chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+ };
+
+ i2c14mux1chn3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ eeprom@50 {
+ compatible = "atmel,24c32";
+ reg = <0x50>;
+ };
+ };
+ };
+};
+
+&i2c15 {
+ status = "okay";
};
&ehci1 {
@@ -133,7 +1084,7 @@
*/
cfam-reset-gpios = <&gpio0 ASPEED_GPIO(Q, 0) GPIO_ACTIVE_HIGH>;
- cfam@0,0 {
+ cfam@0,0 { /* DCM0_C0 */
reg = <0 0>;
#address-cells = <1>;
#size-cells = <1>;
@@ -277,7 +1228,7 @@
};
&fsi_hub0 {
- cfam@1,0 {
+ cfam@1,0 { /* DCM0_C1 */
reg = <1 0>;
#address-cells = <1>;
#size-cells = <1>;
@@ -421,7 +1372,7 @@
};
};
- cfam@2,0 {
+ cfam@2,0 { /* DCM1_C0 */
reg = <2 0>;
#address-cells = <1>;
#size-cells = <1>;
@@ -565,7 +1516,7 @@
};
};
- cfam@3,0 {
+ cfam@3,0 { /* DCM1_C1 */
reg = <3 0>;
#address-cells = <1>;
#size-cells = <1>;
@@ -708,6 +1659,582 @@
no-scan-on-init;
};
};
+
+ cfam@4,0 { /* DCM2_C0 */
+ reg = <4 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <4>;
+
+ scom@1000 {
+ compatible = "ibm,fsi2pib";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,fsi-i2c-master";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam4_i2c0: i2c-bus@0 {
+ reg = <0>; /* OM01 */
+ };
+
+ cfam4_i2c1: i2c-bus@1 {
+ reg = <1>; /* OM23 */
+ };
+
+ cfam4_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ };
+
+ cfam4_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ };
+
+ cfam4_i2c12: i2c-bus@c {
+ reg = <12>; /* OP4A */
+ };
+
+ cfam4_i2c13: i2c-bus@d {
+ reg = <13>; /* OP4B */
+ };
+
+ cfam4_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ };
+
+ cfam4_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam4_spi0: spi@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam4_spi1: spi@20 {
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam4_spi2: spi@40 {
+ reg = <0x40>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam4_spi3: spi@60 {
+ reg = <0x60>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi_occ4: occ {
+ compatible = "ibm,p10-occ";
+ };
+ };
+
+ fsi_hub4: hub@3400 {
+ compatible = "fsi-master-hub";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ no-scan-on-init;
+ };
+ };
+
+ cfam@5,0 { /* DCM2_C1 */
+ reg = <5 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <5>;
+
+ scom@1000 {
+ compatible = "ibm,fsi2pib";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,fsi-i2c-master";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam5_i2c2: i2c-bus@2 {
+ reg = <2>; /* OM45 */
+ };
+
+ cfam5_i2c3: i2c-bus@3 {
+ reg = <3>; /* OM67 */
+ };
+
+ cfam5_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ };
+
+ cfam5_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ };
+
+ cfam5_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ };
+
+ cfam5_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ };
+
+ cfam5_i2c16: i2c-bus@10 {
+ reg = <16>; /* OP6A */
+ };
+
+ cfam5_i2c17: i2c-bus@11 {
+ reg = <17>; /* OP6B */
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam5_spi0: spi@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam5_spi1: spi@20 {
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam5_spi2: spi@40 {
+ reg = <0x40>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam5_spi3: spi@60 {
+ reg = <0x60>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi_occ5: occ {
+ compatible = "ibm,p10-occ";
+ };
+ };
+
+ fsi_hub5: hub@3400 {
+ compatible = "fsi-master-hub";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ no-scan-on-init;
+ };
+ };
+
+ cfam@6,0 { /* DCM3_C0 */
+ reg = <6 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <6>;
+
+ scom@1000 {
+ compatible = "ibm,fsi2pib";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,fsi-i2c-master";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam6_i2c0: i2c-bus@0 {
+ reg = <0>; /* OM01 */
+ };
+
+ cfam6_i2c1: i2c-bus@1 {
+ reg = <1>; /* OM23 */
+ };
+
+ cfam6_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ };
+
+ cfam6_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ };
+
+ cfam6_i2c12: i2c-bus@c {
+ reg = <12>; /* OP4A */
+ };
+
+ cfam6_i2c13: i2c-bus@d {
+ reg = <13>; /* OP4B */
+ };
+
+ cfam6_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ };
+
+ cfam6_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam6_spi0: spi@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam6_spi1: spi@20 {
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam6_spi2: spi@40 {
+ reg = <0x40>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam6_spi3: spi@60 {
+ reg = <0x60>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi_occ6: occ {
+ compatible = "ibm,p10-occ";
+ };
+ };
+
+ fsi_hub6: hub@3400 {
+ compatible = "fsi-master-hub";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ no-scan-on-init;
+ };
+ };
+
+ cfam@7,0 { /* DCM3_C1 */
+ reg = <7 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <7>;
+
+ scom@1000 {
+ compatible = "ibm,fsi2pib";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,fsi-i2c-master";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam7_i2c2: i2c-bus@2 {
+ reg = <2>; /* OM45 */
+ };
+
+ cfam7_i2c3: i2c-bus@3 {
+ reg = <3>; /* OM67 */
+ };
+
+ cfam7_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ };
+
+ cfam7_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ };
+
+ cfam7_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ };
+
+ cfam7_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ };
+
+ cfam7_i2c16: i2c-bus@10 {
+ reg = <16>; /* OP6A */
+ };
+
+ cfam7_i2c17: i2c-bus@11 {
+ reg = <17>; /* OP6B */
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam7_spi0: spi@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam7_spi1: spi@20 {
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam7_spi2: spi@40 {
+ reg = <0x40>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam7_spi3: spi@60 {
+ reg = <0x60>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi_occ7: occ {
+ compatible = "ibm,p10-occ";
+ };
+ };
+
+ fsi_hub7: hub@3400 {
+ compatible = "fsi-master-hub";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ no-scan-on-init;
+ };
+ };
};
/* Legacy OCC numbering (to get rid of when userspace is fixed) */
@@ -727,6 +2254,22 @@
reg = <4>;
};
+&fsi_occ4 {
+ reg = <5>;
+};
+
+&fsi_occ5 {
+ reg = <6>;
+};
+
+&fsi_occ6 {
+ reg = <7>;
+};
+
+&fsi_occ7 {
+ reg = <8>;
+};
+
&ibt {
status = "okay";
};
@@ -769,6 +2312,20 @@
use-ncsi;
};
+&wdt1 {
+ aspeed,reset-type = "none";
+ aspeed,external-signal;
+ aspeed,ext-push-pull;
+ aspeed,ext-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdtrst1_default>;
+};
+
+&wdt2 {
+ status = "okay";
+};
+
&xdma {
status = "okay";
memory-region = <&vga_memory>;
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-1s4u.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-1s4u.dts
new file mode 100644
index 000000000000..f5f5b18c113a
--- /dev/null
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-1s4u.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+// Copyright 2021 IBM Corp.
+/dts-v1/;
+
+#include "aspeed-bmc-ibm-rainier-4u.dts"
+
+/ {
+ model = "Rainier 1S4U";
+};
+
+&max {
+ /delete-node/ fan3;
+ /delete-node/ fan5;
+};
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-4u.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-4u.dts
index 291f7d6c9979..f7fd3b3c90d0 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-4u.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier-4u.dts
@@ -22,16 +22,30 @@
&fan0 {
tach-pulses = <4>;
+ /delete-property/ maxim,fan-dual-tach;
};
&fan1 {
tach-pulses = <4>;
+ /delete-property/ maxim,fan-dual-tach;
};
&fan2 {
tach-pulses = <4>;
+ /delete-property/ maxim,fan-dual-tach;
};
&fan3 {
tach-pulses = <4>;
+ /delete-property/ maxim,fan-dual-tach;
+};
+
+&fan4 {
+ tach-pulses = <4>;
+ /delete-property/ maxim,fan-dual-tach;
+};
+
+&fan5 {
+ tach-pulses = <4>;
+ /delete-property/ maxim,fan-dual-tach;
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
index 6c9804d2f3b4..941c0489479a 100644
--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
@@ -12,11 +12,55 @@
compatible = "ibm,rainier-bmc", "aspeed,ast2600";
aliases {
+ i2c100 = &cfam0_i2c0;
+ i2c101 = &cfam0_i2c1;
+ i2c110 = &cfam0_i2c10;
+ i2c111 = &cfam0_i2c11;
+ i2c112 = &cfam0_i2c12;
+ i2c113 = &cfam0_i2c13;
+ i2c114 = &cfam0_i2c14;
+ i2c115 = &cfam0_i2c15;
+ i2c202 = &cfam1_i2c2;
+ i2c203 = &cfam1_i2c3;
+ i2c210 = &cfam1_i2c10;
+ i2c211 = &cfam1_i2c11;
+ i2c214 = &cfam1_i2c14;
+ i2c215 = &cfam1_i2c15;
+ i2c216 = &cfam1_i2c16;
+ i2c217 = &cfam1_i2c17;
+ i2c300 = &cfam2_i2c0;
+ i2c301 = &cfam2_i2c1;
+ i2c310 = &cfam2_i2c10;
+ i2c311 = &cfam2_i2c11;
+ i2c312 = &cfam2_i2c12;
+ i2c313 = &cfam2_i2c13;
+ i2c314 = &cfam2_i2c14;
+ i2c315 = &cfam2_i2c15;
+ i2c402 = &cfam3_i2c2;
+ i2c403 = &cfam3_i2c3;
+ i2c410 = &cfam3_i2c10;
+ i2c411 = &cfam3_i2c11;
+ i2c414 = &cfam3_i2c14;
+ i2c415 = &cfam3_i2c15;
+ i2c416 = &cfam3_i2c16;
+ i2c417 = &cfam3_i2c17;
+
serial4 = &uart5;
i2c16 = &i2c2mux0;
i2c17 = &i2c2mux1;
i2c18 = &i2c2mux2;
i2c19 = &i2c2mux3;
+ i2c20 = &i2c4mux0chn0;
+ i2c21 = &i2c4mux0chn1;
+ i2c22 = &i2c4mux0chn2;
+ i2c23 = &i2c5mux0chn0;
+ i2c24 = &i2c5mux0chn1;
+ i2c25 = &i2c6mux0chn0;
+ i2c26 = &i2c6mux0chn1;
+ i2c27 = &i2c6mux0chn2;
+ i2c28 = &i2c6mux0chn3;
+ i2c29 = &i2c11mux0chn0;
+ i2c30 = &i2c11mux0chn1;
spi10 = &cfam0_spi0;
spi11 = &cfam0_spi1;
@@ -30,6 +74,10 @@
spi31 = &cfam2_spi1;
spi32 = &cfam2_spi2;
spi33 = &cfam2_spi3;
+ spi40 = &cfam3_spi0;
+ spi41 = &cfam3_spi1;
+ spi42 = &cfam3_spi2;
+ spi43 = &cfam3_spi3;
};
chosen {
@@ -131,6 +179,73 @@
reg = <3>;
};
};
+
+ leds {
+ compatible = "gpio-leds";
+
+ /* BMC Card fault LED at the back */
+ bmc-ingraham0 {
+ gpios = <&gpio0 ASPEED_GPIO(H, 1) GPIO_ACTIVE_LOW>;
+ };
+
+ /* Enclosure ID LED at the back */
+ rear-enc-id0 {
+ gpios = <&gpio0 ASPEED_GPIO(H, 2) GPIO_ACTIVE_LOW>;
+ };
+
+ /* Enclosure fault LED at the back */
+ rear-enc-fault0 {
+ gpios = <&gpio0 ASPEED_GPIO(H, 3) GPIO_ACTIVE_LOW>;
+ };
+
+ /* PCIE slot power LED */
+ pcieslot-power {
+ gpios = <&gpio0 ASPEED_GPIO(P, 4) GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <1000>;
+
+ fan0-presence {
+ label = "fan0-presence";
+ gpios = <&pca0 6 GPIO_ACTIVE_LOW>;
+ linux,code = <6>;
+ };
+
+ fan1-presence {
+ label = "fan1-presence";
+ gpios = <&pca0 7 GPIO_ACTIVE_LOW>;
+ linux,code = <7>;
+ };
+
+ fan2-presence {
+ label = "fan2-presence";
+ gpios = <&pca0 8 GPIO_ACTIVE_LOW>;
+ linux,code = <8>;
+ };
+
+ fan3-presence {
+ label = "fan3-presence";
+ gpios = <&pca0 9 GPIO_ACTIVE_LOW>;
+ linux,code = <9>;
+ };
+
+ fan4-presence {
+ label = "fan4-presence";
+ gpios = <&pca0 10 GPIO_ACTIVE_LOW>;
+ linux,code = <10>;
+ };
+
+ fan5-presence {
+ label = "fan5-presence";
+ gpios = <&pca0 11 GPIO_ACTIVE_LOW>;
+ linux,code = <11>;
+ };
+ };
};
&ehci1 {
@@ -146,7 +261,7 @@
/*E0-E7*/ "","","","","","","","",
/*F0-F7*/ "","","","","","","","",
/*G0-G7*/ "","","","","","","","",
- /*H0-H7*/ "","","","","","","","",
+ /*H0-H7*/ "","bmc-ingraham0","rear-enc-id0","rear-enc-fault0","","","","",
/*I0-I7*/ "","","","","","","","",
/*J0-J7*/ "","","","","","","","",
/*K0-K7*/ "","","","","","","","",
@@ -154,7 +269,7 @@
/*M0-M7*/ "","","","","","","","",
/*N0-N7*/ "","","","","","","","",
/*O0-O7*/ "","","","usb-power","","","","",
- /*P0-P7*/ "","","","","","","","",
+ /*P0-P7*/ "","","","","pcieslot-power","","","",
/*Q0-Q7*/ "cfam-reset","","","","","","","",
/*R0-R7*/ "","","","","","","","",
/*S0-S7*/ "presence-ps0","presence-ps1","presence-ps2","presence-ps3",
@@ -226,6 +341,38 @@
reg = <0x1800 0x400>;
#address-cells = <1>;
#size-cells = <0>;
+
+ cfam0_i2c0: i2c-bus@0 {
+ reg = <0>; /* OMI01 */
+ };
+
+ cfam0_i2c1: i2c-bus@1 {
+ reg = <1>; /* OMI23 */
+ };
+
+ cfam0_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ };
+
+ cfam0_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ };
+
+ cfam0_i2c12: i2c-bus@c {
+ reg = <12>; /* OP4A */
+ };
+
+ cfam0_i2c13: i2c-bus@d {
+ reg = <13>; /* OP4B */
+ };
+
+ cfam0_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ };
+
+ cfam0_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ };
};
fsi2spi@1c00 {
@@ -317,8 +464,6 @@
reg = <0x3400 0x400>;
#address-cells = <2>;
#size-cells = <0>;
-
- no-scan-on-init;
};
};
};
@@ -340,6 +485,38 @@
reg = <0x1800 0x400>;
#address-cells = <1>;
#size-cells = <0>;
+
+ cfam1_i2c2: i2c-bus@2 {
+ reg = <2>; /* OMI45 */
+ };
+
+ cfam1_i2c3: i2c-bus@3 {
+ reg = <3>; /* OMI67 */
+ };
+
+ cfam1_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ };
+
+ cfam1_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ };
+
+ cfam1_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ };
+
+ cfam1_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ };
+
+ cfam1_i2c16: i2c-bus@10 {
+ reg = <16>; /* OP6A */
+ };
+
+ cfam1_i2c17: i2c-bus@11 {
+ reg = <17>; /* OP6B */
+ };
};
fsi2spi@1c00 {
@@ -452,6 +629,38 @@
reg = <0x1800 0x400>;
#address-cells = <1>;
#size-cells = <0>;
+
+ cfam2_i2c0: i2c-bus@0 {
+ reg = <0>; /* OM01 */
+ };
+
+ cfam2_i2c1: i2c-bus@1 {
+ reg = <1>; /* OM23 */
+ };
+
+ cfam2_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ };
+
+ cfam2_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ };
+
+ cfam2_i2c12: i2c-bus@c {
+ reg = <12>; /* OP4A */
+ };
+
+ cfam2_i2c13: i2c-bus@d {
+ reg = <13>; /* OP4B */
+ };
+
+ cfam2_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ };
+
+ cfam2_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ };
};
fsi2spi@1c00 {
@@ -547,6 +756,150 @@
no-scan-on-init;
};
};
+
+ cfam@3,0 {
+ reg = <3 0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chip-id = <3>;
+
+ scom@1000 {
+ compatible = "ibm,fsi2pib";
+ reg = <0x1000 0x400>;
+ };
+
+ i2c@1800 {
+ compatible = "ibm,fsi-i2c-master";
+ reg = <0x1800 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam3_i2c2: i2c-bus@2 {
+ reg = <2>; /* OM45 */
+ };
+
+ cfam3_i2c3: i2c-bus@3 {
+ reg = <3>; /* OM67 */
+ };
+
+ cfam3_i2c10: i2c-bus@a {
+ reg = <10>; /* OP3A */
+ };
+
+ cfam3_i2c11: i2c-bus@b {
+ reg = <11>; /* OP3B */
+ };
+
+ cfam3_i2c14: i2c-bus@e {
+ reg = <14>; /* OP5A */
+ };
+
+ cfam3_i2c15: i2c-bus@f {
+ reg = <15>; /* OP5B */
+ };
+
+ cfam3_i2c16: i2c-bus@10 {
+ reg = <16>; /* OP6A */
+ };
+
+ cfam3_i2c17: i2c-bus@11 {
+ reg = <17>; /* OP6B */
+ };
+ };
+
+ fsi2spi@1c00 {
+ compatible = "ibm,fsi2spi";
+ reg = <0x1c00 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cfam3_spi0: spi@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam3_spi1: spi@20 {
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam3_spi2: spi@40 {
+ reg = <0x40>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+
+ cfam3_spi3: spi@60 {
+ reg = <0x60>;
+ compatible = "ibm,fsi2spi-restricted";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@0 {
+ at25,byte-len = <0x80000>;
+ at25,addr-mode = <4>;
+ at25,page-size = <256>;
+
+ compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ };
+ };
+ };
+
+ sbefifo@2400 {
+ compatible = "ibm,p9-sbefifo";
+ reg = <0x2400 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ fsi_occ3: occ {
+ compatible = "ibm,p10-occ";
+ };
+ };
+
+ fsi_hub3: hub@3400 {
+ compatible = "fsi-master-hub";
+ reg = <0x3400 0x400>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ no-scan-on-init;
+ };
+ };
};
/* Legacy OCC numbering (to get rid of when userspace is fixed) */
@@ -562,6 +915,10 @@
reg = <3>;
};
+&fsi_occ3 {
+ reg = <4>;
+};
+
&ibt {
status = "okay";
};
@@ -574,20 +931,64 @@
reg = <0x51>;
};
- tca9554@40 {
+ tca_pres1: tca9554@20 {
compatible = "ti,tca9554";
- reg = <0x40>;
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
gpio-controller;
#gpio-cells = <2>;
- smbus0-hog {
- gpio-hog;
- gpios = <4 GPIO_ACTIVE_HIGH>;
- output-high;
- line-name = "smbus0";
+ gpio-line-names = "",
+ "RUSSEL_FW_I2C_ENABLE_N",
+ "RUSSEL_OPPANEL_PRESENCE_N",
+ "BLYTH_OPPANEL_PRESENCE_N",
+ "CPU_TPM_CARD_PRESENT_N",
+ "DASD_BP2_PRESENT_N",
+ "DASD_BP1_PRESENT_N",
+ "DASD_BP0_PRESENT_N";
+
+ gpio@0 {
+ reg = <0>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@1 {
+ reg = <1>;
+ type = <PCA955X_TYPE_GPIO>;
};
- };
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
};
&i2c1 {
@@ -610,6 +1011,104 @@
compatible = "ibm,cffps";
reg = <0x69>;
};
+
+ pca_pres1: pca9552@61 {
+ compatible = "nxp,pca9552";
+ reg = <0x61>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "SLOT0_PRSNT_EN_RSVD", "SLOT1_PRSNT_EN_RSVD",
+ "SLOT2_PRSNT_EN_RSVD", "SLOT3_PRSNT_EN_RSVD",
+ "SLOT4_PRSNT_EN_RSVD", "SLOT0_EXPANDER_PRSNT_N",
+ "SLOT1_EXPANDER_PRSNT_N", "SLOT2_EXPANDER_PRSNT_N",
+ "SLOT3_EXPANDER_PRSNT_N", "SLOT4_EXPANDER_PRSNT_N",
+ "", "", "", "", "", "";
+
+ gpio@0 {
+ reg = <0>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@1 {
+ reg = <1>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@2 {
+ reg = <2>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@3 {
+ reg = <3>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@4 {
+ reg = <4>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@5 {
+ reg = <5>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@6 {
+ reg = <6>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@7 {
+ reg = <7>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@8 {
+ reg = <8>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@9 {
+ reg = <9>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@10 {
+ reg = <10>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@11 {
+ reg = <11>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@12 {
+ reg = <12>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@13 {
+ reg = <13>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@14 {
+ reg = <14>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+
+ gpio@15 {
+ reg = <15>;
+ type = <PCA955X_TYPE_GPIO>;
+ };
+ };
};
&i2c4 {
@@ -630,19 +1129,46 @@
reg = <0x4a>;
};
- eeprom@50 {
- compatible = "atmel,24c64";
- reg = <0x50>;
- };
+ pca9546@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+ i2c-mux-idle-disconnect;
- eeprom@51 {
- compatible = "atmel,24c64";
- reg = <0x51>;
- };
+ i2c4mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
- eeprom@52 {
- compatible = "atmel,24c64";
- reg = <0x52>;
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ i2c4mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+ };
+
+ i2c4mux0chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+ };
};
};
@@ -659,14 +1185,35 @@
reg = <0x49>;
};
- eeprom@50 {
- compatible = "atmel,24c64";
- reg = <0x50>;
- };
+ pca9546@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+ i2c-mux-idle-disconnect;
- eeprom@51 {
- compatible = "atmel,24c64";
- reg = <0x51>;
+ i2c5mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ i2c5mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+ };
};
};
@@ -688,24 +1235,57 @@
reg = <0x4b>;
};
- eeprom@50 {
- compatible = "atmel,24c64";
- reg = <0x50>;
- };
+ pca9546@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+ i2c-mux-idle-disconnect;
- eeprom@51 {
- compatible = "atmel,24c64";
- reg = <0x51>;
- };
+ i2c6mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
- eeprom@52 {
- compatible = "atmel,24c64";
- reg = <0x52>;
- };
+ eeprom@53 {
+ compatible = "atmel,24c64";
+ reg = <0x53>;
+ };
+ };
- eeprom@53 {
- compatible = "atmel,24c64";
- reg = <0x53>;
+ i2c6mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ eeprom@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ };
+ };
+
+ i2c6mux0chn2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ i2c6mux0chn3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+ };
};
};
@@ -713,9 +1293,9 @@
multi-master;
status = "okay";
- si7021-a20@20 {
+ si7021-a20@40 {
compatible = "silabs,si7020";
- reg = <0x20>;
+ reg = <0x40>;
};
tmp275@48 {
@@ -723,7 +1303,7 @@
reg = <0x48>;
};
- max31785@52 {
+ max: max31785@52 {
compatible = "maxim,max31785a";
reg = <0x52>;
#address-cells = <1>;
@@ -752,6 +1332,18 @@
reg = <3>;
tach-pulses = <2>;
};
+
+ fan4: fan@4 {
+ compatible = "pmbus-fan";
+ reg = <4>;
+ tach-pulses = <2>;
+ };
+
+ fan5: fan@5 {
+ compatible = "pmbus-fan";
+ reg = <5>;
+ tach-pulses = <2>;
+ };
};
pca0: pca9552@61 {
@@ -899,7 +1491,7 @@
reg = <0x51>;
};
- pca1: pca9552@61 {
+ pca_pres2: pca9552@61 {
compatible = "nxp,pca9552";
reg = <0x61>;
#address-cells = <1>;
@@ -907,6 +1499,15 @@
gpio-controller;
#gpio-cells = <2>;
+ gpio-line-names =
+ "SLOT6_PRSNT_EN_RSVD", "SLOT7_PRSNT_EN_RSVD",
+ "SLOT8_PRSNT_EN_RSVD", "SLOT9_PRSNT_EN_RSVD",
+ "SLOT10_PRSNT_EN_RSVD", "SLOT11_PRSNT_EN_RSVD",
+ "SLOT6_EXPANDER_PRSNT_N", "SLOT7_EXPANDER_PRSNT_N",
+ "SLOT8_EXPANDER_PRSNT_N", "SLOT9_EXPANDER_PRSNT_N",
+ "SLOT10_EXPANDER_PRSNT_N", "SLOT11_EXPANDER_PRSNT_N",
+ "", "", "", "";
+
gpio@0 {
reg = <0>;
type = <PCA955X_TYPE_GPIO>;
@@ -1041,14 +1642,35 @@
reg = <0x49>;
};
- eeprom@50 {
- compatible = "atmel,24c64";
- reg = <0x50>;
- };
+ pca9546@70 {
+ compatible = "nxp,pca9546";
+ reg = <0x70>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "okay";
+ i2c-mux-idle-disconnect;
- eeprom@51 {
- compatible = "atmel,24c64";
- reg = <0x51>;
+ i2c11mux0chn0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ };
+
+ i2c11mux0chn1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+
+ eeprom@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+ };
};
};
@@ -1140,6 +1762,20 @@
};
};
+&wdt1 {
+ aspeed,reset-type = "none";
+ aspeed,external-signal;
+ aspeed,ext-push-pull;
+ aspeed,ext-active-high;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdtrst1_default>;
+};
+
+&wdt2 {
+ status = "okay";
+};
+
&xdma {
status = "okay";
memory-region = <&vga_memory>;
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts b/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts
index 577c211c469e..15c1f0ac81dc 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-mihawk.dts
@@ -383,6 +383,39 @@
};
};
+&gpio {
+ gpio-line-names =
+ /*A0-A7*/ "","cfam-reset","","","","","","",
+ /*B0-B7*/ "","","","","","","","",
+ /*C0-C7*/ "","","","","","","","",
+ /*D0-D7*/ "fsi-enable","","","","","","","",
+ /*E0-E7*/ "","","","","","fsi-mux","fsi-clock","fsi-data",
+ /*F0-F7*/ "","id-button","","","","","air-water","",
+ /*G0-G7*/ "","","","","","","","",
+ /*H0-H7*/ "","","","","","","","",
+ /*I0-I7*/ "","","","","","","","",
+ /*J0-J7*/ "","","checkstop","","","","","",
+ /*K0-K7*/ "","","","","","","","",
+ /*L0-L7*/ "","","","","","","","",
+ /*M0-M7*/ "","","","","","","","",
+ /*N0-N7*/ "","","","","","","","",
+ /*O0-O7*/ "","","","","","","","",
+ /*P0-P7*/ "","","","","","","","",
+ /*Q0-Q7*/ "","","","","","","","",
+ /*R0-R7*/ "","","fsi-trans","","","","","",
+ /*S0-S7*/ "","","","","","","","",
+ /*T0-T7*/ "","","","","","","","",
+ /*U0-U7*/ "","","","","","","","",
+ /*V0-V7*/ "","","","","","","","",
+ /*W0-W7*/ "","","","","","","","",
+ /*X0-X7*/ "","","","","","","","",
+ /*Y0-Y7*/ "","","","","","","","",
+ /*Z0-Z7*/ "presence-ps1","","presence-ps0","","","","","",
+ /*AA0-AA7*/ "led-front-fault","power-button","led-front-id","","","","","",
+ /*AB0-AB7*/ "","","","","","","","",
+ /*AC0-AC7*/ "","","","","","","","";
+};
+
&fmc {
status = "okay";
flash@0 {
diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
index e7a45ba18fc9..c5aeb3cf3a09 100644
--- a/arch/arm/boot/dts/aspeed-g4.dtsi
+++ b/arch/arm/boot/dts/aspeed-g4.dtsi
@@ -343,59 +343,45 @@
};
lpc: lpc@1e789000 {
- compatible = "aspeed,ast2400-lpc", "simple-mfd";
+ compatible = "aspeed,ast2400-lpc-v2", "simple-mfd", "syscon";
reg = <0x1e789000 0x1000>;
+ reg-io-width = <4>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x1e789000 0x1000>;
- lpc_bmc: lpc-bmc@0 {
- compatible = "aspeed,ast2400-lpc-bmc";
- reg = <0x0 0x80>;
+ lpc_ctrl: lpc-ctrl@80 {
+ compatible = "aspeed,ast2400-lpc-ctrl";
+ reg = <0x80 0x10>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
};
- lpc_host: lpc-host@80 {
- compatible = "aspeed,ast2400-lpc-host", "simple-mfd", "syscon";
- reg = <0x80 0x1e0>;
- reg-io-width = <4>;
-
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x80 0x1e0>;
-
- lpc_ctrl: lpc-ctrl@0 {
- compatible = "aspeed,ast2400-lpc-ctrl";
- reg = <0x0 0x10>;
- clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
- status = "disabled";
- };
-
- lpc_snoop: lpc-snoop@10 {
- compatible = "aspeed,ast2400-lpc-snoop";
- reg = <0x10 0x8>;
- interrupts = <8>;
- clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
- status = "disabled";
- };
-
- lhc: lhc@20 {
- compatible = "aspeed,ast2400-lhc";
- reg = <0x20 0x24 0x48 0x8>;
- };
-
- lpc_reset: reset-controller@18 {
- compatible = "aspeed,ast2400-lpc-reset";
- reg = <0x18 0x4>;
- #reset-cells = <1>;
- };
-
- ibt: ibt@c0 {
- compatible = "aspeed,ast2400-ibt-bmc";
- reg = <0xc0 0x18>;
- interrupts = <8>;
- status = "disabled";
- };
+ lpc_snoop: lpc-snoop@90 {
+ compatible = "aspeed,ast2400-lpc-snoop";
+ reg = <0x90 0x8>;
+ interrupts = <8>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+ };
+
+ lhc: lhc@a0 {
+ compatible = "aspeed,ast2400-lhc";
+ reg = <0xa0 0x24 0xc8 0x8>;
+ };
+
+ lpc_reset: reset-controller@98 {
+ compatible = "aspeed,ast2400-lpc-reset";
+ reg = <0x98 0x4>;
+ #reset-cells = <1>;
+ };
+
+ ibt: ibt@140 {
+ compatible = "aspeed,ast2400-ibt-bmc";
+ reg = <0x140 0x18>;
+ interrupts = <8>;
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
index 21930521a986..d733c1f161c1 100644
--- a/arch/arm/boot/dts/aspeed-g5.dtsi
+++ b/arch/arm/boot/dts/aspeed-g5.dtsi
@@ -434,91 +434,74 @@
};
lpc: lpc@1e789000 {
- compatible = "aspeed,ast2500-lpc", "simple-mfd";
+ compatible = "aspeed,ast2500-lpc-v2", "simple-mfd", "syscon";
reg = <0x1e789000 0x1000>;
+ reg-io-width = <4>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x1e789000 0x1000>;
- lpc_bmc: lpc-bmc@0 {
- compatible = "aspeed,ast2500-lpc-bmc", "simple-mfd", "syscon";
- reg = <0x0 0x80>;
- reg-io-width = <4>;
-
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x0 0x80>;
-
- kcs1: kcs@24 {
- compatible = "aspeed,ast2500-kcs-bmc-v2";
- reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>;
- interrupts = <8>;
- status = "disabled";
- };
- kcs2: kcs@28 {
- compatible = "aspeed,ast2500-kcs-bmc-v2";
- reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>;
- interrupts = <8>;
- status = "disabled";
- };
- kcs3: kcs@2c {
- compatible = "aspeed,ast2500-kcs-bmc-v2";
- reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>;
- interrupts = <8>;
- status = "disabled";
- };
+ kcs1: kcs@24 {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>;
+ interrupts = <8>;
+ status = "disabled";
+ };
+
+ kcs2: kcs@28 {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>;
+ interrupts = <8>;
+ status = "disabled";
+ };
+
+ kcs3: kcs@2c {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>;
+ interrupts = <8>;
+ status = "disabled";
+ };
+
+ kcs4: kcs@114 {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x114 0x1>, <0x118 0x1>, <0x11c 0x1>;
+ interrupts = <8>;
+ status = "disabled";
};
- lpc_host: lpc-host@80 {
- compatible = "aspeed,ast2500-lpc-host", "simple-mfd", "syscon";
- reg = <0x80 0x1e0>;
- reg-io-width = <4>;
-
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x80 0x1e0>;
-
- kcs4: kcs@94 {
- compatible = "aspeed,ast2500-kcs-bmc-v2";
- reg = <0x94 0x1>, <0x98 0x1>, <0x9c 0x1>;
- interrupts = <8>;
- status = "disabled";
- };
-
- lpc_ctrl: lpc-ctrl@0 {
- compatible = "aspeed,ast2500-lpc-ctrl";
- reg = <0x0 0x10>;
- clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
- status = "disabled";
- };
-
- lpc_snoop: lpc-snoop@10 {
- compatible = "aspeed,ast2500-lpc-snoop";
- reg = <0x10 0x8>;
- interrupts = <8>;
- clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
- status = "disabled";
- };
-
- lpc_reset: reset-controller@18 {
- compatible = "aspeed,ast2500-lpc-reset";
- reg = <0x18 0x4>;
- #reset-cells = <1>;
- };
-
- lhc: lhc@20 {
- compatible = "aspeed,ast2500-lhc";
- reg = <0x20 0x24 0x48 0x8>;
- };
-
-
- ibt: ibt@c0 {
- compatible = "aspeed,ast2500-ibt-bmc";
- reg = <0xc0 0x18>;
- interrupts = <8>;
- status = "disabled";
- };
+ lpc_ctrl: lpc-ctrl@80 {
+ compatible = "aspeed,ast2500-lpc-ctrl";
+ reg = <0x80 0x10>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+ };
+
+ lpc_snoop: lpc-snoop@90 {
+ compatible = "aspeed,ast2500-lpc-snoop";
+ reg = <0x90 0x8>;
+ interrupts = <8>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+ };
+
+ lpc_reset: reset-controller@98 {
+ compatible = "aspeed,ast2500-lpc-reset";
+ reg = <0x98 0x4>;
+ #reset-cells = <1>;
+ };
+
+ lhc: lhc@a0 {
+ compatible = "aspeed,ast2500-lhc";
+ reg = <0xa0 0x24 0xc8 0x8>;
+ };
+
+ ibt: ibt@140 {
+ compatible = "aspeed,ast2500-ibt-bmc";
+ reg = <0x140 0x18>;
+ interrupts = <8>;
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/aspeed-g6.dtsi b/arch/arm/boot/dts/aspeed-g6.dtsi
index 3ee470c2b7b5..f96607b7b4e2 100644
--- a/arch/arm/boot/dts/aspeed-g6.dtsi
+++ b/arch/arm/boot/dts/aspeed-g6.dtsi
@@ -460,91 +460,74 @@
};
lpc: lpc@1e789000 {
- compatible = "aspeed,ast2600-lpc", "simple-mfd";
+ compatible = "aspeed,ast2600-lpc-v2", "simple-mfd", "syscon";
reg = <0x1e789000 0x1000>;
+ reg-io-width = <4>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x1e789000 0x1000>;
- lpc_bmc: lpc-bmc@0 {
- compatible = "aspeed,ast2600-lpc-bmc", "simple-mfd", "syscon";
- reg = <0x0 0x80>;
- reg-io-width = <4>;
-
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x0 0x80>;
-
- kcs1: kcs@24 {
- compatible = "aspeed,ast2500-kcs-bmc-v2";
- reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>;
- interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
- kcs_chan = <1>;
- status = "disabled";
- };
- kcs2: kcs@28 {
- compatible = "aspeed,ast2500-kcs-bmc-v2";
- reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>;
- interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- };
- kcs3: kcs@2c {
- compatible = "aspeed,ast2500-kcs-bmc-v2";
- reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>;
- interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- };
+ kcs1: kcs@24 {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>;
+ interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ kcs_chan = <1>;
+ status = "disabled";
+ };
+
+ kcs2: kcs@28 {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>;
+ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ kcs3: kcs@2c {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>;
+ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ kcs4: kcs@114 {
+ compatible = "aspeed,ast2500-kcs-bmc-v2";
+ reg = <0x114 0x1>, <0x118 0x1>, <0x11c 0x1>;
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ lpc_ctrl: lpc-ctrl@80 {
+ compatible = "aspeed,ast2600-lpc-ctrl";
+ reg = <0x80 0x80>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+ };
+
+ lpc_snoop: lpc-snoop@80 {
+ compatible = "aspeed,ast2600-lpc-snoop";
+ reg = <0x80 0x80>;
+ interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
};
- lpc_host: lpc-host@80 {
- compatible = "aspeed,ast2600-lpc-host", "simple-mfd", "syscon";
- reg = <0x80 0x1e0>;
- reg-io-width = <4>;
-
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x80 0x1e0>;
-
- kcs4: kcs@94 {
- compatible = "aspeed,ast2500-kcs-bmc-v2";
- reg = <0x94 0x1>, <0x98 0x1>, <0x9c 0x1>;
- interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- };
-
- lpc_ctrl: lpc-ctrl@0 {
- compatible = "aspeed,ast2600-lpc-ctrl";
- reg = <0x0 0x80>;
- clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
- status = "disabled";
- };
-
- lpc_snoop: lpc-snoop@0 {
- compatible = "aspeed,ast2600-lpc-snoop";
- reg = <0x0 0x80>;
- interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
- status = "disabled";
- };
-
- lhc: lhc@20 {
- compatible = "aspeed,ast2600-lhc";
- reg = <0x20 0x24 0x48 0x8>;
- };
-
- lpc_reset: reset-controller@18 {
- compatible = "aspeed,ast2600-lpc-reset";
- reg = <0x18 0x4>;
- #reset-cells = <1>;
- };
-
- ibt: ibt@c0 {
- compatible = "aspeed,ast2600-ibt-bmc";
- reg = <0xc0 0x18>;
- interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- };
+ lhc: lhc@a0 {
+ compatible = "aspeed,ast2600-lhc";
+ reg = <0xa0 0x24 0xc8 0x8>;
+ };
+
+ lpc_reset: reset-controller@98 {
+ compatible = "aspeed,ast2600-lpc-reset";
+ reg = <0x98 0x4>;
+ #reset-cells = <1>;
+ };
+
+ ibt: ibt@140 {
+ compatible = "aspeed,ast2600-ibt-bmc";
+ reg = <0x140 0x18>;
+ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
index 73b6b1f89de9..edca66c232c1 100644
--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
+++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
@@ -8,6 +8,7 @@
*/
/dts-v1/;
#include "sam9x60.dtsi"
+#include <dt-bindings/input/input.h>
/ {
model = "Microchip SAM9X60-EK";
@@ -84,7 +85,7 @@
sw1 {
label = "SW1";
gpios = <&pioD 18 GPIO_ACTIVE_LOW>;
- linux,code=<0x104>;
+ linux,code=<KEY_PROG1>;
wakeup-source;
};
};
@@ -334,14 +335,6 @@
};
&pinctrl {
- atmel,mux-mask = <
- /* A B C */
- 0xFFFFFE7F 0xC0E0397F 0xEF00019D /* pioA */
- 0x03FFFFFF 0x02FC7E68 0x00780000 /* pioB */
- 0xffffffff 0xF83FFFFF 0xB800F3FC /* pioC */
- 0x003FFFFF 0x003F8000 0x00000000 /* pioD */
- >;
-
adc {
pinctrl_adc_default: adc_default {
atmel,pins = <AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
index 1b1163858b1d..e3251f3e3eaa 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
@@ -84,8 +84,8 @@
pinctrl-0 = <&pinctrl_macb0_default>;
phy-mode = "rmii";
- ethernet-phy@0 {
- reg = <0x0>;
+ ethernet-phy@7 {
+ reg = <0x7>;
interrupt-parent = <&pioA>;
interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
index 84e1180f3e89..a9e6fee55a2a 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
@@ -11,6 +11,7 @@
#include "at91-sama5d27_som1.dtsi"
#include <dt-bindings/mfd/atmel-flexcom.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
/ {
model = "Atmel SAMA5D27 SOM1 EK";
@@ -466,7 +467,7 @@
pb4 {
label = "USER";
gpios = <&pioA PIN_PA29 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
index 180a08765cb8..ff83967fd008 100644
--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1_ek.dts
@@ -8,6 +8,7 @@
*/
/dts-v1/;
#include "at91-sama5d27_wlsom1.dtsi"
+#include <dt-bindings/input/input.h>
/ {
model = "Microchip SAMA5D27 WLSOM1 EK";
@@ -35,7 +36,7 @@
sw4 {
label = "USER BUTTON";
gpios = <&pioA PIN_PB2 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
index 46722a163184..bd64721fa23c 100644
--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
@@ -12,6 +12,7 @@
#include "sama5d2.dtsi"
#include "sama5d2-pinfunc.h"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
#include <dt-bindings/mfd/atmel-flexcom.h>
/ {
@@ -51,7 +52,7 @@
sw4 {
label = "USER_PB1";
gpios = <&pioA PIN_PD0 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
index 8de57d164acd..dfd150eb0fd8 100644
--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
@@ -11,6 +11,7 @@
#include "sama5d2-pinfunc.h"
#include <dt-bindings/mfd/atmel-flexcom.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
#include <dt-bindings/pinctrl/at91.h>
/ {
@@ -402,7 +403,7 @@
bp1 {
label = "PB_USER";
gpios = <&pioA PIN_PA10 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index 4e7cf21f124c..509c732a0d8b 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -10,6 +10,7 @@
#include "sama5d2-pinfunc.h"
#include <dt-bindings/mfd/atmel-flexcom.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
#include <dt-bindings/regulator/active-semi,8945a-regulator.h>
/ {
@@ -712,7 +713,7 @@
bp1 {
label = "PB_USER";
gpios = <&pioA PIN_PB9 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index 5179258f9247..9c55a921263b 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -7,6 +7,7 @@
*/
/dts-v1/;
#include "sama5d36.dtsi"
+#include <dt-bindings/input/input.h>
/ {
model = "SAMA5D3 Xplained";
@@ -354,7 +355,7 @@
bp3 {
label = "PB_USER";
gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91sam9260ek.dts b/arch/arm/boot/dts/at91sam9260ek.dts
index d3446e42b598..ce96345d28a3 100644
--- a/arch/arm/boot/dts/at91sam9260ek.dts
+++ b/arch/arm/boot/dts/at91sam9260ek.dts
@@ -7,6 +7,7 @@
*/
/dts-v1/;
#include "at91sam9260.dtsi"
+#include <dt-bindings/input/input.h>
/ {
model = "Atmel at91sam9260ek";
@@ -156,7 +157,7 @@
btn4 {
label = "Button 4";
gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index 6e6e672c0b86..87bb39060e8b 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -5,6 +5,7 @@
* Copyright (C) 2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
*/
#include "at91sam9g20.dtsi"
+#include <dt-bindings/input/input.h>
/ {
@@ -234,7 +235,7 @@
btn4 {
label = "Button 4";
gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
- linux,code = <0x104>;
+ linux,code = <KEY_PROG1>;
wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
index 462b1dfb0385..720beec54d61 100644
--- a/arch/arm/boot/dts/bcm2711.dtsi
+++ b/arch/arm/boot/dts/bcm2711.dtsi
@@ -308,14 +308,6 @@
#reset-cells = <1>;
};
- bsc_intr: interrupt-controller@7ef00040 {
- compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc";
- reg = <0x7ef00040 0x30>;
- interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-controller;
- #interrupt-cells = <1>;
- };
-
aon_intr: interrupt-controller@7ef00100 {
compatible = "brcm,bcm2711-l2-intc", "brcm,l2-intc";
reg = <0x7ef00100 0x30>;
@@ -362,8 +354,6 @@
reg = <0x7ef04500 0x100>, <0x7ef00b00 0x300>;
reg-names = "bsc", "auto-i2c";
clock-frequency = <97500>;
- interrupt-parent = <&bsc_intr>;
- interrupts = <0>;
status = "disabled";
};
@@ -405,8 +395,6 @@
reg = <0x7ef09500 0x100>, <0x7ef05b00 0x300>;
reg-names = "bsc", "auto-i2c";
clock-frequency = <97500>;
- interrupt-parent = <&bsc_intr>;
- interrupts = <1>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
index 6a96655d8626..8ed403767540 100644
--- a/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
+++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac56u.dts
@@ -21,8 +21,8 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
index 3b0029e61b4c..667b118ba4ee 100644
--- a/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
+++ b/arch/arm/boot/dts/bcm4708-asus-rt-ac68u.dts
@@ -21,8 +21,8 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
index 90f57bad6b24..ff31ce45831a 100644
--- a/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
+++ b/arch/arm/boot/dts/bcm4708-buffalo-wzr-1750dhp.dts
@@ -21,8 +21,8 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
};
spi {
diff --git a/arch/arm/boot/dts/bcm4708-linksys-ea6300-v1.dts b/arch/arm/boot/dts/bcm4708-linksys-ea6300-v1.dts
index 41548d6d479a..5bac1e15775a 100644
--- a/arch/arm/boot/dts/bcm4708-linksys-ea6300-v1.dts
+++ b/arch/arm/boot/dts/bcm4708-linksys-ea6300-v1.dts
@@ -21,6 +21,11 @@
reg = <0x00000000 0x08000000>;
};
+ nvram@1c080000 {
+ compatible = "brcm,nvram";
+ reg = <0x1c080000 0x180000>;
+ };
+
gpio-keys {
compatible = "gpio-keys";
diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
index fed75e6ab58c..61c7b137607e 100644
--- a/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
+++ b/arch/arm/boot/dts/bcm4708-netgear-r6250.dts
@@ -22,8 +22,8 @@
memory {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
index 79542e18915c..4c60eda296d9 100644
--- a/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
+++ b/arch/arm/boot/dts/bcm4708-netgear-r6300-v2.dts
@@ -21,8 +21,8 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
index 51c64f0b2560..9ca6d1b2590d 100644
--- a/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
+++ b/arch/arm/boot/dts/bcm4708-smartrg-sr400ac.dts
@@ -21,8 +21,8 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
index c29950b43a95..0e273c598732 100644
--- a/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
+++ b/arch/arm/boot/dts/bcm47081-asus-rt-n18u.dts
@@ -21,8 +21,8 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
index 2f2d2b0a6893..d857751ec507 100644
--- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
+++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-600dhp2.dts
@@ -21,8 +21,8 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};
spi {
diff --git a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
index 0e349e39f608..8b1a05a0f1a1 100644
--- a/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
+++ b/arch/arm/boot/dts/bcm47081-buffalo-wzr-900dhp.dts
@@ -21,8 +21,8 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};
spi {
diff --git a/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts b/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts
index 432254383769..9316a36434f7 100644
--- a/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts
+++ b/arch/arm/boot/dts/bcm47081-luxul-xwr-1200.dts
@@ -21,6 +21,11 @@
reg = <0x00000000 0x08000000>;
};
+ nvram@1eff0000 {
+ compatible = "brcm,nvram";
+ reg = <0x1eff0000 0x10000>;
+ };
+
leds {
compatible = "gpio-leds";
diff --git a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
index 8f1e565c3db4..6c6bb7b17d27 100644
--- a/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
+++ b/arch/arm/boot/dts/bcm4709-asus-rt-ac87u.dts
@@ -21,8 +21,8 @@
memory {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
index ce888b1835d1..d29e7f80ea6a 100644
--- a/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
+++ b/arch/arm/boot/dts/bcm4709-buffalo-wxr-1900dhp.dts
@@ -21,8 +21,8 @@
memory {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
index ed8619b54d69..9b6887d477d8 100644
--- a/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
+++ b/arch/arm/boot/dts/bcm4709-linksys-ea9200.dts
@@ -18,8 +18,13 @@
memory {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
+ };
+
+ nvram@1c080000 {
+ compatible = "brcm,nvram";
+ reg = <0x1c080000 0x180000>;
};
gpio-keys {
diff --git a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
index 1f87993eae1d..7989a53597d4 100644
--- a/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
+++ b/arch/arm/boot/dts/bcm4709-netgear-r7000.dts
@@ -21,8 +21,8 @@
memory {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
index 6c6199a53d09..87b655be674c 100644
--- a/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
+++ b/arch/arm/boot/dts/bcm4709-netgear-r8000.dts
@@ -32,8 +32,8 @@
memory {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
index 911c65fbf251..e635a15041dd 100644
--- a/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
+++ b/arch/arm/boot/dts/bcm47094-dlink-dir-885l.dts
@@ -21,8 +21,8 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
};
nand: nand@18028000 {
diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
index 3725f2b0d60b..05d4f2931772 100644
--- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
@@ -18,8 +18,13 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
+ };
+
+ nvram@1c080000 {
+ compatible = "brcm,nvram";
+ reg = <0x1c080000 0x100000>;
};
gpio-keys {
@@ -70,6 +75,7 @@
power {
label = "bcm53xx:white:power";
gpios = <&chipcommon 4 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "default-on";
};
wifi-disabled {
@@ -274,7 +280,7 @@
&nandcs {
partitions {
- compatible = "fixed-partitions";
+ compatible = "linksys,ns-partitions";
#address-cells = <1>;
#size-cells = <1>;
@@ -295,20 +301,18 @@
};
partition@200000 {
- label = "firmware";
- reg = <0x0200000 0x01D00000>;
- compatible = "brcm,trx";
+ reg = <0x0200000 0x01d00000>;
+ compatible = "linksys,ns-firmware", "brcm,trx";
};
- partition@1F00000 {
- label = "failsafe";
- reg = <0x01F00000 0x01D00000>;
- read-only;
+ partition@1f00000 {
+ reg = <0x01f00000 0x01d00000>;
+ compatible = "linksys,ns-firmware", "brcm,trx";
};
partition@5200000 {
label = "system";
- reg = <0x05200000 0x02E00000>;
+ reg = <0x05200000 0x02e00000>;
};
};
};
diff --git a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
index 50f7cd08cfbb..4b8117f32d26 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-abr-4500.dts
@@ -18,8 +18,13 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
+ };
+
+ nvram@1eff0000 {
+ compatible = "brcm,nvram";
+ reg = <0x1eff0000 0x10000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
index bcc420f85b56..5fecce0422c7 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-xbr-4500.dts
@@ -18,8 +18,13 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
+ };
+
+ nvram@1eff0000 {
+ compatible = "brcm,nvram";
+ reg = <0x1eff0000 0x10000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
index 4f8d777ae18d..452b8d0ab180 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-xwc-2000.dts
@@ -18,8 +18,8 @@
memory {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
index e17e9a17fb00..cbe8c8e4a301 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3100.dts
@@ -18,8 +18,13 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x08000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x08000000>;
+ };
+
+ nvram@1eff0000 {
+ compatible = "brcm,nvram";
+ reg = <0x1eff0000 0x10000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
index 60cc87ecc7ec..24ae3c8a3e09 100644
--- a/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
+++ b/arch/arm/boot/dts/bcm47094-luxul-xwr-3150-v1.dts
@@ -18,8 +18,13 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
+ };
+
+ nvram@1eff0000 {
+ compatible = "brcm,nvram";
+ reg = <0x1eff0000 0x10000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
index f42a1703f4ab..42097a4c2659 100644
--- a/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
+++ b/arch/arm/boot/dts/bcm47094-netgear-r8500.dts
@@ -18,8 +18,8 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
};
leds {
diff --git a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
index ac3a4483dcb3..a2566ad4619c 100644
--- a/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
+++ b/arch/arm/boot/dts/bcm47094-phicomm-k3.dts
@@ -15,8 +15,8 @@
memory@0 {
device_type = "memory";
- reg = <0x00000000 0x08000000
- 0x88000000 0x18000000>;
+ reg = <0x00000000 0x08000000>,
+ <0x88000000 0x18000000>;
};
gpio-keys {
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index 3bf90d9e3335..149144cdff35 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -1,5 +1,8 @@
&l4_cfg { /* 0x4a000000 */
- compatible = "ti,dra7-l4-cfg", "simple-bus";
+ compatible = "ti,dra7-l4-cfg", "simple-pm-bus";
+ power-domains = <&prm_coreaon>;
+ clocks = <&l4cfg_clkctrl DRA7_L4CFG_L4_CFG_CLKCTRL 0>;
+ clock-names = "fck";
reg = <0x4a000000 0x800>,
<0x4a000800 0x800>,
<0x4a001000 0x1000>;
@@ -11,7 +14,7 @@
<0x00200000 0x4a200000 0x100000>; /* segment 2 */
segment@0 { /* 0x4a000000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */
@@ -493,7 +496,7 @@
};
segment@100000 { /* 0x4a100000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00002000 0x00102000 0x001000>, /* ap 27 */
@@ -572,11 +575,33 @@
};
target-module@40000 { /* 0x4a140000, ap 31 06.0 */
- compatible = "ti,sysc";
- status = "disabled";
- #address-cells = <1>;
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x400fc 4>,
+ <0x41100 4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ power-domains = <&prm_l3init>;
+ clocks = <&l3init_clkctrl DRA7_L3INIT_SATA_CLKCTRL 0>;
+ clock-names = "fck";
#size-cells = <1>;
+ #address-cells = <1>;
ranges = <0x0 0x40000 0x10000>;
+
+ sata: sata@0 {
+ compatible = "snps,dwc-ahci";
+ reg = <0 0x1100>, <0x1100 0x8>;
+ interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&sata_phy>;
+ phy-names = "sata-phy";
+ clocks = <&l3init_clkctrl DRA7_L3INIT_SATA_CLKCTRL 8>;
+ ports-implemented = <0x1>;
+ };
};
target-module@51000 { /* 0x4a151000, ap 33 50.0 */
@@ -789,7 +814,7 @@
};
segment@200000 { /* 0x4a200000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00018000 0x00218000 0x001000>, /* ap 43 */
@@ -1006,7 +1031,10 @@
};
&l4_per1 { /* 0x48000000 */
- compatible = "ti,dra7-l4-per1", "simple-bus";
+ compatible = "ti,dra7-l4-per1", "simple-pm-bus";
+ power-domains = <&prm_l4per>;
+ clocks = <&l4per_clkctrl DRA7_L4PER_L4_PER1_CLKCTRL 0>;
+ clock-names = "fck";
reg = <0x48000000 0x800>,
<0x48000800 0x800>,
<0x48001000 0x400>,
@@ -1020,7 +1048,7 @@
<0x00200000 0x48200000 0x200000>; /* segment 1 */
segment@0 { /* 0x48000000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */
@@ -1168,7 +1196,7 @@
};
};
- target-module@34000 { /* 0x48034000, ap 7 46.0 */
+ timer3_target: target-module@34000 { /* 0x48034000, ap 7 46.0 */
compatible = "ti,sysc-omap4-timer", "ti,sysc";
reg = <0x34000 0x4>,
<0x34010 0x4>;
@@ -1195,7 +1223,7 @@
};
};
- target-module@36000 { /* 0x48036000, ap 9 4e.0 */
+ timer4_target: target-module@36000 { /* 0x48036000, ap 9 4e.0 */
compatible = "ti,sysc-omap4-timer", "ti,sysc";
reg = <0x36000 0x4>,
<0x36010 0x4>;
@@ -2269,14 +2297,17 @@
};
segment@200000 { /* 0x48200000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
};
};
&l4_per2 { /* 0x48400000 */
- compatible = "ti,dra7-l4-per2", "simple-bus";
+ compatible = "ti,dra7-l4-per2", "simple-pm-bus";
+ power-domains = <&prm_l4per>;
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_L4_PER2_CLKCTRL 0>;
+ clock-names = "fck";
reg = <0x48400000 0x800>,
<0x48400800 0x800>,
<0x48401000 0x400>,
@@ -2296,7 +2327,7 @@
<0x48454000 0x48454000 0x400000>; /* L3 data port */
segment@0 { /* 0x48400000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */
@@ -3094,7 +3125,10 @@
};
&l4_per3 { /* 0x48800000 */
- compatible = "ti,dra7-l4-per3", "simple-bus";
+ compatible = "ti,dra7-l4-per3", "simple-pm-bus";
+ power-domains = <&prm_l4per>;
+ clocks = <&l4per3_clkctrl DRA7_L4PER3_L4_PER3_CLKCTRL 0>;
+ clock-names = "fck";
reg = <0x48800000 0x800>,
<0x48800800 0x800>,
<0x48801000 0x400>,
@@ -3106,7 +3140,7 @@
ranges = <0x00000000 0x48800000 0x200000>; /* segment 0 */
segment@0 { /* 0x48800000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */
@@ -4205,7 +4239,10 @@
};
&l4_wkup { /* 0x4ae00000 */
- compatible = "ti,dra7-l4-wkup", "simple-bus";
+ compatible = "ti,dra7-l4-wkup", "simple-pm-bus";
+ power-domains = <&prm_wkupaon>;
+ clocks = <&wkupaon_clkctrl DRA7_WKUPAON_L4_WKUP_CLKCTRL 0>;
+ clock-names = "fck";
reg = <0x4ae00000 0x800>,
<0x4ae00800 0x800>,
<0x4ae01000 0x1000>;
@@ -4218,7 +4255,7 @@
<0x00030000 0x4ae30000 0x010000>; /* segment 3 */
segment@0 { /* 0x4ae00000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */
@@ -4295,7 +4332,7 @@
};
segment@10000 { /* 0x4ae10000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00010000 0x001000>, /* ap 5 */
@@ -4405,7 +4442,7 @@
};
segment@20000 { /* 0x4ae20000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00006000 0x00026000 0x001000>, /* ap 13 */
@@ -4511,7 +4548,7 @@
};
segment@30000 { /* 0x4ae30000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0000c000 0x0003c000 0x002000>, /* ap 30 */
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index ce1194744f84..dfc1ef8ef6ae 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -46,6 +46,7 @@
timer {
compatible = "arm,armv7-timer";
+ status = "disabled"; /* See ARM architected timer wrap erratum i940 */
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
@@ -125,18 +126,6 @@
};
/*
- * The soc node represents the soc top level view. It is used for IPs
- * that are not memory mapped in the MPU view or for the MPU itself.
- */
- soc {
- compatible = "ti,omap-infra";
- mpu {
- compatible = "ti,omap5-mpu";
- ti,hwmods = "mpu";
- };
- };
-
- /*
* XXX: Use a flat representation of the SOC interconnect.
* The real OMAP interconnect network is quite complex.
* Since it will not bring real advantage to represent that in DT for
@@ -144,16 +133,22 @@
* hierarchy.
*/
ocp: ocp {
- compatible = "ti,dra7-l3-noc", "simple-bus";
+ compatible = "simple-pm-bus";
+ power-domains = <&prm_core>;
+ clocks = <&l3main1_clkctrl DRA7_L3MAIN1_L3_MAIN_1_CLKCTRL 0>,
+ <&l3instr_clkctrl DRA7_L3INSTR_L3_MAIN_2_CLKCTRL 0>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x0 0x0 0xc0000000>;
dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>;
- ti,hwmods = "l3_main_1", "l3_main_2";
- reg = <0x0 0x44000000 0x0 0x1000000>,
- <0x0 0x45000000 0x0 0x1000>;
- interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
- <&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+
+ l3-noc@44000000 {
+ compatible = "ti,dra7-l3-noc";
+ reg = <0x44000000 0x1000>,
+ <0x45000000 0x1000>;
+ interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ };
l4_cfg: interconnect@4a000000 {
};
@@ -161,36 +156,65 @@
};
l4_per1: interconnect@48000000 {
};
+
+ target-module@48210000 {
+ compatible = "ti,sysc-omap4-simple", "ti,sysc";
+ power-domains = <&prm_mpu>;
+ clocks = <&mpu_clkctrl DRA7_MPU_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x48210000 0x1f0000>;
+
+ mpu {
+ compatible = "ti,omap5-mpu";
+ };
+ };
+
l4_per2: interconnect@48400000 {
};
l4_per3: interconnect@48800000 {
};
- axi@0 {
- compatible = "simple-bus";
+ /*
+ * Register access seems to have complex dependencies and also
+ * seems to need an enabled phy. See the TRM chapter for "Table
+ * 26-678. Main Sequence PCIe Controller Global Initialization"
+ * and also dra7xx_pcie_probe().
+ */
+ axi0: target-module@51000000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ power-domains = <&prm_l3init>;
+ resets = <&prm_l3init 0>;
+ reset-names = "rstctrl";
+ clocks = <&pcie_clkctrl DRA7_PCIE_PCIE1_CLKCTRL 0>,
+ <&pcie_clkctrl DRA7_PCIE_PCIE1_CLKCTRL 9>,
+ <&pcie_clkctrl DRA7_PCIE_PCIE1_CLKCTRL 10>;
+ clock-names = "fck", "phy-clk", "phy-clk-div";
#size-cells = <1>;
#address-cells = <1>;
- ranges = <0x51000000 0x51000000 0x3000
- 0x0 0x20000000 0x10000000>;
+ ranges = <0x51000000 0x51000000 0x3000>,
+ <0x20000000 0x20000000 0x10000000>;
dma-ranges;
/**
* To enable PCI endpoint mode, disable the pcie1_rc
* node and enable pcie1_ep mode.
*/
pcie1_rc: pcie@51000000 {
- reg = <0x51000000 0x2000>, <0x51002000 0x14c>, <0x1000 0x2000>;
+ reg = <0x51000000 0x2000>,
+ <0x51002000 0x14c>,
+ <0x20001000 0x2000>;
reg-names = "rc_dbics", "ti_conf", "config";
interrupts = <0 232 0x4>, <0 233 0x4>;
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
- ranges = <0x81000000 0 0 0x03000 0 0x00010000
- 0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+ ranges = <0x81000000 0 0x00000000 0x20003000 0 0x00010000>,
+ <0x82000000 0 0x20013000 0x20013000 0 0x0ffed000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
linux,pci-domain = <0>;
- ti,hwmods = "pcie1";
phys = <&pcie1_phy>;
phy-names = "pcie-phy0";
ti,syscon-lane-sel = <&scm_conf_pcie 0x18>;
@@ -209,13 +233,15 @@
};
pcie1_ep: pcie_ep@51000000 {
- reg = <0x51000000 0x28>, <0x51002000 0x14c>, <0x51001000 0x28>, <0x1000 0x10000000>;
+ reg = <0x51000000 0x28>,
+ <0x51002000 0x14c>,
+ <0x51001000 0x28>,
+ <0x20001000 0x10000000>;
reg-names = "ep_dbics", "ti_conf", "ep_dbics2", "addr_space";
interrupts = <0 232 0x4>;
num-lanes = <1>;
num-ib-windows = <4>;
num-ob-windows = <16>;
- ti,hwmods = "pcie1";
phys = <&pcie1_phy>;
phy-names = "pcie-phy0";
ti,syscon-unaligned-access = <&scm_conf1 0x14 1>;
@@ -224,28 +250,42 @@
};
};
- axi@1 {
- compatible = "simple-bus";
+ /*
+ * Register access seems to have complex dependencies and also
+ * seems to need an enabled phy. See the TRM chapter for "Table
+ * 26-678. Main Sequence PCIe Controller Global Initialization"
+ * and also dra7xx_pcie_probe().
+ */
+ axi1: target-module@51800000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ clocks = <&pcie_clkctrl DRA7_PCIE_PCIE2_CLKCTRL 0>,
+ <&pcie_clkctrl DRA7_PCIE_PCIE2_CLKCTRL 9>,
+ <&pcie_clkctrl DRA7_PCIE_PCIE2_CLKCTRL 10>;
+ clock-names = "fck", "phy-clk", "phy-clk-div";
+ power-domains = <&prm_l3init>;
+ resets = <&prm_l3init 1>;
+ reset-names = "rstctrl";
#size-cells = <1>;
#address-cells = <1>;
- ranges = <0x51800000 0x51800000 0x3000
- 0x0 0x30000000 0x10000000>;
+ ranges = <0x51800000 0x51800000 0x3000>,
+ <0x30000000 0x30000000 0x10000000>;
dma-ranges;
status = "disabled";
pcie2_rc: pcie@51800000 {
- reg = <0x51800000 0x2000>, <0x51802000 0x14c>, <0x1000 0x2000>;
+ reg = <0x51800000 0x2000>,
+ <0x51802000 0x14c>,
+ <0x30001000 0x2000>;
reg-names = "rc_dbics", "ti_conf", "config";
interrupts = <0 355 0x4>, <0 356 0x4>;
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
- ranges = <0x81000000 0 0 0x03000 0 0x00010000
- 0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+ ranges = <0x81000000 0 0x00000000 0x30003000 0 0x00010000>,
+ <0x82000000 0 0x30013000 0x30013000 0 0x0ffed000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
linux,pci-domain = <1>;
- ti,hwmods = "pcie2";
phys = <&pcie2_phy>;
phy-names = "pcie-phy0";
interrupt-map-mask = <0 0 0 7>;
@@ -336,8 +376,15 @@
target-module@43300000 {
compatible = "ti,sysc-omap4", "ti,sysc";
- reg = <0x43300000 0x4>;
- reg-names = "rev";
+ reg = <0x43300000 0x4>,
+ <0x43300010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
clocks = <&l3main1_clkctrl DRA7_L3MAIN1_TPCC_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;
@@ -369,8 +416,15 @@
target-module@43400000 {
compatible = "ti,sysc-omap4", "ti,sysc";
- reg = <0x43400000 0x4>;
- reg-names = "rev";
+ reg = <0x43400000 0x4>,
+ <0x43400010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
clocks = <&l3main1_clkctrl DRA7_L3MAIN1_TPTC0_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;
@@ -387,8 +441,15 @@
target-module@43500000 {
compatible = "ti,sysc-omap4", "ti,sysc";
- reg = <0x43500000 0x4>;
- reg-names = "rev";
+ reg = <0x43500000 0x4>,
+ <0x43500010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
clocks = <&l3main1_clkctrl DRA7_L3MAIN1_TPTC1_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;
@@ -403,11 +464,23 @@
};
};
- dmm@4e000000 {
- compatible = "ti,omap5-dmm";
- reg = <0x4e000000 0x800>;
- interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "dmm";
+ target-module@4e000000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x4e000000 0x4>,
+ <0x4e000010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ranges = <0x0 0x4e000000 0x2000000>;
+ #size-cells = <1>;
+ #address-cells = <1>;
+
+ dmm@0 {
+ compatible = "ti,omap5-dmm";
+ reg = <0 0x800>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
ipu1: ipu@58820000 {
@@ -694,32 +767,36 @@
>;
};
- qspi: spi@4b300000 {
- compatible = "ti,dra7xxx-qspi";
- reg = <0x4b300000 0x100>,
- <0x5c000000 0x4000000>;
- reg-names = "qspi_base", "qspi_mmap";
- syscon-chipselects = <&scm_conf 0x558>;
- #address-cells = <1>;
- #size-cells = <0>;
- ti,hwmods = "qspi";
- clocks = <&l4per2_clkctrl DRA7_L4PER2_QSPI_CLKCTRL 25>;
+ target-module@4b300000 {
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x4b300000 0x4>,
+ <0x4b300010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_QSPI_CLKCTRL 0>;
clock-names = "fck";
- num-cs = <4>;
- interrupts = <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>;
- status = "disabled";
- };
-
- /* OCP2SCP3 */
- sata: sata@4a141100 {
- compatible = "snps,dwc-ahci";
- reg = <0x4a140000 0x1100>, <0x4a141100 0x7>;
- interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>;
- phys = <&sata_phy>;
- phy-names = "sata-phy";
- clocks = <&l3init_clkctrl DRA7_L3INIT_SATA_CLKCTRL 8>;
- ti,hwmods = "sata";
- ports-implemented = <0x1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x4b300000 0x1000>,
+ <0x5c000000 0x5c000000 0x4000000>;
+
+ qspi: spi@0 {
+ compatible = "ti,dra7xxx-qspi";
+ reg = <0 0x100>,
+ <0x5c000000 0x4000000>;
+ reg-names = "qspi_base", "qspi_mmap";
+ syscon-chipselects = <&scm_conf 0x558>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&l4per2_clkctrl DRA7_L4PER2_QSPI_CLKCTRL 25>;
+ clock-names = "fck";
+ num-cs = <4>;
+ interrupts = <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
};
/* OCP2SCP1 */
@@ -1241,3 +1318,22 @@
assigned-clock-parents = <&sys_32k_ck>;
};
};
+
+/* Local timers, see ARM architected timer wrap erratum i940 */
+&timer3_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER3_CLKCTRL 24>;
+ assigned-clock-parents = <&timer_sys_clk_div>;
+ };
+};
+
+&timer4_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&l4per_clkctrl DRA7_L4PER_TIMER4_CLKCTRL 24>;
+ assigned-clock-parents = <&timer_sys_clk_div>;
+ };
+};
diff --git a/arch/arm/boot/dts/ep7209.dtsi b/arch/arm/boot/dts/ep7209.dtsi
index 365931f8b48d..57bdad2c1994 100644
--- a/arch/arm/boot/dts/ep7209.dtsi
+++ b/arch/arm/boot/dts/ep7209.dtsi
@@ -10,6 +10,8 @@
model = "Cirrus Logic EP7209";
compatible = "cirrus,ep7209";
+ chosen { };
+
aliases {
gpio0 = &porta;
gpio1 = &portb;
@@ -108,6 +110,7 @@
compatible = "cirrus,ep7209-fb";
reg = <0x800002c0 0xd44>, <0x60000000 0xc000>;
clocks = <&clks CLPS711X_CLK_BUS>;
+ syscon = <&syscon1>;
status = "disabled";
};
@@ -132,7 +135,7 @@
#pwm-cells = <1>;
};
- uart1: uart@80000480 {
+ uart1: serial@80000480 {
compatible = "cirrus,ep7209-uart";
reg = <0x80000480 0x80>;
interrupts = <12 13>;
@@ -147,6 +150,7 @@
reg = <0x80000500 0x4>;
interrupts = <15>;
clocks = <&clks CLPS711X_CLK_SPI>;
+ syscon = <&syscon3>;
status = "disabled";
};
@@ -155,7 +159,7 @@
reg = <0x80001100 0x80>;
};
- uart2: uart@80001480 {
+ uart2: serial@80001480 {
compatible = "cirrus,ep7209-uart";
reg = <0x80001480 0x80>;
interrupts = <28 29>;
@@ -170,6 +174,7 @@
clocks = <&clks CLPS711X_CLK_PLL>;
clock-names = "pll";
interrupts = <32>;
+ syscon = <&syscon3>;
status = "disabled";
};
@@ -179,8 +184,17 @@
};
};
+ keypad: keypad {
+ compatible = "cirrus,ep7209-keypad";
+ interrupt-parent = <&intc>;
+ interrupts = <16>;
+ syscon = <&syscon1>;
+ status = "disabled";
+ };
+
mctrl: mctrl {
compatible = "cirrus,ep7209-mctrl-gpio";
+ gpio,syscon-dev = <&syscon1 0 0>;
gpio-controller;
#gpio-cells = <2>;
};
diff --git a/arch/arm/boot/dts/ep7211-edb7211.dts b/arch/arm/boot/dts/ep7211-edb7211.dts
index da076479c8e2..7fb532f227af 100644
--- a/arch/arm/boot/dts/ep7211-edb7211.dts
+++ b/arch/arm/boot/dts/ep7211-edb7211.dts
@@ -7,7 +7,7 @@
model = "Cirrus Logic EP7211 Development Board";
compatible = "cirrus,edb7211", "cirrus,ep7211", "cirrus,ep7209";
- memory {
+ memory@c0000000 {
device_type = "memory";
reg = <0xc0000000 0x02000000>;
};
diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts
index fae046e08a5d..8b41a9d5e2db 100644
--- a/arch/arm/boot/dts/exynos3250-monk.dts
+++ b/arch/arm/boot/dts/exynos3250-monk.dts
@@ -142,7 +142,7 @@
assigned-clock-rates = <6000000>;
thermistor-ap {
- compatible = "ntc,ncp15wb473";
+ compatible = "murata,ncp15wb473";
pullup-uv = <1800000>;
pullup-ohm = <100000>;
pulldown-ohm = <100000>;
@@ -150,7 +150,7 @@
};
thermistor-battery {
- compatible = "ntc,ncp15wb473";
+ compatible = "murata,ncp15wb473";
pullup-uv = <1800000>;
pullup-ohm = <100000>;
pulldown-ohm = <100000>;
diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
index d64ccf4b7d32..c52b9cf4f74c 100644
--- a/arch/arm/boot/dts/exynos3250-rinato.dts
+++ b/arch/arm/boot/dts/exynos3250-rinato.dts
@@ -142,7 +142,7 @@
assigned-clock-rates = <6000000>;
thermistor-ap {
- compatible = "ntc,ncp15wb473";
+ compatible = "murata,ncp15wb473";
pullup-uv = <1800000>;
pullup-ohm = <100000>;
pulldown-ohm = <100000>;
@@ -150,7 +150,7 @@
};
thermistor-battery {
- compatible = "ntc,ncp15wb473";
+ compatible = "murata,ncp15wb473";
pullup-uv = <1800000>;
pullup-ohm = <100000>;
pulldown-ohm = <100000>;
diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts
index 304a8ee2364c..525ff3d2fac3 100644
--- a/arch/arm/boot/dts/exynos4210-i9100.dts
+++ b/arch/arm/boot/dts/exynos4210-i9100.dts
@@ -136,7 +136,7 @@
compatible = "maxim,max17042";
interrupt-parent = <&gpx2>;
- interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
pinctrl-0 = <&max17042_fuel_irq>;
pinctrl-names = "default";
@@ -147,6 +147,36 @@
};
};
+ i2c_s5k5baf: i2c-gpio-1 {
+ compatible = "i2c-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sda-gpios = <&gpc1 0 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpc1 2 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ i2c-gpio,delay-us = <2>;
+
+ image-sensor@2d {
+ compatible = "samsung,s5k5baf";
+ reg = <0x2d>;
+ vdda-supply = <&cam_io_en_reg>;
+ vddreg-supply = <&vt_core_15v_reg>;
+ vddio-supply = <&vtcam_reg>;
+ clocks = <&camera 0>;
+ clock-names = "mclk";
+ stbyn-gpios = <&gpl2 0 GPIO_ACTIVE_LOW>;
+ rstn-gpios = <&gpl2 1 GPIO_ACTIVE_LOW>;
+ clock-frequency = <24000000>;
+
+ port {
+ s5k5bafx_ep: endpoint {
+ remote-endpoint = <&csis1_ep>;
+ data-lanes = <1>;
+ };
+ };
+ };
+ };
+
spi-3 {
compatible = "spi-gpio";
#address-cells = <1>;
@@ -220,7 +250,29 @@
};
&camera {
+ pinctrl-0 = <&cam_port_a_clk_active>;
+ pinctrl-names = "default";
status = "okay";
+ assigned-clocks = <&clock CLK_MOUT_CAM0>, <&clock CLK_MOUT_CAM1>;
+ assigned-clock-parents = <&clock CLK_XUSBXTI>, <&clock CLK_XUSBXTI>;
+};
+
+&csis_1 {
+ status = "okay";
+ vddcore-supply = <&vusb_reg>;
+ vddio-supply = <&vmipi_reg>;
+ clock-frequency = <160000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@4 {
+ reg = <4>;
+ csis1_ep: endpoint {
+ remote-endpoint = <&s5k5bafx_ep>;
+ data-lanes = <1>;
+ samsung,csis-hs-settle = <6>;
+ };
+ };
};
&cpu0 {
@@ -384,6 +436,8 @@
pinctrl-0 = <&max8997_irq>, <&otg_gp>, <&usb_sel>;
pinctrl-names = "default";
+ charger-supply = <&charger_reg>;
+
regulators {
vadc_reg: LDO1 {
regulator-name = "VADC_3.3V_C210";
diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi
index 111c32bae02c..fc77c1bfd844 100644
--- a/arch/arm/boot/dts/exynos4412-midas.dtsi
+++ b/arch/arm/boot/dts/exynos4412-midas.dtsi
@@ -173,7 +173,7 @@
pmic@66 {
compatible = "maxim,max77693";
interrupt-parent = <&gpx1>;
- interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&max77693_irq>;
reg = <0x66>;
@@ -221,7 +221,7 @@
fuel-gauge@36 {
compatible = "maxim,max17047";
interrupt-parent = <&gpx2>;
- interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&max77693_fuel_irq>;
reg = <0x36>;
@@ -665,7 +665,7 @@
max77686: pmic@9 {
compatible = "maxim,max77686";
interrupt-parent = <&gpx0>;
- interrupts = <7 IRQ_TYPE_NONE>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
pinctrl-0 = <&max77686_irq>;
pinctrl-names = "default";
reg = <0x09>;
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 2b20d9095d9f..5bd05866d7a3 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -2,7 +2,7 @@
/*
* Common definition for Hardkernel's Exynos4412 based ODROID-X/X2/U2/U3 boards
* device tree source
-*/
+ */
#include <dt-bindings/sound/samsung-i2s.h>
#include <dt-bindings/input/input.h>
@@ -122,6 +122,7 @@
};
&clock {
+ clocks = <&clock CLK_XUSBXTI>;
assigned-clocks = <&clock CLK_FOUT_EPLL>;
assigned-clock-rates = <45158401>;
};
@@ -278,7 +279,7 @@
max77686: pmic@9 {
compatible = "maxim,max77686";
interrupt-parent = <&gpx3>;
- interrupts = <2 IRQ_TYPE_NONE>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&max77686_irq>;
reg = <0x09>;
diff --git a/arch/arm/boot/dts/exynos4412-odroidx.dts b/arch/arm/boot/dts/exynos4412-odroidx.dts
index 0e9d626e740a..440135d0ff2a 100644
--- a/arch/arm/boot/dts/exynos4412-odroidx.dts
+++ b/arch/arm/boot/dts/exynos4412-odroidx.dts
@@ -84,7 +84,8 @@
ethernet: usbether@1 {
compatible = "usb0424,ec00";
reg = <1>;
- local-mac-address = [00 00 00 00 00 00]; /* Filled in by a bootloader */
+ /* Filled in by a bootloader */
+ local-mac-address = [00 00 00 00 00 00];
};
};
};
diff --git a/arch/arm/boot/dts/exynos4412-p4note.dtsi b/arch/arm/boot/dts/exynos4412-p4note.dtsi
index b2f9d5448a18..9e750890edb8 100644
--- a/arch/arm/boot/dts/exynos4412-p4note.dtsi
+++ b/arch/arm/boot/dts/exynos4412-p4note.dtsi
@@ -146,7 +146,7 @@
pinctrl-0 = <&fuel_alert_irq>;
pinctrl-names = "default";
interrupt-parent = <&gpx2>;
- interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
maxim,rsns-microohm = <10000>;
maxim,over-heat-temp = <600>;
maxim,over-volt = <4300>;
@@ -322,7 +322,7 @@
max77686: pmic@9 {
compatible = "maxim,max77686";
interrupt-parent = <&gpx0>;
- interrupts = <7 IRQ_TYPE_NONE>;
+ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
pinctrl-0 = <&max77686_irq>;
pinctrl-names = "default";
reg = <0x09>;
diff --git a/arch/arm/boot/dts/exynos4412-ppmu-common.dtsi b/arch/arm/boot/dts/exynos4412-ppmu-common.dtsi
index 3a3b2fafefdd..7f187a3dedcc 100644
--- a/arch/arm/boot/dts/exynos4412-ppmu-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-ppmu-common.dtsi
@@ -7,41 +7,41 @@
*/
&ppmu_dmc0 {
- status = "okay";
+ status = "okay";
- events {
- ppmu_dmc0_3: ppmu-event3-dmc0 {
- event-name = "ppmu-event3-dmc0";
- };
- };
+ events {
+ ppmu_dmc0_3: ppmu-event3-dmc0 {
+ event-name = "ppmu-event3-dmc0";
+ };
+ };
};
&ppmu_dmc1 {
- status = "okay";
+ status = "okay";
- events {
- ppmu_dmc1_3: ppmu-event3-dmc1 {
- event-name = "ppmu-event3-dmc1";
- };
- };
+ events {
+ ppmu_dmc1_3: ppmu-event3-dmc1 {
+ event-name = "ppmu-event3-dmc1";
+ };
+ };
};
&ppmu_leftbus {
- status = "okay";
+ status = "okay";
- events {
- ppmu_leftbus_3: ppmu-event3-leftbus {
- event-name = "ppmu-event3-leftbus";
- };
- };
+ events {
+ ppmu_leftbus_3: ppmu-event3-leftbus {
+ event-name = "ppmu-event3-leftbus";
+ };
+ };
};
&ppmu_rightbus {
- status = "okay";
+ status = "okay";
- events {
- ppmu_rightbus_3: ppmu-event3-rightbus {
- event-name = "ppmu-event3-rightbus";
- };
- };
+ events {
+ ppmu_rightbus_3: ppmu-event3-rightbus {
+ event-name = "ppmu-event3-rightbus";
+ };
+ };
};
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 8b5a79a8720c..39bbe18145cf 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -134,7 +134,7 @@
compatible = "maxim,max77686";
reg = <0x09>;
interrupt-parent = <&gpx3>;
- interrupts = <2 IRQ_TYPE_NONE>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&max77686_irq>;
#clock-cells = <1>;
diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
index 6635f6184051..2335c4687349 100644
--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
@@ -292,7 +292,7 @@
max77686: pmic@9 {
compatible = "maxim,max77686";
interrupt-parent = <&gpx3>;
- interrupts = <2 IRQ_TYPE_NONE>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&max77686_irq>;
wakeup-source;
diff --git a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
index d0aa18443a69..9599ba8ba798 100644
--- a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
@@ -235,13 +235,13 @@
interrupt-parent = <&combiner>;
#interrupt-cells = <2>;
interrupts = <23 0>,
- <24 0>,
- <25 0>,
- <25 1>,
- <26 0>,
- <26 1>,
- <27 0>,
- <27 1>;
+ <24 0>,
+ <25 0>,
+ <25 1>,
+ <26 0>,
+ <26 1>,
+ <27 0>,
+ <27 1>;
};
gpx1: gpx1 {
@@ -252,13 +252,13 @@
interrupt-parent = <&combiner>;
#interrupt-cells = <2>;
interrupts = <28 0>,
- <28 1>,
- <29 0>,
- <29 1>,
- <30 0>,
- <30 1>,
- <31 0>,
- <31 1>;
+ <28 1>,
+ <29 0>,
+ <29 1>,
+ <30 0>,
+ <30 1>,
+ <31 0>,
+ <31 1>;
};
gpx2: gpx2 {
diff --git a/arch/arm/boot/dts/imx50-kobo-aura.dts b/arch/arm/boot/dts/imx50-kobo-aura.dts
index 97cfd970fe74..82ce8c43be86 100644
--- a/arch/arm/boot/dts/imx50-kobo-aura.dts
+++ b/arch/arm/boot/dts/imx50-kobo-aura.dts
@@ -143,10 +143,24 @@
pinctrl-0 = <&pinctrl_i2c3>;
status = "okay";
- /* TODO: embedded controller at 0x43 */
+ embedded-controller@43 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ec>;
+ compatible = "netronix,ntxec";
+ reg = <0x43>;
+ system-power-controller;
+ interrupts-extended = <&gpio4 11 IRQ_TYPE_EDGE_FALLING>;
+ #pwm-cells = <2>;
+ };
};
&iomuxc {
+ pinctrl_ec: ecgrp {
+ fsl,pins = <
+ MX50_PAD_CSPI_SS0__GPIO4_11 0x0 /* INT */
+ >;
+ };
+
pinctrl_gpiokeys: gpiokeysgrp {
fsl,pins = <
MX50_PAD_CSPI_MISO__GPIO4_10 0x0
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 7ebb46ce9e36..01cfcbe5928e 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -467,7 +467,7 @@
};
iim: efuse@83f98000 {
- compatible = "fsl,imx51-iim", "fsl,imx27-iim";
+ compatible = "fsl,imx51-iim", "fsl,imx27-iim", "syscon";
reg = <0x83f98000 0x4000>;
interrupts = <69>;
clocks = <&clks IMX5_CLK_IIM_GATE>;
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index 9b4efcd82636..fe4244044a0f 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -142,6 +142,7 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
+ cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
status = "okay";
};
@@ -209,6 +210,7 @@
MX53_PAD_SD1_DATA3__ESDHC1_DAT3 0x1d5
MX53_PAD_SD1_CMD__ESDHC1_CMD 0x1d5
MX53_PAD_SD1_CLK__ESDHC1_CLK 0x1d5
+ MX53_PAD_EIM_DA13__GPIO3_13 0xe4
>;
};
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 000050aeeabe..2cf3909cca2f 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -668,7 +668,7 @@
};
iim: efuse@63f98000 {
- compatible = "fsl,imx53-iim", "fsl,imx27-iim";
+ compatible = "fsl,imx53-iim", "fsl,imx27-iim", "syscon";
reg = <0x63f98000 0x4000>;
interrupts = <69>;
clocks = <&clks IMX5_CLK_IIM_GATE>;
diff --git a/arch/arm/boot/dts/imx6dl-plybas.dts b/arch/arm/boot/dts/imx6dl-plybas.dts
index 333c306aa946..bf72a67a9c76 100644
--- a/arch/arm/boot/dts/imx6dl-plybas.dts
+++ b/arch/arm/boot/dts/imx6dl-plybas.dts
@@ -19,17 +19,15 @@
gpio_keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
autorepeat;
- button@20 {
+ button-start {
label = "START";
linux,code = <31>;
gpios = <&gpio5 8 GPIO_ACTIVE_LOW>;
};
- button@21 {
+ button-clean {
label = "CLEAN";
linux,code = <46>;
gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
index 7d2c72562c73..9148a01ed6d9 100644
--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
@@ -105,9 +105,13 @@
phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
phy-reset-duration = <20>;
phy-supply = <&sw2_reg>;
- phy-handle = <&ethphy0>;
status = "okay";
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+
mdio {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/imx6q-b450v3.dts b/arch/arm/boot/dts/imx6q-b450v3.dts
index 604f2420370f..d994b32ad825 100644
--- a/arch/arm/boot/dts/imx6q-b450v3.dts
+++ b/arch/arm/boot/dts/imx6q-b450v3.dts
@@ -84,6 +84,11 @@
};
&pca9539 {
+ gpio-line-names = "AMB_P_INT1#", "AMB_P_INT2#", "BT_EN", "WLAN_EN",
+ "", "SM_D_ACT", "DP1_RST#", "",
+ "WD15S_EN", "WD15S_DIS#", "", "",
+ "", "", "", "";
+
P04-hog {
gpio-hog;
gpios = <4 0>;
diff --git a/arch/arm/boot/dts/imx6q-b650v3.dts b/arch/arm/boot/dts/imx6q-b650v3.dts
index 56d2aeb1900c..fa1a1df37cde 100644
--- a/arch/arm/boot/dts/imx6q-b650v3.dts
+++ b/arch/arm/boot/dts/imx6q-b650v3.dts
@@ -84,6 +84,11 @@
};
&pca9539 {
+ gpio-line-names = "AMB_P_INT1#", "AMB_P_INT2#", "BT_EN", "WLAN_EN",
+ "", "SM_D_ACT", "DP1_RST#", "",
+ "WD15S_EN", "WD15S_DIS#", "", "",
+ "", "", "", "";
+
P07-hog {
gpio-hog;
gpios = <7 0>;
diff --git a/arch/arm/boot/dts/imx6q-b850v3.dts b/arch/arm/boot/dts/imx6q-b850v3.dts
index 3d6b757bf325..db8c332df6a1 100644
--- a/arch/arm/boot/dts/imx6q-b850v3.dts
+++ b/arch/arm/boot/dts/imx6q-b850v3.dts
@@ -199,6 +199,11 @@
};
&pca9539 {
+ gpio-line-names = "AMB_P_INT1#", "AMB_P_INT2#", "BT_EN", "WLAN_EN",
+ "REMOTE_ON_PML#", "SM_D_ACT", "DP1_RST#", "DP2_RST#",
+ "", "", "", "",
+ "", "", "", "";
+
P10-hog {
gpio-hog;
gpios = <8 0>;
diff --git a/arch/arm/boot/dts/imx6q-ba16.dtsi b/arch/arm/boot/dts/imx6q-ba16.dtsi
index e4578ed3371e..6330d75f8f39 100644
--- a/arch/arm/boot/dts/imx6q-ba16.dtsi
+++ b/arch/arm/boot/dts/imx6q-ba16.dtsi
@@ -124,6 +124,9 @@
regulator-name = "usb_otg_vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
+ pinctrl-0 = <&pinctrl_usbotg_vbus>;
+ gpio = <&gpio4 15 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
};
};
@@ -172,7 +175,19 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet>;
phy-mode = "rgmii-id";
+ phy-supply = <&reg_3p3v>;
+ phy-handle = <&phy0>;
status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@4 {
+ reg = <4>;
+ qca,clk-out-frequency = <125000000>;
+ };
+ };
};
&hdmi {
@@ -575,6 +590,12 @@
>;
};
+ pinctrl_usbotg_vbus: usbotgvbusgrp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x000b0
+ >;
+ };
+
pinctrl_usdhc2: usdhc2grp {
fsl,pins = <
MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059
diff --git a/arch/arm/boot/dts/imx6q-bx50v3.dtsi b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
index 2a98cc657595..10922375c51e 100644
--- a/arch/arm/boot/dts/imx6q-bx50v3.dtsi
+++ b/arch/arm/boot/dts/imx6q-bx50v3.dtsi
@@ -173,8 +173,8 @@
&i2c1 {
pinctrl-names = "default", "gpio";
pinctrl-1 = <&pinctrl_i2c1_gpio>;
- sda-gpios = <&gpio5 26 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&gpio5 27 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio5 26 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio5 27 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
pca9547: mux@70 {
compatible = "nxp,pca9547";
@@ -315,15 +315,15 @@
&i2c2 {
pinctrl-names = "default", "gpio";
pinctrl-1 = <&pinctrl_i2c2_gpio>;
- sda-gpios = <&gpio4 13 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&gpio4 12 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio4 13 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio4 12 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
};
&i2c3 {
pinctrl-names = "default", "gpio";
pinctrl-1 = <&pinctrl_i2c3_gpio>;
- sda-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
- scl-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
+ sda-gpios = <&gpio1 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio1 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
};
&iomuxc {
diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
index 236fc205c389..d0768ae429fa 100644
--- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
@@ -406,6 +406,18 @@
vin-supply = <&sw1_reg>;
};
+&reg_pu {
+ vin-supply = <&sw1_reg>;
+};
+
+&reg_vdd1p1 {
+ vin-supply = <&sw2_reg>;
+};
+
+&reg_vdd2p5 {
+ vin-supply = <&sw2_reg>;
+};
+
&uart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1>;
diff --git a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
index 828cf3e39784..c4e146f3341b 100644
--- a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
@@ -126,7 +126,7 @@
compatible = "nxp,pca8574";
reg = <0x3a>;
gpio-controller;
- #gpio-cells = <1>;
+ #gpio-cells = <2>;
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index 959d8ac2e393..8e587e17e75d 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -32,8 +32,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index 8072ed47c6bb..a0710d562766 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -32,8 +32,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 8c9bcdd39830..29ba24c273e9 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -33,8 +33,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
index e5d803d023c8..435dec6338fe 100644
--- a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
@@ -67,8 +67,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
index 290a607fede9..2e61102ae694 100644
--- a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
@@ -24,8 +24,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
index 093a219a77ae..4bc4371e6bae 100644
--- a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
@@ -91,8 +91,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6qdl-gw5903.dtsi b/arch/arm/boot/dts/imx6qdl-gw5903.dtsi
index e1c8dd233cab..1fdb7ba630f1 100644
--- a/arch/arm/boot/dts/imx6qdl-gw5903.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw5903.dtsi
@@ -75,8 +75,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6qdl-gw5904.dtsi b/arch/arm/boot/dts/imx6qdl-gw5904.dtsi
index 3cd2e717c1da..304f3fb88fab 100644
--- a/arch/arm/boot/dts/imx6qdl-gw5904.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw5904.dtsi
@@ -72,8 +72,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6qdl-gw5907.dtsi b/arch/arm/boot/dts/imx6qdl-gw5907.dtsi
index 21c68a55bcb9..fcd3bdfd6182 100644
--- a/arch/arm/boot/dts/imx6qdl-gw5907.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw5907.dtsi
@@ -23,8 +23,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
index ed4e22259959..68e5ab2e27e2 100644
--- a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
@@ -26,8 +26,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6qdl-gw5912.dtsi b/arch/arm/boot/dts/imx6qdl-gw5912.dtsi
index 797f160249f7..0415bcb41640 100644
--- a/arch/arm/boot/dts/imx6qdl-gw5912.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw5912.dtsi
@@ -24,8 +24,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi
index 4cd7d290f5b2..8e23cec7149e 100644
--- a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi
@@ -23,8 +23,6 @@
gpio-keys {
compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
user-pb {
label = "user_pb";
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi
index d434868e870a..51d28e275aa6 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pbab01.dtsi
@@ -78,7 +78,8 @@
ssi2 {
fsl,audmux-port = <1>;
fsl,port-config = <
- (IMX_AUDMUX_V2_PTCR_TFSDIR |
+ (IMX_AUDMUX_V2_PTCR_SYN |
+ IMX_AUDMUX_V2_PTCR_TFSDIR |
IMX_AUDMUX_V2_PTCR_TFSEL(4) |
IMX_AUDMUX_V2_PTCR_TCLKDIR |
IMX_AUDMUX_V2_PTCR_TCSEL(4))
@@ -89,7 +90,7 @@
pins5 {
fsl,audmux-port = <4>;
fsl,port-config = <
- 0x00000000
+ IMX_AUDMUX_V2_PTCR_SYN
IMX_AUDMUX_V2_PDCR_RXDSEL(1)
>;
};
@@ -164,6 +165,7 @@
&usbotg {
status = "okay";
+ dr_mode = "peripheral";
};
&usdhc2 {
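For orientation, the two pbab01 audmux hunks configure the two ends of one AUDMUX link; with the added SYN bit both ports run in synchronous (4-wire) mode, where the receive path reuses the transmit clock and frame sync. A sketch of the pair, assuming the usual fsl-imx-audmux.h macros (the PDCR cell on the SSI side is illustrative — the hunk does not show it):

    #include <dt-bindings/sound/fsl-imx-audmux.h>

    ssi2 {
        fsl,audmux-port = <1>;
        fsl,port-config = <
            /* SSI side: clock and frame sync are outputs, sourced from port 4 */
            (IMX_AUDMUX_V2_PTCR_SYN |
             IMX_AUDMUX_V2_PTCR_TFSDIR |
             IMX_AUDMUX_V2_PTCR_TFSEL(4) |
             IMX_AUDMUX_V2_PTCR_TCLKDIR |
             IMX_AUDMUX_V2_PTCR_TCSEL(4))
            IMX_AUDMUX_V2_PDCR_RXDSEL(4)    /* illustrative: receive data from port 4 */
        >;
    };

    pins5 {
        fsl,audmux-port = <4>;
        fsl,port-config = <
            IMX_AUDMUX_V2_PTCR_SYN          /* pin side: all signals are inputs */
            IMX_AUDMUX_V2_PDCR_RXDSEL(1)    /* receive data from port 1 */
        >;
    };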
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 7a1e53195785..7bd658b7bdda 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -31,6 +31,8 @@
reg_usb_h1_vbus: regulator@1 {
compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbh1_vbus>;
reg = <1>;
regulator-name = "usb_h1_vbus";
regulator-min-microvolt = <5000000>;
@@ -41,6 +43,8 @@
};
gpio_leds: leds {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_leds>;
compatible = "gpio-leds";
green {
@@ -122,6 +126,8 @@
};
pmic@58 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pmic>;
compatible = "dlg,da9063";
reg = <0x58>;
interrupt-parent = <&gpio2>;
@@ -215,25 +221,13 @@
};
&iomuxc {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_hog>;
-
imx6q-phytec-pfla02 {
- pinctrl_hog: hoggrp {
- fsl,pins = <
- MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x80000000
- MX6QDL_PAD_DISP0_DAT3__GPIO4_IO24 0x80000000 /* SPI NOR chipselect */
- MX6QDL_PAD_SD4_DAT1__GPIO2_IO09 0x80000000 /* PMIC interrupt */
- MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x80000000 /* Green LED */
- MX6QDL_PAD_EIM_EB3__GPIO2_IO31 0x80000000 /* Red LED */
- >;
- };
-
pinctrl_ecspi3: ecspi3grp {
fsl,pins = <
MX6QDL_PAD_DISP0_DAT2__ECSPI3_MISO 0x100b1
MX6QDL_PAD_DISP0_DAT1__ECSPI3_MOSI 0x100b1
MX6QDL_PAD_DISP0_DAT0__ECSPI3_SCLK 0x100b1
+ MX6QDL_PAD_DISP0_DAT3__GPIO4_IO24 0x80000000 /* CS0 */
>;
};
@@ -255,6 +249,7 @@
MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0
+ MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x80000000 /* Reset GPIO */
>;
};
@@ -308,10 +303,21 @@
>;
};
+ pinctrl_leds: ledsgrp {
+ fsl,pins = <
+ MX6QDL_PAD_ENET_TXD0__GPIO1_IO30 0x80000000 /* Green LED */
+ MX6QDL_PAD_EIM_EB3__GPIO2_IO31 0x80000000 /* Red LED */
+ >;
+ };
+
pinctrl_pcie: pciegrp {
fsl,pins = <MX6QDL_PAD_DI0_PIN15__GPIO4_IO17 0x80000000>;
};
+ pinctrl_pmic: pmicgrp {
+ fsl,pins = <MX6QDL_PAD_SD4_DAT1__GPIO2_IO09 0x80000000>; /* PMIC interrupt */
+ };
+
pinctrl_uart3: uart3grp {
fsl,pins = <
MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1
@@ -328,9 +334,9 @@
>;
};
- pinctrl_usbh1: usbh1grp {
+ pinctrl_usbh1_vbus: usbh1vbusgrp {
fsl,pins = <
- MX6QDL_PAD_GPIO_0__USB_H1_PWR 0x80000000
+ MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0
>;
};
@@ -415,8 +421,6 @@
&usbh1 {
vbus-supply = <&reg_usb_h1_vbus>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbh1>;
status = "disabled";
};
@@ -433,6 +437,7 @@
pinctrl-0 = <&pinctrl_usdhc2>;
cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&vdd_sd1_reg>;
status = "disabled";
};
@@ -442,5 +447,6 @@
&pinctrl_usdhc3_cdwp>;
cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&vdd_sd0_reg>;
status = "disabled";
};
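A gloss on the pad values shuffled around in this file (my reading of the i.MX pinctrl convention, not stated in the patch): the last cell of each fsl,pins entry is the pad-control word, and 0x80000000 is the NO_PAD_CTL bit — set only the mux, leave the electrical pad configuration at whatever reset or the bootloader left it:

    pinctrl_sketch: sketchgrp {
        fsl,pins = <
            MX6QDL_PAD_EIM_D23__GPIO3_IO23      0x80000000  /* mux only, pad cfg untouched */
            MX6QDL_PAD_EIM_D24__UART3_TX_DATA   0x1b0b1     /* mux plus explicit pad cfg */
        >;
    };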
diff --git a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi
index e6aa0c33754d..fded07f370b3 100644
--- a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi
@@ -506,7 +506,6 @@
};
&ssi1 {
- fsl,mode = "i2s-slave";
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index c070893c509e..b62a0dbb033f 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -97,15 +97,21 @@
&i2c1 {
clock-frequency = <100000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1>;
+ pinctrl-1 = <&pinctrl_i2c1_gpio>;
+ scl-gpios = <&gpio3 21 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio3 28 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
};
&i2c2 {
clock-frequency = <100000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
+ pinctrl-1 = <&pinctrl_i2c2_gpio>;
+ scl-gpios = <&gpio4 12 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio4 13 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
codec: sgtl5000@a {
@@ -185,6 +191,13 @@
>;
};
+ pinctrl_i2c1_gpio: i2c1gpiogrp {
+ fsl,pins = <
+ MX6QDL_PAD_EIM_D21__GPIO3_IO21 0x4001b8b0
+ MX6QDL_PAD_EIM_D28__GPIO3_IO28 0x4001b8b0
+ >;
+ };
+
pinctrl_i2c2: i2c2grp {
fsl,pins = <
MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
@@ -192,6 +205,13 @@
>;
};
+ pinctrl_i2c2_gpio: i2c2gpiogrp {
+ fsl,pins = <
+ MX6QDL_PAD_KEY_COL3__GPIO4_IO12 0x4001b8b0
+ MX6QDL_PAD_KEY_ROW3__GPIO4_IO13 0x4001b8b0
+ >;
+ };
+
pinctrl_mclk: mclkgrp {
fsl,pins = <
MX6QDL_PAD_GPIO_0__CCM_CLKO1 0x130b0
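The wandboard hunks are the stock I2C bus-recovery recipe: a second "gpio" pinctrl state remuxes SCL/SDA as GPIOs, and scl-gpios/sda-gpios hand the same lines to the core so it can clock a stuck bus free. Stripped to its skeleton (pads and GPIO numbers are this board's, taken from the hunk):

    &i2c1 {
        pinctrl-names = "default", "gpio";
        pinctrl-0 = <&pinctrl_i2c1>;        /* pads muxed as I2C */
        pinctrl-1 = <&pinctrl_i2c1_gpio>;   /* same pads muxed as GPIO */
        scl-gpios = <&gpio3 21 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
        sda-gpios = <&gpio3 28 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
    };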
diff --git a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
index 6ea5f918d059..a17b8bbbdb95 100644
--- a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
+++ b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
@@ -97,8 +97,11 @@
pinctrl-1 = <&pinctrl_i2c1_sleep>;
status = "okay";
- /* TODO: embedded controller at 0x43 (driver missing) */
-
+ ec: embedded-controller@43 {
+ compatible = "netronix,ntxec";
+ reg = <0x43>;
+ #pwm-cells = <2>;
+ };
};
&i2c2 {
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
index c593597b2119..5a1e10def6ef 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
@@ -210,9 +210,6 @@
micrel,led-mode = <1>;
clocks = <&clks IMX6UL_CLK_ENET_REF>;
clock-names = "rmii-ref";
- reset-gpios = <&gpio_spi 1 GPIO_ACTIVE_LOW>;
- reset-assert-us = <10000>;
- reset-deassert-us = <100>;
};
@@ -222,9 +219,6 @@
micrel,led-mode = <1>;
clocks = <&clks IMX6UL_CLK_ENET2_REF>;
clock-names = "rmii-ref";
- reset-gpios = <&gpio_spi 2 GPIO_ACTIVE_LOW>;
- reset-assert-us = <10000>;
- reset-deassert-us = <100>;
};
};
};
@@ -243,6 +237,22 @@
status = "okay";
};
+&gpio_spi {
+ eth0-phy-hog {
+ gpio-hog;
+ gpios = <1 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "eth0-phy";
+ };
+
+ eth1-phy-hog {
+ gpio-hog;
+ gpios = <2 GPIO_ACTIVE_HIGH>;
+ output-high;
+ line-name = "eth1-phy";
+ };
+};
+
&i2c1 {
clock-frequency = <100000>;
pinctrl-names = "default";
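Worth noting about the evk hunks: the removed reset-gpios were GPIO_ACTIVE_LOW, while the replacement hogs use GPIO_ACTIVE_HIGH with output-high — they drive the physical line high, i.e. hold the PHYs out of their active-low reset from the moment the expander probes. The hog shape, annotated:

    eth0-phy-hog {
        gpio-hog;                       /* line is claimed by the controller itself */
        gpios = <1 GPIO_ACTIVE_HIGH>;   /* expander line 1, raw polarity */
        output-high;                    /* drive high = deassert the active-low reset */
        line-name = "eth0-phy";
    };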
diff --git a/arch/arm/boot/dts/imx6ull-colibri.dtsi b/arch/arm/boot/dts/imx6ull-colibri.dtsi
index 4436556624d6..0cdbf7b6e728 100644
--- a/arch/arm/boot/dts/imx6ull-colibri.dtsi
+++ b/arch/arm/boot/dts/imx6ull-colibri.dtsi
@@ -522,12 +522,12 @@
pinctrl_usdhc2: usdhc2-grp {
fsl,pins = <
- MX6UL_PAD_CSI_DATA00__USDHC2_DATA0 0x17059
- MX6UL_PAD_CSI_DATA01__USDHC2_DATA1 0x17059
- MX6UL_PAD_CSI_DATA02__USDHC2_DATA2 0x17059
- MX6UL_PAD_CSI_DATA03__USDHC2_DATA3 0x17059
- MX6UL_PAD_CSI_HSYNC__USDHC2_CMD 0x17059
- MX6UL_PAD_CSI_VSYNC__USDHC2_CLK 0x17059
+ MX6UL_PAD_CSI_DATA00__USDHC2_DATA0 0x17069
+ MX6UL_PAD_CSI_DATA01__USDHC2_DATA1 0x17069
+ MX6UL_PAD_CSI_DATA02__USDHC2_DATA2 0x17069
+ MX6UL_PAD_CSI_DATA03__USDHC2_DATA3 0x17069
+ MX6UL_PAD_CSI_HSYNC__USDHC2_CMD 0x17069
+ MX6UL_PAD_CSI_VSYNC__USDHC2_CLK 0x17069
MX6UL_PAD_GPIO1_IO03__OSC32K_32K_OUT 0x10
>;
diff --git a/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts b/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
index ecbb2cc5b9ab..79cc45728cd2 100644
--- a/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
+++ b/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
@@ -14,5 +14,6 @@
};
&gpmi {
+ fsl,use-minimum-ecc;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx7d-mba7.dts b/arch/arm/boot/dts/imx7d-mba7.dts
index 5ef86de53013..23856a8d4b8c 100644
--- a/arch/arm/boot/dts/imx7d-mba7.dts
+++ b/arch/arm/boot/dts/imx7d-mba7.dts
@@ -99,8 +99,6 @@
/* probe deferral not supported */
/* pcie-bus-supply = <&reg_mpcie_1v5>; */
reset-gpio = <&gpio5 12 GPIO_ACTIVE_LOW>;
- disable-gpio = <&gpio2 29 GPIO_ACTIVE_LOW>;
- power-on-gpio = <&gpio2 30 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx7d-meerkat96.dts b/arch/arm/boot/dts/imx7d-meerkat96.dts
index 5339210b63d0..dd8003bd1fc0 100644
--- a/arch/arm/boot/dts/imx7d-meerkat96.dts
+++ b/arch/arm/boot/dts/imx7d-meerkat96.dts
@@ -193,7 +193,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
keep-power-in-suspend;
- tuning-step = <2>;
+ fsl,tuning-step = <2>;
vmmc-supply = <&reg_3p3v>;
no-1-8-v;
broken-cd;
diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
index e57da0d32b98..e519897fae08 100644
--- a/arch/arm/boot/dts/imx7d-pico.dtsi
+++ b/arch/arm/boot/dts/imx7d-pico.dtsi
@@ -351,7 +351,7 @@
pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
bus-width = <4>;
- tuning-step = <2>;
+ fsl,tuning-step = <2>;
vmmc-supply = <&reg_3p3v>;
wakeup-source;
no-1-8-v;
diff --git a/arch/arm/boot/dts/imx7d-remarkable2.dts b/arch/arm/boot/dts/imx7d-remarkable2.dts
new file mode 100644
index 000000000000..8cbae656395c
--- /dev/null
+++ b/arch/arm/boot/dts/imx7d-remarkable2.dts
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (C) 2015 Freescale Semiconductor, Inc.
+ * Copyright (C) 2019 reMarkable AS - http://www.remarkable.com/
+ *
+ */
+
+/dts-v1/;
+
+#include "imx7d.dtsi"
+
+/ {
+ model = "reMarkable 2.0";
+ compatible = "remarkable,imx7d-remarkable2", "fsl,imx7d";
+
+ chosen {
+ stdout-path = &uart6;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>;
+ };
+};
+
+&clks {
+ assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>,
+ <&clks IMX7D_CLKO2_ROOT_DIV>;
+ assigned-clock-parents = <&clks IMX7D_CKIL>;
+ assigned-clock-rates = <0>, <32768>;
+};
+
+&snvs_pwrkey {
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ assigned-clocks = <&clks IMX7D_UART1_ROOT_SRC>;
+ assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>;
+ status = "okay";
+};
+
+&uart6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart6>;
+ assigned-clocks = <&clks IMX7D_UART6_ROOT_SRC>;
+ assigned-clock-parents = <&clks IMX7D_OSC_24M_CLK>;
+ status = "okay";
+};
+
+&usbotg2 {
+ srp-disable;
+ hnp-disable;
+ status = "okay";
+};
+
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ pinctrl-3 = <&pinctrl_usdhc3>;
+ assigned-clocks = <&clks IMX7D_USDHC3_ROOT_CLK>;
+ assigned-clock-rates = <400000000>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+};
+
+&iomuxc {
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX7D_PAD_UART1_TX_DATA__UART1_DCE_TX 0x79
+ MX7D_PAD_UART1_RX_DATA__UART1_DCE_RX 0x79
+ >;
+ };
+
+ pinctrl_uart6: uart6grp {
+ fsl,pins = <
+ MX7D_PAD_EPDC_DATA09__UART6_DCE_TX 0x79
+ MX7D_PAD_EPDC_DATA08__UART6_DCE_RX 0x79
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX7D_PAD_SD3_CMD__SD3_CMD 0x59
+ MX7D_PAD_SD3_CLK__SD3_CLK 0x19
+ MX7D_PAD_SD3_DATA0__SD3_DATA0 0x59
+ MX7D_PAD_SD3_DATA1__SD3_DATA1 0x59
+ MX7D_PAD_SD3_DATA2__SD3_DATA2 0x59
+ MX7D_PAD_SD3_DATA3__SD3_DATA3 0x59
+ MX7D_PAD_SD3_DATA4__SD3_DATA4 0x59
+ MX7D_PAD_SD3_DATA5__SD3_DATA5 0x59
+ MX7D_PAD_SD3_DATA6__SD3_DATA6 0x59
+ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x59
+ MX7D_PAD_SD3_STROBE__SD3_STROBE 0x19
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3grp_100mhz {
+ fsl,pins = <
+ MX7D_PAD_SD3_CMD__SD3_CMD 0x5a
+ MX7D_PAD_SD3_CLK__SD3_CLK 0x1a
+ MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5a
+ MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5a
+ MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5a
+ MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5a
+ MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5a
+ MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5a
+ MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5a
+ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5a
+ MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1a
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3grp_200mhz {
+ fsl,pins = <
+ MX7D_PAD_SD3_CMD__SD3_CMD 0x5b
+ MX7D_PAD_SD3_CLK__SD3_CLK 0x1b
+ MX7D_PAD_SD3_DATA0__SD3_DATA0 0x5b
+ MX7D_PAD_SD3_DATA1__SD3_DATA1 0x5b
+ MX7D_PAD_SD3_DATA2__SD3_DATA2 0x5b
+ MX7D_PAD_SD3_DATA3__SD3_DATA3 0x5b
+ MX7D_PAD_SD3_DATA4__SD3_DATA4 0x5b
+ MX7D_PAD_SD3_DATA5__SD3_DATA5 0x5b
+ MX7D_PAD_SD3_DATA6__SD3_DATA6 0x5b
+ MX7D_PAD_SD3_DATA7__SD3_DATA7 0x5b
+ MX7D_PAD_SD3_STROBE__SD3_STROBE 0x1b
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX7D_PAD_ENET1_COL__WDOG1_WDOG_ANY 0x74
+ >;
+ };
+};
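The usdhc3 node above relies on the MMC core's named pinctrl states: "default", "state_100mhz" and "state_200mhz" are switched automatically as the eMMC is clocked up, which is why the three pin groups are identical except for the pad word (0x59 / 0x5a / 0x5b — only the low bits, the drive-strength field on this SoC, change). The selection boilerplate alone:

    &usdhc3 {
        pinctrl-names = "default", "state_100mhz", "state_200mhz", "sleep";
        pinctrl-0 = <&pinctrl_usdhc3>;          /* base-speed pads */
        pinctrl-1 = <&pinctrl_usdhc3_100mhz>;   /* pads for 100 MHz signalling */
        pinctrl-2 = <&pinctrl_usdhc3_200mhz>;   /* pads for 200 MHz signalling */
        pinctrl-3 = <&pinctrl_usdhc3>;          /* "sleep" reuses the default pads */
    };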
diff --git a/arch/arm/boot/dts/iwg20d-q7-common.dtsi b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
index 63cafd220dba..bc857676d191 100644
--- a/arch/arm/boot/dts/iwg20d-q7-common.dtsi
+++ b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
@@ -325,8 +325,8 @@
rcar_sound,dai {
dai0 {
- playback = <&ssi1 &src3 &dvc1>;
- capture = <&ssi0 &src2 &dvc0>;
+ playback = <&ssi1>, <&src3>, <&dvc1>;
+ capture = <&ssi0>, <&src2>, <&dvc0>;
};
};
};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 007dd2bd0595..4fce81422943 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -246,6 +246,7 @@
reg = <0x0 0x1700000 0x0 0x100000>;
ranges = <0x0 0x0 0x1700000 0x100000>;
interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
sec_jr0: jr@10000 {
compatible = "fsl,sec-v5.0-job-ring",
@@ -871,7 +872,7 @@
phy_type = "ulpi";
};
- usb3: usb3@3100000 {
+ usb3: usb@3100000 {
compatible = "snps,dwc3";
reg = <0x0 0x3100000 0x0 0x10000>;
interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
index 08a7d3ce383f..ea02fd403a9b 100644
--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
@@ -27,16 +27,14 @@
cpcap_battery: battery {
compatible = "motorola,cpcap-battery";
- interrupts-extended = <
- &cpcap 6 0 &cpcap 5 0 &cpcap 3 0
- &cpcap 20 0 &cpcap 54 0 &cpcap 57 0
- >;
+ interrupts-extended =
+ <&cpcap 6 0>, <&cpcap 5 0>, <&cpcap 3 0>,
+ <&cpcap 20 0>, <&cpcap 54 0>, <&cpcap 57 0>;
interrupt-names =
"eol", "lowbph", "lowbpl",
- "chrgcurr1", "battdetb",
- "cccal";
- io-channels = <&cpcap_adc 0 &cpcap_adc 1
- &cpcap_adc 5 &cpcap_adc 6>;
+ "chrgcurr1", "battdetb", "cccal";
+ io-channels = <&cpcap_adc 0>, <&cpcap_adc 1>,
+ <&cpcap_adc 5>, <&cpcap_adc 6>;
io-channel-names = "battdetb", "battp",
"chg_isense", "batti";
power-supplies = <&cpcap_charger>;
@@ -44,20 +42,19 @@
cpcap_charger: charger {
compatible = "motorola,mapphone-cpcap-charger";
- interrupts-extended = <
- &cpcap 13 0 &cpcap 12 0 &cpcap 29 0 &cpcap 28 0
- &cpcap 22 0 &cpcap 21 0 &cpcap 20 0 &cpcap 19 0
- &cpcap 54 0
- >;
+ interrupts-extended =
+ <&cpcap 13 0>, <&cpcap 12 0>, <&cpcap 29 0>,
+ <&cpcap 28 0>, <&cpcap 22 0>, <&cpcap 21 0>,
+ <&cpcap 20 0>, <&cpcap 19 0>, <&cpcap 54 0>;
interrupt-names =
- "chrg_det", "rvrs_chrg", "chrg_se1b", "se0conn",
- "rvrs_mode", "chrgcurr2", "chrgcurr1", "vbusvld",
- "battdetb";
- mode-gpios = <&gpio3 29 GPIO_ACTIVE_LOW
- &gpio3 23 GPIO_ACTIVE_LOW>;
- io-channels = <&cpcap_adc 0 &cpcap_adc 1
- &cpcap_adc 2 &cpcap_adc 5
- &cpcap_adc 6>;
+ "chrg_det", "rvrs_chrg", "chrg_se1b",
+ "se0conn", "rvrs_mode", "chrgcurr2",
+ "chrgcurr1", "vbusvld", "battdetb";
+ mode-gpios = <&gpio3 29 GPIO_ACTIVE_LOW>,
+ <&gpio3 23 GPIO_ACTIVE_LOW>;
+ io-channels = <&cpcap_adc 0>, <&cpcap_adc 1>,
+ <&cpcap_adc 2>, <&cpcap_adc 5>,
+ <&cpcap_adc 6>;
io-channel-names = "battdetb", "battp",
"vbus", "chg_isense",
"batti";
@@ -98,22 +95,22 @@
cpcap_usb2_phy: phy {
compatible = "motorola,mapphone-cpcap-usb-phy";
- pinctrl-0 = <&usb_gpio_mux_sel1 &usb_gpio_mux_sel2>;
+ pinctrl-0 = <&usb_gpio_mux_sel1>, <&usb_gpio_mux_sel2>;
pinctrl-1 = <&usb_ulpi_pins>;
pinctrl-2 = <&usb_utmi_pins>;
pinctrl-3 = <&uart3_pins>;
pinctrl-names = "default", "ulpi", "utmi", "uart";
#phy-cells = <0>;
- interrupts-extended = <
- &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
- &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
- &cpcap 48 0
- >;
+ interrupts-extended =
+ <&cpcap 15 0>, <&cpcap 14 0>, <&cpcap 28 0>,
+ <&cpcap 19 0>, <&cpcap 18 0>, <&cpcap 17 0>,
+ <&cpcap 16 0>, <&cpcap 49 0>, <&cpcap 48 0>;
interrupt-names =
- "id_ground", "id_float", "se0conn", "vbusvld",
- "sessvld", "sessend", "se1", "dm", "dp";
- mode-gpios = <&gpio2 28 GPIO_ACTIVE_HIGH
- &gpio1 0 GPIO_ACTIVE_HIGH>;
+ "id_ground", "id_float", "se0conn",
+ "vbusvld", "sessvld", "sessend",
+ "se1", "dm", "dp";
+ mode-gpios = <&gpio2 28 GPIO_ACTIVE_HIGH>,
+ <&gpio1 0 GPIO_ACTIVE_HIGH>;
io-channels = <&cpcap_adc 2>, <&cpcap_adc 7>;
io-channel-names = "vbus", "id";
vusb-supply = <&vusb>;
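The three cpcap hunks (and the iwg20d one before them) are the same mechanical conversion: one phandle-plus-arguments entry per angle-bracket group. Both spellings compile to identical cells; the bracketed form is simply the style that DT schema tooling can validate entry by entry. In miniature:

    /* old: one flat cell list, entry boundaries only implied */
    io-channels = <&cpcap_adc 0 &cpcap_adc 1>;

    /* new: one <phandle args> group per entry */
    io-channels = <&cpcap_adc 0>, <&cpcap_adc 1>;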
diff --git a/arch/arm/boot/dts/mstar-infinity2m-ssd202d-unitv2.dts b/arch/arm/boot/dts/mstar-infinity2m-ssd202d-unitv2.dts
new file mode 100644
index 000000000000..a81684002e45
--- /dev/null
+++ b/arch/arm/boot/dts/mstar-infinity2m-ssd202d-unitv2.dts
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2021 thingy.jp.
+ * Author: Daniel Palmer <daniel@thingy.jp>
+ */
+
+/dts-v1/;
+#include "mstar-infinity2m-ssd202d.dtsi"
+
+/ {
+ model = "UnitV2";
+ compatible = "m5stack,unitv2", "mstar,infinity2m";
+
+ aliases {
+ serial0 = &pm_uart;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&pm_uart {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/mstar-v7.dtsi b/arch/arm/boot/dts/mstar-v7.dtsi
index b0a21b0b731f..075d583d6f40 100644
--- a/arch/arm/boot/dts/mstar-v7.dtsi
+++ b/arch/arm/boot/dts/mstar-v7.dtsi
@@ -6,6 +6,7 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/mstar-msc313-mpll.h>
/ {
#address-cells = <1>;
@@ -46,6 +47,21 @@
interrupt-affinity = <&cpu0>;
};
+ clocks: clocks {
+ xtal: xtal {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ };
+
+ rtc_xtal: rtc_xtal {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ status = "disabled";
+ };
+ };
+
soc: soc {
compatible = "simple-bus";
#address-cells = <1>;
@@ -109,6 +125,13 @@
reg = <0x204400 0x200>;
};
+ mpll: mpll@206000 {
+ compatible = "mstar,msc313-mpll";
+ #clock-cells = <1>;
+ reg = <0x206000 0x200>;
+ clocks = <&xtal>;
+ };
+
gpio: gpio@207800 {
#gpio-cells = <2>;
reg = <0x207800 0x200>;
diff --git a/arch/arm/boot/dts/mt2701.dtsi b/arch/arm/boot/dts/mt2701.dtsi
index fade14284017..4776f85d6d5b 100644
--- a/arch/arm/boot/dts/mt2701.dtsi
+++ b/arch/arm/boot/dts/mt2701.dtsi
@@ -607,7 +607,7 @@
};
usb0: usb@1a1c0000 {
- compatible = "mediatek,mt8173-xhci";
+ compatible = "mediatek,mt2701-xhci", "mediatek,mtk-xhci";
reg = <0 0x1a1c0000 0 0x1000>,
<0 0x1a1c4700 0 0x0100>;
reg-names = "mac", "ippc";
@@ -620,8 +620,9 @@
status = "disabled";
};
- u3phy0: usb-phy@1a1c4000 {
- compatible = "mediatek,mt2701-u3phy";
+ u3phy0: t-phy@1a1c4000 {
+ compatible = "mediatek,mt2701-tphy",
+ "mediatek,generic-tphy-v1";
reg = <0 0x1a1c4000 0 0x0700>;
#address-cells = <2>;
#size-cells = <2>;
@@ -646,7 +647,7 @@
};
usb1: usb@1a240000 {
- compatible = "mediatek,mt8173-xhci";
+ compatible = "mediatek,mt2701-xhci", "mediatek,mtk-xhci";
reg = <0 0x1a240000 0 0x1000>,
<0 0x1a244700 0 0x0100>;
reg-names = "mac", "ippc";
@@ -659,8 +660,9 @@
status = "disabled";
};
- u3phy1: usb-phy@1a244000 {
- compatible = "mediatek,mt2701-u3phy";
+ u3phy1: t-phy@1a244000 {
+ compatible = "mediatek,mt2701-tphy",
+ "mediatek,generic-tphy-v1";
reg = <0 0x1a244000 0 0x0700>;
#address-cells = <2>;
#size-cells = <2>;
@@ -700,8 +702,9 @@
status = "disabled";
};
- u2phy0: usb-phy@11210000 {
- compatible = "mediatek,generic-tphy-v1";
+ u2phy0: t-phy@11210000 {
+ compatible = "mediatek,mt2701-tphy",
+ "mediatek,generic-tphy-v1";
reg = <0 0x11210000 0 0x0800>;
#address-cells = <2>;
#size-cells = <2>;
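These conversions all target the generic T-PHY binding: the t-phy parent just maps the register block, each lane is a child usb-phy@/pcie-phy@ sub-node with #phy-cells = <1>, and the consumer picks the lane's mode with a PHY_TYPE_* constant from dt-bindings/phy/phy.h. A sketch under that binding (the child address and reference clock are illustrative):

    #include <dt-bindings/phy/phy.h>

    u3phy0: t-phy@1a1c4000 {
        compatible = "mediatek,mt2701-tphy",
                     "mediatek,generic-tphy-v1";
        reg = <0 0x1a1c4000 0 0x0700>;
        #address-cells = <2>;
        #size-cells = <2>;
        ranges;

        u2port0: usb-phy@1a1c4800 {
            reg = <0 0x1a1c4800 0 0x0100>;  /* illustrative lane offset */
            clocks = <&clk26m>;
            clock-names = "ref";
            #phy-cells = <1>;
        };
    };

    /* consumer side: the extra cell selects the lane mode */
    &usb0 {
        phys = <&u2port0 PHY_TYPE_USB2>;
    };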
diff --git a/arch/arm/boot/dts/mt6589.dtsi b/arch/arm/boot/dts/mt6589.dtsi
index f3ccb70c0779..70df00a7bb26 100644
--- a/arch/arm/boot/dts/mt6589.dtsi
+++ b/arch/arm/boot/dts/mt6589.dtsi
@@ -17,6 +17,7 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
+ enable-method = "mediatek,mt6589-smp";
cpu@0 {
device_type = "cpu";
diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi
index aea6809500d7..3c11f7cfcc40 100644
--- a/arch/arm/boot/dts/mt7623.dtsi
+++ b/arch/arm/boot/dts/mt7623.dtsi
@@ -787,8 +787,9 @@
};
};
- pcie0_phy: pcie-phy@1a149000 {
- compatible = "mediatek,generic-tphy-v1";
+ pcie0_phy: t-phy@1a149000 {
+ compatible = "mediatek,mt7623-tphy",
+ "mediatek,generic-tphy-v1";
reg = <0 0x1a149000 0 0x0700>;
#address-cells = <2>;
#size-cells = <2>;
@@ -804,8 +805,9 @@
};
};
- pcie1_phy: pcie-phy@1a14a000 {
- compatible = "mediatek,generic-tphy-v1";
+ pcie1_phy: t-phy@1a14a000 {
+ compatible = "mediatek,mt7623-tphy",
+ "mediatek,generic-tphy-v1";
reg = <0 0x1a14a000 0 0x0700>;
#address-cells = <2>;
#size-cells = <2>;
@@ -823,7 +825,7 @@
usb1: usb@1a1c0000 {
compatible = "mediatek,mt7623-xhci",
- "mediatek,mt8173-xhci";
+ "mediatek,mtk-xhci";
reg = <0 0x1a1c0000 0 0x1000>,
<0 0x1a1c4700 0 0x0100>;
reg-names = "mac", "ippc";
@@ -836,9 +838,9 @@
status = "disabled";
};
- u3phy1: usb-phy@1a1c4000 {
- compatible = "mediatek,mt7623-u3phy",
- "mediatek,mt2701-u3phy";
+ u3phy1: t-phy@1a1c4000 {
+ compatible = "mediatek,mt7623-tphy",
+ "mediatek,generic-tphy-v1";
reg = <0 0x1a1c4000 0 0x0700>;
#address-cells = <2>;
#size-cells = <2>;
@@ -864,7 +866,7 @@
usb2: usb@1a240000 {
compatible = "mediatek,mt7623-xhci",
- "mediatek,mt8173-xhci";
+ "mediatek,mtk-xhci";
reg = <0 0x1a240000 0 0x1000>,
<0 0x1a244700 0 0x0100>;
reg-names = "mac", "ippc";
@@ -877,9 +879,9 @@
status = "disabled";
};
- u3phy2: usb-phy@1a244000 {
- compatible = "mediatek,mt7623-u3phy",
- "mediatek,mt2701-u3phy";
+ u3phy2: t-phy@1a244000 {
+ compatible = "mediatek,mt7623-tphy",
+ "mediatek,generic-tphy-v1";
reg = <0 0x1a244000 0 0x0700>;
#address-cells = <2>;
#size-cells = <2>;
diff --git a/arch/arm/boot/dts/mt7623n.dtsi b/arch/arm/boot/dts/mt7623n.dtsi
index 1880ac9e32cf..bcb0846e29fd 100644
--- a/arch/arm/boot/dts/mt7623n.dtsi
+++ b/arch/arm/boot/dts/mt7623n.dtsi
@@ -246,7 +246,7 @@
status = "disabled";
};
- mipi_tx0: mipi-dphy@10010000 {
+ mipi_tx0: dsi-phy@10010000 {
compatible = "mediatek,mt7623-mipi-tx",
"mediatek,mt2701-mipi-tx";
reg = <0 0x10010000 0 0x90>;
@@ -265,7 +265,7 @@
status = "disabled";
};
- hdmi_phy: phy@10209100 {
+ hdmi_phy: hdmi-phy@10209100 {
compatible = "mediatek,mt7623-hdmi-phy",
"mediatek,mt2701-hdmi-phy";
reg = <0 0x10209100 0 0x24>;
diff --git a/arch/arm/boot/dts/mt7629.dtsi b/arch/arm/boot/dts/mt7629.dtsi
index 5cbb3d244c75..874043f0490d 100644
--- a/arch/arm/boot/dts/mt7629.dtsi
+++ b/arch/arm/boot/dts/mt7629.dtsi
@@ -329,8 +329,9 @@
status = "disabled";
};
- u3phy0: usb-phy@1a0c4000 {
- compatible = "mediatek,generic-tphy-v2";
+ u3phy0: t-phy@1a0c4000 {
+ compatible = "mediatek,mt7629-tphy",
+ "mediatek,generic-tphy-v2";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x1a0c4000 0xe00>;
@@ -413,14 +414,15 @@
};
};
- pciephy1: pcie-phy@1a14a000 {
- compatible = "mediatek,generic-tphy-v2";
+ pciephy1: t-phy@1a14a000 {
+ compatible = "mediatek,mt7629-tphy",
+ "mediatek,generic-tphy-v2";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0x1a14a000 0x1000>;
status = "disabled";
- pcieport1: port1phy@0 {
+ pcieport1: pcie-phy@0 {
reg = <0 0x1000>;
clocks = <&clk20m>;
clock-names = "ref";
diff --git a/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts
new file mode 100644
index 000000000000..eb6eb21cb2a4
--- /dev/null
+++ b/arch/arm/boot/dts/nuvoton-npcm730-gbs.dts
@@ -0,0 +1,1135 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Quanta Computer Inc. George.Hung@quantatw.com
+
+/dts-v1/;
+#include "nuvoton-npcm730.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Quanta GBS Board (Device Tree)";
+ compatible = "quanta,gbs-bmc","nuvoton,npcm730";
+
+ aliases {
+ ethernet1 = &gmac0;
+ serial0 = &serial0;
+ serial1 = &serial1;
+ serial2 = &serial2;
+ serial3 = &serial3;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ i2c6 = &i2c6;
+ i2c7 = &i2c7;
+ i2c8 = &i2c8;
+ i2c9 = &i2c9;
+ i2c10 = &i2c10;
+ i2c11 = &i2c11;
+ i2c12 = &i2c12;
+ i2c13 = &i2c13;
+ i2c14 = &i2c14;
+ i2c15 = &i2c15;
+ i2c16 = &i2c0_slotPE0_0;
+ i2c17 = &i2c0_slotPE1_1;
+ i2c18 = &i2c0_slotUSB_2;
+ i2c19 = &i2c0_3;
+ i2c20 = &i2c5_i2cool_0;
+ i2c21 = &i2c5_i2cool_1;
+ i2c22 = &i2c5_i2cool_2;
+ i2c23 = &i2c5_hsbp_fru_3;
+ i2c24 = &i2c6_u2_15_0;
+ i2c25 = &i2c6_u2_14_1;
+ i2c26 = &i2c6_u2_13_2;
+ i2c27 = &i2c6_u2_12_3;
+ i2c28 = &i2c7_u2_11_0;
+ i2c29 = &i2c7_u2_10_1;
+ i2c30 = &i2c7_u2_9_2;
+ i2c31 = &i2c7_u2_8_3;
+ i2c32 = &i2c9_vddcr_cpu;
+ i2c33 = &i2c9_vddcr_soc;
+ i2c34 = &i2c9_vddio_efgh;
+ i2c35 = &i2c9_vddio_abcd;
+ i2c36 = &i2c10_u2_7_0;
+ i2c37 = &i2c10_u2_6_1;
+ i2c38 = &i2c10_u2_5_2;
+ i2c39 = &i2c10_u2_4_3;
+ i2c40 = &i2c11_clk_buf0_0;
+ i2c41 = &i2c11_clk_buf1_1;
+ i2c42 = &i2c11_clk_buf2_2;
+ i2c43 = &i2c11_clk_buf3_3;
+ i2c44 = &i2c14_u2_3_0;
+ i2c45 = &i2c14_u2_2_1;
+ i2c46 = &i2c14_u2_1_2;
+ i2c47 = &i2c14_u2_0_3;
+ fiu0 = &fiu0;
+ fiu1 = &fiu3;
+ };
+
+ chosen {
+ stdout-path = &serial0;
+ };
+
+ memory {
+ reg = <0 0x40000000>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ sas-cable0 {
+ label = "sas-cable0";
+ gpios = <&gpio2 9 GPIO_ACTIVE_LOW>;
+ linux,code = <73>;
+ };
+
+ sas-cable1 {
+ label = "sas-cable1";
+ gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
+ linux,code = <72>;
+ };
+
+ sas-cable2 {
+ label = "sas-cable2";
+ gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
+ linux,code = <71>;
+ };
+
+ sas-cable3 {
+ label = "sas-cable3";
+ gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
+ linux,code = <70>;
+ };
+
+ sata0 {
+ label = "sata0";
+ gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
+ linux,code = <5>;
+ };
+
+ hsbp-cable {
+ label = "hsbp-cable";
+ gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
+ linux,code = <57>;
+ };
+
+ fanbd-cable {
+ label = "fanbd-cable";
+ gpios = <&gpio1 26 GPIO_ACTIVE_HIGH>;
+ linux,code = <58>;
+ };
+
+ bp12v-cable {
+ label = "bp12v-cable";
+ gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>;
+ linux,code = <69>;
+ };
+
+ pe-slot0 {
+ label = "pe-slot0";
+ gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
+ linux,code = <120>;
+ };
+
+ pe-slot1 {
+ label = "pe-slot1";
+ gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+ linux,code = <121>;
+ };
+ };
+
+ iio-hwmon {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 1>, <&adc 2>;
+ };
+
+ iio-hwmon-battery {
+ compatible = "iio-hwmon";
+ io-channels = <&adc 0>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ heartbeat { /* gpio153 */
+ gpios = <&gpio4 25 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+ };
+
+ attention { /* gpio215 */
+ gpios = <&gpio6 23 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ sys_boot_status { /* gpio216 */
+ gpios = <&gpio6 24 GPIO_ACTIVE_HIGH>;
+ default-state = "keep";
+ retain-state-shutdown;
+ };
+
+ bmc_fault { /* gpio217 */
+ gpios = <&gpio6 25 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ linux,default-trigger = "panic";
+ panic-indicator;
+ };
+
+ led_u2_0_locate {
+ gpios = <&pca9535_ledlocate 3 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_1_locate {
+ gpios = <&pca9535_ledlocate 2 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_2_locate {
+ gpios = <&pca9535_ledlocate 1 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_3_locate {
+ gpios = <&pca9535_ledlocate 0 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_4_locate {
+ gpios = <&pca9535_ledlocate 7 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_5_locate {
+ gpios = <&pca9535_ledlocate 6 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_6_locate {
+ gpios = <&pca9535_ledlocate 5 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_7_locate {
+ gpios = <&pca9535_ledlocate 4 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_8_locate {
+ gpios = <&pca9535_ledlocate 11 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_9_locate {
+ gpios = <&pca9535_ledlocate 10 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_10_locate {
+ gpios = <&pca9535_ledlocate 9 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_11_locate {
+ gpios = <&pca9535_ledlocate 8 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_12_locate {
+ gpios = <&pca9535_ledlocate 15 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_13_locate {
+ gpios = <&pca9535_ledlocate 14 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_14_locate {
+ gpios = <&pca9535_ledlocate 13 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_15_locate {
+ gpios = <&pca9535_ledlocate 12 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_0_fault {
+ gpios = <&pca9535_ledfault 3 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_1_fault {
+ gpios = <&pca9535_ledfault 2 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_2_fault {
+ gpios = <&pca9535_ledfault 1 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_3_fault {
+ gpios = <&pca9535_ledfault 0 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_4_fault {
+ gpios = <&pca9535_ledfault 7 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_5_fault {
+ gpios = <&pca9535_ledfault 6 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_6_fault {
+ gpios = <&pca9535_ledfault 5 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_7_fault {
+ gpios = <&pca9535_ledfault 4 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_8_fault {
+ gpios = <&pca9535_ledfault 11 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_9_fault {
+ gpios = <&pca9535_ledfault 10 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_10_fault {
+ gpios = <&pca9535_ledfault 9 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_11_fault {
+ gpios = <&pca9535_ledfault 8 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_12_fault {
+ gpios = <&pca9535_ledfault 15 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_13_fault {
+ gpios = <&pca9535_ledfault 14 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_14_fault {
+ gpios = <&pca9535_ledfault 13 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led_u2_15_fault {
+ gpios = <&pca9535_ledfault 12 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ seven-seg-disp {
+ compatible = "seven-seg-gpio-dev";
+ refresh-interval-ms = /bits/ 16 <600>;
+ clock-gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
+ data-gpios = <&gpio0 3 GPIO_ACTIVE_HIGH>;
+ clear-gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+ };
+
+ pcie-slot {
+ pcie1: pcie-slot@1 {
+ label = "PE0";
+ };
+ pcie2: pcie-slot@2 {
+ label = "PE1";
+ };
+ };
+};
+
+&fiu0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0cs1_pins>;
+ status = "okay";
+ spi-nor@0 {
+ compatible = "jedec,spi-nor";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ spi-rx-bus-width = <2>;
+ label = "bmc";
+ partitions@80000000 {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ u-boot@0 {
+ label = "u-boot";
+ reg = <0x0000000 0xf0000>;
+ };
+ image-descriptor@f0000 {
+ label = "image-descriptor";
+ reg = <0xf0000 0x10000>;
+ };
+ hoth-update@100000 {
+ label = "hoth-update";
+ reg = <0x100000 0x100000>;
+ };
+ kernel@200000 {
+ label = "kernel";
+ reg = <0x200000 0x500000>;
+ };
+ rofs@700000 {
+ label = "rofs";
+ reg = <0x700000 0x35f0000>;
+ };
+ rwfs@3cf0000 {
+ label = "rwfs";
+ reg = <0x3cf0000 0x300000>;
+ };
+ hoth-mailbox@3ff0000 {
+ label = "hoth-mailbox";
+ reg = <0x3ff0000 0x10000>;
+ };
+ };
+ };
+};
+
+&fiu3 {
+ pinctrl-0 = <&spi3_pins>, <&spi3cs1_pins>;
+ status = "okay";
+
+ spi-nor@0 {
+ compatible = "jedec,spi-nor";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
+ m25p,fast-read;
+ label = "pnor";
+ };
+ spi-nor@1 {
+ compatible = "jedec,spi-nor";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <1>;
+ spi-max-frequency = <50000000>;
+ spi-rx-bus-width = <2>;
+ m25p,fast-read;
+ };
+};
+
+&gcr {
+ serial_port_mux: uart-mux-controller {
+ compatible = "mmio-mux";
+ #mux-control-cells = <1>;
+ mux-reg-masks = <0x38 0x07>;
+ idle-states = <2>; /* Serial port mode 3 (takeover) */
+ };
+
+ uart1_mode_mux: uart1-mode-mux-controller {
+ compatible = "mmio-mux";
+ #mux-control-cells = <1>;
+ mux-reg-masks = <0x64 0x01000000>;
+ idle-states = <0>; /* Set UART1 mode to normal (follow SPMOD) */
+ };
+};
+
+&gmac0 {
+ status = "okay";
+ phy-mode = "rgmii-id";
+ snps,eee-force-disable;
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&watchdog1 {
+ status = "okay";
+};
+
+&rng {
+ status = "okay";
+};
+
+&serial0 {
+ status = "okay";
+};
+
+&serial1 {
+ status = "okay";
+};
+
+&serial2 {
+ status = "okay";
+};
+
+&serial3 {
+ status = "okay";
+};
+
+&adc {
+ #io-channel-cells = <1>;
+ status = "okay";
+};
+
+&lpc_kcs {
+ kcs1: kcs1@0 {
+ status = "okay";
+ };
+
+ kcs2: kcs2@0 {
+ status = "okay";
+ };
+
+ kcs3: kcs3@0 {
+ status = "okay";
+ };
+};
+
+&spi1 {
+ cs-gpios = <&gpio4 19 GPIO_ACTIVE_HIGH>; /* dummy - gpio147 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpio224ol_pins &gpio227o_pins
+ &gpio228_pins>;
+ status = "okay";
+
+ jtag_master@0 {
+ compatible = "nuvoton,npcm750-jtag-master";
+ spi-max-frequency = <25000000>;
+ reg = <0>;
+ status = "okay";
+
+ pinctrl-names = "pspi", "gpio";
+ pinctrl-0 = <&pspi2_pins>;
+ pinctrl-1 = <&gpio224ol_pins &gpio227o_pins
+ &gpio228_pins>;
+
+ tck-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+ tdi-gpios = <&gpio7 3 GPIO_ACTIVE_HIGH>;
+ tdo-gpios = <&gpio7 4 GPIO_ACTIVE_HIGH>;
+ tms-gpios = <&gpio7 6 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&i2c0 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ i2c-switch@71 {
+ compatible = "nxp,pca9546";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x71>;
+ i2c-mux-idle-disconnect;
+ reset-gpios = <&gpio2 20 GPIO_ACTIVE_LOW>;
+
+ i2c0_slotPE0_0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ pcie-slot = &pcie1;
+ };
+
+ i2c0_slotPE1_1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ pcie-slot = &pcie2;
+ };
+
+ i2c0_slotUSB_2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c0_3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ pca9535_ifdet: pca9535-ifdet@24 {
+ compatible = "nxp,pca9535";
+ reg = <0x24>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ pca9535_pwren: pca9535-pwren@20 {
+ compatible = "nxp,pca9535";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "pwr_u2_3_en","pwr_u2_2_en",
+ "pwr_u2_1_en","pwr_u2_0_en",
+ "pwr_u2_7_en","pwr_u2_6_en",
+ "pwr_u2_5_en","pwr_u2_4_en",
+ "pwr_u2_11_en","pwr_u2_10_en",
+ "pwr_u2_9_en","pwr_u2_8_en",
+ "pwr_u2_15_en","pwr_u2_14_en",
+ "pwr_u2_13_en","pwr_u2_12_en";
+ };
+
+ pca9535_pwrgd: pca9535-pwrgd@21 {
+ compatible = "nxp,pca9535";
+ reg = <0x21>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ pca9535_ledlocate: pca9535-ledlocate@22 {
+ compatible = "nxp,pca9535";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ pca9535_ledfault: pca9535-ledfault@23 {
+ compatible = "nxp,pca9535";
+ reg = <0x23>;
+ gpio-controller;
+
+ };
+
+ pca9535_pwrdisable: pca9535-pwrdisable@25 {
+ compatible = "nxp,pca9535";
+ reg = <0x25>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "u2_3_pwr_dis","u2_2_pwr_dis",
+ "u2_1_pwr_dis","u2_0_pwr_dis",
+ "u2_7_pwr_dis","u2_6_pwr_dis",
+ "u2_5_pwr_dis","u2_4_pwr_dis",
+ "u2_11_pwr_dis","u2_10_pwr_dis",
+ "u2_9_pwr_dis","u2_8_pwr_dis",
+ "u2_15_pwr_dis","u2_14_pwr_dis",
+ "u2_13_pwr_dis","u2_12_pwr_dis";
+ };
+
+ pca9535_perst: pca9535-perst@26 {
+ compatible = "nxp,pca9535";
+ reg = <0x26>;
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ gpio-line-names =
+ "u2_15_perst","u2_14_perst",
+ "u2_13_perst","u2_12_perst",
+ "u2_11_perst","u2_10_perst",
+ "u2_9_perst","u2_8_perst",
+ "u2_7_perst","u2_6_perst",
+ "u2_5_perst","u2_4_perst",
+ "u2_3_perst","u2_2_perst",
+ "u2_1_perst","u2_0_perst";
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ sbtsi@4c {
+ compatible = "amd,sbtsi";
+ reg = <0x4c>;
+ };
+};
+
+&i2c5 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ mb_fru@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+
+ i2c-switch@71 {
+ compatible = "nxp,pca9546";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x71>;
+ i2c-mux-idle-disconnect;
+
+ i2c5_i2cool_0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ max31725@54 {
+ compatible = "maxim,max31725";
+ reg = <0x54>;
+ status = "okay";
+ };
+ };
+
+ i2c5_i2cool_1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ max31725@55 {
+ compatible = "maxim,max31725";
+ reg = <0x55>;
+ status = "okay";
+ };
+ };
+
+ i2c5_i2cool_2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ max31725@5d {
+ compatible = "maxim,max31725";
+ reg = <0x5d>;
+ status = "okay";
+ };
+ fan_fru@51 {
+ compatible = "atmel,24c64";
+ reg = <0x51>;
+ };
+ };
+
+ i2c5_hsbp_fru_3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ hsbp_fru@52 {
+ compatible = "atmel,24c64";
+ reg = <0x52>;
+ status = "okay";
+ };
+ };
+ };
+};
+
+&i2c6 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ i2c-switch@73 {
+ compatible = "nxp,pca9545";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x73>;
+ i2c-mux-idle-disconnect;
+
+ i2c6_u2_15_0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c6_u2_14_1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ i2c6_u2_13_2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c6_u2_12_3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+};
+
+&i2c7 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ i2c-switch@72 {
+ compatible = "nxp,pca9545";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x72>;
+ i2c-mux-idle-disconnect;
+
+ i2c7_u2_11_0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c7_u2_10_1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ i2c7_u2_9_2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c7_u2_8_3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+};
+
+&i2c8 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ i2c8_adm1272: adm1272@10 {
+ compatible = "adi,adm1272";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10>;
+ shunt-resistor-micro-ohms = <300>;
+ };
+};
+
+&i2c9 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ i2c-switch@71 {
+ compatible = "nxp,pca9546";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x71>;
+ i2c-mux-idle-disconnect;
+ reset-gpios = <&gpio2 22 GPIO_ACTIVE_LOW>;
+
+ i2c9_vddcr_cpu: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ vrm@60 {
+ compatible = "isil,isl68137";
+ reg = <0x60>;
+ };
+ };
+
+ i2c9_vddcr_soc: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ vrm@61 {
+ compatible = "isil,isl68137";
+ reg = <0x61>;
+ };
+ };
+
+ i2c9_vddio_efgh: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ vrm@63 {
+ compatible = "isil,isl68137";
+ reg = <0x63>;
+ };
+ };
+
+ i2c9_vddio_abcd: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ vrm@45 {
+ compatible = "isil,isl68137";
+ reg = <0x45>;
+ };
+ };
+ };
+};
+
+&i2c10 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ i2c-switch@71 {
+ compatible = "nxp,pca9545";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x71>;
+ i2c-mux-idle-disconnect;
+
+ i2c10_u2_7_0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c10_u2_6_1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ i2c10_u2_5_2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c10_u2_4_3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+};
+
+&i2c11 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ i2c-switch@76 {
+ compatible = "nxp,pca9545";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x76>;
+ i2c-mux-idle-disconnect;
+
+ i2c11_clk_buf0_0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c11_clk_buf1_1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+ i2c11_clk_buf2_2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c11_clk_buf3_3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+};
+
+&i2c12 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ max34451@4e {
+ compatible = "maxim,max34451";
+ reg = <0x4e>;
+ };
+ vrm@5d {
+ compatible = "isil,isl68137";
+ reg = <0x5d>;
+ };
+ vrm@5e {
+ compatible = "isil,isl68137";
+ reg = <0x5e>;
+ };
+};
+
+&i2c13 {
+ clock-frequency = <100000>;
+ status = "okay";
+};
+
+&i2c14 {
+ clock-frequency = <100000>;
+ status = "okay";
+
+ i2c-switch@70 {
+ compatible = "nxp,pca9545";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x70>;
+ i2c-mux-idle-disconnect;
+
+ i2c14_u2_3_0: i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>;
+ };
+
+ i2c14_u2_2_1: i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>;
+ };
+
+ i2c14_u2_1_2: i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <2>;
+ };
+
+ i2c14_u2_0_3: i2c@3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <3>;
+ };
+ };
+};
+
+&pwm_fan {
+ pinctrl-names = "default";
+ pinctrl-0 = <
+ &pwm0_pins &pwm1_pins
+ &pwm2_pins &pwm3_pins
+ &pwm4_pins
+ &fanin0_pins &fanin1_pins
+ &fanin2_pins &fanin3_pins
+ &fanin4_pins
+ >;
+ status = "okay";
+
+ fan@0 {
+ reg = <0x00>;
+ fan-tach-ch = /bits/ 8 <0x00>;
+ };
+ fan@1 {
+ reg = <0x01>;
+ fan-tach-ch = /bits/ 8 <0x01>;
+ };
+ fan@2 {
+ reg = <0x02>;
+ fan-tach-ch = /bits/ 8 <0x02>;
+ };
+ fan@3 {
+ reg = <0x04>;
+ fan-tach-ch = /bits/ 8 <0x04>;
+ };
+ fan@4 {
+ reg = <0x03>;
+ fan-tach-ch = /bits/ 8 <0x03>;
+ };
+};
+
+&pinctrl {
+ pinctrl-names = "default";
+
+ gpio0: gpio@f0010000 {
+ /* POWER_OUT=gpio07, RESET_OUT=gpio06, PS_PWROK=gpio13 */
+ gpio-line-names =
+ /*0-31*/
+ "","","","","","","RESET_OUT","POWER_OUT",
+ "","","","","","PS_PWROK","","",
+ "","","","","","","","",
+ "","","","","","","","";
+ };
+ gpio1: gpio@f0011000 {
+ /* SIO_POWER_GOOD=gpio59 */
+ gpio-line-names =
+ /*32-63*/
+ "","","","","","","","",
+ "","","","","","","","",
+ "","","","","","","","",
+ "","","","SIO_POWER_GOOD","","","","";
+ };
+ gpio2: gpio@f0012000 {
+ bmc_usb_mux_oe_n {
+ gpio-hog;
+ gpios = <25 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "bmc-usb-mux-oe-n";
+ };
+ bmc_usb_mux_sel {
+ gpio-hog;
+ gpios = <26 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "bmc-usb-mux-sel";
+ };
+ bmc_usb2517_reset_n {
+ gpio-hog;
+ gpios = <27 GPIO_ACTIVE_LOW>;
+ output-low;
+ line-name = "bmc-usb2517-reset-n";
+ };
+ };
+ gpio3: gpio@f0013000 {
+ assert_cpu0_reset {
+ gpio-hog;
+ gpios = <14 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "assert-cpu0-reset";
+ };
+ assert_pwrok_cpu0_n {
+ gpio-hog;
+ gpios = <15 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "assert-pwrok-cpu0-n";
+ };
+ assert_cpu0_prochot {
+ gpio-hog;
+ gpios = <16 GPIO_ACTIVE_HIGH>;
+ output-low;
+ line-name = "assert-cpu0-prochot";
+ };
+ };
+ gpio4: gpio@f0014000 {
+ /* POST_COMPLETE=gpio143 */
+ gpio-line-names =
+ /*128-159*/
+ "","","","","","","","",
+ "","","","","","","","POST_COMPLETE",
+ "","","","","","","","",
+ "","","","","","","","";
+ };
+ gpio5: gpio@f0015000 {
+ /* POWER_BUTTON=gpio177 */
+ gpio-line-names =
+ /*160-191*/
+ "","","","","","","","",
+ "","","","","","","","",
+ "","POWER_BUTTON","","","","","","",
+ "","","","","","","","";
+ };
+ gpio6: gpio@f0016000 {
+ /* SIO_S5=gpio199, RESET_BUTTON=gpio203 */
+ gpio-line-names =
+ /*192-223*/
+ "","","","","","","","SIO_S5",
+ "","","","RESET_BUTTON","","","","",
+ "","","","","","","","",
+ "","","","","","","","";
+ };
+
+ gpio224ol_pins: gpio224ol-pins {
+ pins = "GPIO224/SPIXCK";
+ bias-disable;
+ output-low;
+ };
+ gpio227o_pins: gpio227o-pins {
+ pins = "GPIO227/nSPIXCS0";
+ bias-disable;
+ output-high;
+ };
+ gpio228_pins: gpio228-pins {
+ pins = "GPIO228/nSPIXCS1";
+ bias-disable;
+ input-enable;
+ };
+};
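One dtc detail this board file leans on in several places: the /bits/ modifier fixes the storage width of the cell list that follows, so values are emitted as u8 or u16 instead of the default u32. Both uses from the file:

    refresh-interval-ms = /bits/ 16 <600>;  /* stored as a 16-bit value */
    fan-tach-ch = /bits/ 8 <0x04>;          /* stored as an 8-bit value */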
diff --git a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts
index 9f13d08f5804..dea3dbc4a6a5 100644
--- a/arch/arm/boot/dts/nuvoton-npcm750-evb.dts
+++ b/arch/arm/boot/dts/nuvoton-npcm750-evb.dts
@@ -9,7 +9,7 @@
/ {
model = "Nuvoton npcm750 Development Board (Device Tree)";
- compatible = "nuvoton,npcm750";
+ compatible = "nuvoton,npcm750-evb", "nuvoton,npcm750";
aliases {
ethernet2 = &gmac0;
diff --git a/arch/arm/boot/dts/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts b/arch/arm/boot/dts/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts
new file mode 100644
index 000000000000..83f27fbf4e93
--- /dev/null
+++ b/arch/arm/boot/dts/nuvoton-wpcm450-supermicro-x9sci-ln4f.dts
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+// Copyright 2021 Jonathan Neuschäfer
+
+/dts-v1/;
+
+/* The last 16 MiB are dedicated to the GPU */
+/memreserve/ 0x07000000 0x01000000;
+
+#include "nuvoton-wpcm450.dtsi"
+
+/ {
+ model = "Supermicro X9SCi-LN4F BMC";
+ compatible = "supermicro,x9sci-ln4f-bmc", "nuvoton,wpcm450";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0 0x08000000>; /* 128 MiB */
+ };
+};
+
+&serial0 {
+ /*
+ * Debug serial port. TX is exposed on the right pad of unpopulated
+ * resistor R1247, RX on the right pad of R1162.
+ */
+ status = "okay";
+};
+
+&serial1 {
+ /* "Serial over LAN" port. Connected to ttyS2 of the host system. */
+ status = "okay";
+};
+
+&watchdog0 {
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/nuvoton-wpcm450.dtsi b/arch/arm/boot/dts/nuvoton-wpcm450.dtsi
new file mode 100644
index 000000000000..d7cbeb187484
--- /dev/null
+++ b/arch/arm/boot/dts/nuvoton-wpcm450.dtsi
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+// Copyright 2021 Jonathan Neuschäfer
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ compatible = "nuvoton,wpcm450";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,arm926ej-s";
+ device_type = "cpu";
+ reg = <0>;
+ };
+ };
+
+ clk24m: clock-24mhz {
+ /* 24 MHz dummy clock */
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ #clock-cells = <0>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&aic>;
+ ranges;
+
+ serial0: serial@b8000000 {
+ compatible = "nuvoton,wpcm450-uart";
+ reg = <0xb8000000 0x20>;
+ reg-shift = <2>;
+ interrupts = <7 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk24m>;
+ status = "disabled";
+ };
+
+ serial1: serial@b8000100 {
+ compatible = "nuvoton,wpcm450-uart";
+ reg = <0xb8000100 0x20>;
+ reg-shift = <2>;
+ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk24m>;
+ status = "disabled";
+ };
+
+ timer0: timer@b8001000 {
+ compatible = "nuvoton,wpcm450-timer";
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0xb8001000 0x1c>;
+ clocks = <&clk24m>;
+ };
+
+ watchdog0: watchdog@b800101c {
+ compatible = "nuvoton,wpcm450-wdt";
+ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0xb800101c 0x4>;
+ clocks = <&clk24m>;
+ status = "disabled";
+ };
+
+ aic: interrupt-controller@b8002000 {
+ compatible = "nuvoton,wpcm450-aic";
+ reg = <0xb8002000 0x1000>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+};
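A note on the UART nodes above: reg-shift = <2> tells the (8250-style) driver that the byte-wide UART registers are spaced 4 bytes apart, i.e. register N sits at base + (N << 2) — the usual layout when such a block hangs off a 32-bit bus. So each 0x20-byte reg window covers eight registers:

    serial1: serial@b8000100 {
        compatible = "nuvoton,wpcm450-uart";
        reg = <0xb8000100 0x20>;    /* 8 registers x 4-byte stride */
        reg-shift = <2>;            /* offset = register index << 2 */
        interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&clk24m>;
    };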
diff --git a/arch/arm/boot/dts/omap3-echo.dts b/arch/arm/boot/dts/omap3-echo.dts
index b9fd113979f2..8f02ff5e7da6 100644
--- a/arch/arm/boot/dts/omap3-echo.dts
+++ b/arch/arm/boot/dts/omap3-echo.dts
@@ -7,6 +7,7 @@
#include "dm3725.dtsi"
#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
/ {
model = "Amazon Echo (first generation)";
@@ -139,179 +140,367 @@
clock-frequency = <400000>;
lp5523A: lp5523A@32 {
+ #address-cells = <1>;
+ #size-cells = <0>;
compatible = "national,lp5523";
label = "q1";
reg = <0x32>;
clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */
enable-gpio = <&gpio4 13 GPIO_ACTIVE_HIGH>; /* GPIO_109 */
- chan0 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan1 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan2 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan3 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan4 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan5 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan6 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan7 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan8 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
+ multi-led@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+ color = <LED_COLOR_ID_RGB>;
+
+ led@0 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x0>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@1 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x1>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@6 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x6>;
+ color = <LED_COLOR_ID_RED>;
+ };
+ };
+ multi-led@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>;
+ color = <LED_COLOR_ID_RGB>;
+
+ led@2 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x2>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@3 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x3>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@7 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x7>;
+ color = <LED_COLOR_ID_RED>;
+ };
+ };
+ multi-led@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>;
+ color = <LED_COLOR_ID_RGB>;
+
+ led@4 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x4>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@5 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x5>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@8 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x8>;
+ color = <LED_COLOR_ID_RED>;
+ };
};
};
lp5523B: lp5523B@33 {
+ #address-cells = <1>;
+ #size-cells = <0>;
compatible = "national,lp5523";
label = "q3";
reg = <0x33>;
clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */
- chan0 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan1 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan2 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan3 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan4 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan5 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan6 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan7 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan8 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
+ multi-led@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+ color = <LED_COLOR_ID_RGB>;
+
+ led@0 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x0>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@1 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x1>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@6 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x6>;
+ color = <LED_COLOR_ID_RED>;
+ };
+ };
+ multi-led@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>;
+ color = <LED_COLOR_ID_RGB>;
+
+ led@2 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x2>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@3 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x3>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@7 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x7>;
+ color = <LED_COLOR_ID_RED>;
+ };
+ };
+ multi-led@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>;
+ color = <LED_COLOR_ID_RGB>;
+
+ led@4 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x4>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@5 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x5>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@8 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x8>;
+ color = <LED_COLOR_ID_RED>;
+ };
};
};
lp5523C: lp5523C@34 {
+ #address-cells = <1>;
+ #size-cells = <0>;
compatible = "national,lp5523";
label = "q4";
reg = <0x34>;
clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */
- chan0 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan1 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan2 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan3 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan4 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan5 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan6 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan7 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan8 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
+ multi-led@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+ color = <LED_COLOR_ID_RGB>;
+
+ led@0 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x0>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@1 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x1>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@6 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x6>;
+ color = <LED_COLOR_ID_RED>;
+ };
+ };
+ multi-led@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>;
+ color = <LED_COLOR_ID_RGB>;
+
+ led@2 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x2>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@3 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x3>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@7 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x7>;
+ color = <LED_COLOR_ID_RED>;
+ };
+ };
+ multi-led@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>;
+ color = <LED_COLOR_ID_RGB>;
+
+ led@4 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x4>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@5 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x5>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@8 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x8>;
+ color = <LED_COLOR_ID_RED>;
+ };
};
};
lp5523D: lp552D@35 {
+ #address-cells = <1>;
+ #size-cells = <0>;
compatible = "national,lp5523";
label = "q2";
reg = <0x35>;
clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */
- chan0 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan1 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan2 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan3 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan4 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan5 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan6 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan7 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
- };
- chan8 {
- led-cur = /bits/ 8 <12>;
- max-cur = /bits/ 8 <15>;
+ multi-led@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x0>;
+ color = <LED_COLOR_ID_RGB>;
+
+ led@0 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x0>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@1 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x1>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@6 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x6>;
+ color = <LED_COLOR_ID_RED>;
+ };
+ };
+ multi-led@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1>;
+ color = <LED_COLOR_ID_RGB>;
+
+ led@2 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x2>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@3 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x3>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@7 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x7>;
+ color = <LED_COLOR_ID_RED>;
+ };
+ };
+ multi-led@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x2>;
+ color = <LED_COLOR_ID_RGB>;
+
+ led@4 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x4>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+ led@5 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x5>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+
+ led@8 {
+ led-cur = /bits/ 8 <12>;
+ max-cur = /bits/ 8 <15>;
+ reg = <0x8>;
+ color = <LED_COLOR_ID_RED>;
+ };
};
};
};
@@ -417,6 +606,8 @@
};
&mmc3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "okay";
bus-width = <4>;
pinctrl-names = "default";
@@ -426,6 +617,11 @@
mmc-pwrseq = <&sdio_pwrseq>;
vmmc-supply = <&vcc3v3>;
vqmmc-supply = <&vcc1v8>;
+ atheros@0 {
+ compatible = "atheros,ath6kl";
+ reg = <0>;
+ bus-width = <4>;
+ };
};
&tps {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 9dcae1f2bc99..c5b9da0d7e6c 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -24,6 +24,9 @@
i2c0 = &i2c1;
i2c1 = &i2c2;
i2c2 = &i2c3;
+ mmc0 = &mmc1;
+ mmc1 = &mmc2;
+ mmc2 = &mmc3;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
diff --git a/arch/arm/boot/dts/omap4-l4.dtsi b/arch/arm/boot/dts/omap4-l4.dtsi
index e0bb60a30779..99721673d7af 100644
--- a/arch/arm/boot/dts/omap4-l4.dtsi
+++ b/arch/arm/boot/dts/omap4-l4.dtsi
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
&l4_cfg { /* 0x4a000000 */
- compatible = "ti,omap4-l4-cfg", "simple-bus";
+ compatible = "ti,omap4-l4-cfg", "simple-pm-bus";
+ power-domains = <&prm_core>;
+ clocks = <&l4_cfg_clkctrl OMAP4_L4_CFG_CLKCTRL 0>;
+ clock-names = "fck";
reg = <0x4a000000 0x800>,
<0x4a000800 0x800>,
<0x4a001000 0x1000>;
@@ -16,7 +19,7 @@
<0x00300000 0x4a300000 0x080000>; /* segment 6 */
segment@0 { /* 0x4a000000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */
@@ -43,7 +46,6 @@
target-module@2000 { /* 0x4a002000, ap 3 06.0 */
compatible = "ti,sysc-omap4", "ti,sysc";
- ti,hwmods = "ctrl_module_core";
reg = <0x2000 0x4>,
<0x2010 0x4>;
reg-names = "rev", "sysc";
@@ -347,7 +349,7 @@
};
segment@80000 { /* 0x4a080000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00059000 0x000d9000 0x001000>, /* ap 13 */
@@ -639,7 +641,7 @@
};
segment@100000 { /* 0x4a100000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00100000 0x001000>, /* ap 21 */
@@ -653,7 +655,6 @@
target-module@0 { /* 0x4a100000, ap 21 2a.0 */
compatible = "ti,sysc-omap4", "ti,sysc";
- ti,hwmods = "ctrl_module_pad_core";
reg = <0x0 0x4>,
<0x10 0x4>;
reg-names = "rev", "sysc";
@@ -741,13 +742,13 @@
};
segment@180000 { /* 0x4a180000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
};
segment@200000 { /* 0x4a200000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0001e000 0x0021e000 0x001000>, /* ap 31 */
@@ -903,13 +904,13 @@
};
segment@280000 { /* 0x4a280000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
};
l4_cfg_segment_300000: segment@300000 { /* 0x4a300000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00300000 0x020000>, /* ap 67 */
@@ -944,7 +945,10 @@
};
&l4_wkup { /* 0x4a300000 */
- compatible = "ti,omap4-l4-wkup", "simple-bus";
+ compatible = "ti,omap4-l4-wkup", "simple-pm-bus";
+ power-domains = <&prm_wkup>;
+ clocks = <&l4_wkup_clkctrl OMAP4_L4_WKUP_CLKCTRL 0>;
+ clock-names = "fck";
reg = <0x4a300000 0x800>,
<0x4a300800 0x800>,
<0x4a301000 0x1000>;
@@ -956,7 +960,7 @@
<0x00020000 0x4a320000 0x010000>; /* segment 2 */
segment@0 { /* 0x4a300000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */
@@ -1041,7 +1045,6 @@
target-module@c000 { /* 0x4a30c000, ap 19 2c.0 */
compatible = "ti,sysc-omap4", "ti,sysc";
- ti,hwmods = "ctrl_module_wkup";
reg = <0xc000 0x4>,
<0xc010 0x4>;
reg-names = "rev", "sysc";
@@ -1062,7 +1065,7 @@
};
segment@10000 { /* 0x4a310000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00010000 0x001000>, /* ap 5 */
@@ -1202,7 +1205,6 @@
target-module@e000 { /* 0x4a31e000, ap 21 30.0 */
compatible = "ti,sysc-omap4", "ti,sysc";
- ti,hwmods = "ctrl_module_pad_wkup";
reg = <0xe000 0x4>,
<0xe010 0x4>;
reg-names = "rev", "sysc";
@@ -1231,7 +1233,7 @@
};
segment@20000 { /* 0x4a320000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00006000 0x00026000 0x001000>, /* ap 13 */
@@ -1284,7 +1286,10 @@
};
&l4_per { /* 0x48000000 */
- compatible = "ti,omap4-l4-per", "simple-bus";
+ compatible = "ti,omap4-l4-per", "simple-pm-bus";
+ power-domains = <&prm_l4per>;
+ clocks = <&l4_per_clkctrl OMAP4_L4_PER_CLKCTRL 0>;
+ clock-names = "fck";
reg = <0x48000000 0x800>,
<0x48000800 0x800>,
<0x48001000 0x400>,
@@ -1298,7 +1303,7 @@
<0x00200000 0x48200000 0x200000>; /* segment 1 */
segment@0 { /* 0x48000000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */
@@ -2437,7 +2442,7 @@
};
segment@200000 { /* 0x48200000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00150000 0x00350000 0x001000>, /* ap 77 */
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 72e4f6481776..2bbff9032be3 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -22,6 +22,11 @@
i2c1 = &i2c2;
i2c2 = &i2c3;
i2c3 = &i2c4;
+ mmc0 = &mmc1;
+ mmc1 = &mmc2;
+ mmc2 = &mmc3;
+ mmc3 = &mmc4;
+ mmc4 = &mmc5;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
@@ -54,14 +59,12 @@
};
/*
- * Note that 4430 needs cross trigger interface (CTI) supported
- * before we can configure the interrupts. This means sampling
- * events are not supported for pmu. Note that 4460 does not use
- * CTI, see also 4460.dtsi.
+ * Needed early by omap4_sram_init() for the barrier; do not move this
+ * node under the l3 interconnect, as simple-pm-bus probes at
+ * module_init() time.
*/
- pmu {
- compatible = "arm,cortex-a9-pmu";
- ti,hwmods = "debugss";
+ ocmcram: sram@40304000 {
+ compatible = "mmio-sram";
+ reg = <0x40304000 0xa000>; /* 40k */
};
gic: interrupt-controller@48241000 {
@@ -97,19 +100,6 @@
};
/*
- * The soc node represents the soc top level view. It is used for IPs
- * that are not memory mapped in the MPU view or for the MPU itself.
- */
- soc {
- compatible = "ti,omap-infra";
- mpu {
- compatible = "ti,omap4-mpu";
- ti,hwmods = "mpu";
- sram = <&ocmcram>;
- };
- };
-
- /*
* XXX: Use a flat representation of the OMAP4 interconnect.
* The real OMAP interconnect network is quite complex.
* Since it will not bring real advantage to represent that in DT for
@@ -117,16 +107,23 @@
* hierarchy.
*/
ocp {
- compatible = "ti,omap4-l3-noc", "simple-bus";
+ compatible = "simple-pm-bus";
+ power-domains = <&prm_l4per>;
+ clocks = <&l3_1_clkctrl OMAP4_L3_MAIN_1_CLKCTRL 0>,
+ <&l3_2_clkctrl OMAP4_L3_MAIN_2_CLKCTRL 0>,
+ <&l3_instr_clkctrl OMAP4_L3_MAIN_3_CLKCTRL 0>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
- ti,hwmods = "l3_main_1", "l3_main_2", "l3_main_3";
- reg = <0x44000000 0x1000>,
- <0x44800000 0x2000>,
- <0x45000000 0x1000>;
- interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+
+ l3-noc@44000000 {
+ compatible = "ti,omap4-l3-noc";
+ reg = <0x44000000 0x1000>,
+ <0x44800000 0x2000>,
+ <0x45000000 0x1000>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ };
l4_wkup: interconnect@4a300000 {
};
@@ -137,12 +134,22 @@
l4_per: interconnect@48000000 {
};
- l4_abe: interconnect@40100000 {
+ target-module@48210000 {
+ compatible = "ti,sysc-omap4-simple", "ti,sysc";
+ power-domains = <&prm_mpu>;
+ clocks = <&mpuss_clkctrl OMAP4_MPU_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x48210000 0x1f0000>;
+
+ mpu {
+ compatible = "ti,omap4-mpu";
+ sram = <&ocmcram>;
+ };
};
- ocmcram: sram@40304000 {
- compatible = "mmio-sram";
- reg = <0x40304000 0xa000>; /* 40k */
+ l4_abe: interconnect@40100000 {
};
target-module@50000000 {
@@ -184,7 +191,6 @@
target-module@52000000 {
compatible = "ti,sysc-omap4", "ti,sysc";
- ti,hwmods = "iss";
reg = <0x52000000 0x4>,
<0x52000010 0x4>;
reg-names = "rev", "sysc";
@@ -198,6 +204,7 @@
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
ti,sysc-delay-us = <2>;
+ power-domains = <&prm_cam>;
clocks = <&iss_clkctrl OMAP4_ISS_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;
@@ -207,6 +214,26 @@
/* No child device binding, driver in staging */
};
+ /*
+ * Note that 4430 needs cross trigger interface (CTI) supported
+ * before we can configure the interrupts. This means sampling
+ * events are not supported for pmu. Note that 4460 does not use
+ * CTI, see also 4460.dtsi.
+ */
+ target-module@54000000 {
+ compatible = "ti,sysc-omap4-simple", "ti,sysc";
+ power-domains = <&prm_emu>;
+ clocks = <&emu_sys_clkctrl OMAP4_DEBUGSS_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x54000000 0x1000000>;
+
+ pmu: pmu {
+ compatible = "arm,cortex-a9-pmu";
+ };
+ };
+
target-module@55082000 {
compatible = "ti,sysc-omap2", "ti,sysc";
reg = <0x55082000 0x4>,
@@ -256,35 +283,67 @@
/* No child device binding or driver in mainline */
};
- dmm@4e000000 {
- compatible = "ti,omap4-dmm";
- reg = <0x4e000000 0x800>;
- interrupts = <0 113 0x4>;
- ti,hwmods = "dmm";
+ target-module@4e000000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x4e000000 0x4>,
+ <0x4e000010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ranges = <0x0 0x4e000000 0x2000000>;
+ #size-cells = <1>;
+ #address-cells = <1>;
+
+ dmm@0 {
+ compatible = "ti,omap4-dmm";
+ reg = <0 0x800>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
- emif1: emif@4c000000 {
- compatible = "ti,emif-4d";
- reg = <0x4c000000 0x100>;
- interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "emif1";
- ti,no-idle-on-init;
- phy-type = <1>;
- hw-caps-read-idle-ctrl;
- hw-caps-ll-interface;
- hw-caps-temp-alert;
+ target-module@4c000000 {
+ compatible = "ti,sysc-omap4-simple", "ti,sysc";
+ reg = <0x4c000000 0x4>;
+ reg-names = "rev";
+ clocks = <&l3_emif_clkctrl OMAP4_EMIF1_CLKCTRL 0>;
+ clock-names = "fck";
+ ti,no-idle;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x4c000000 0x1000000>;
+
+ emif1: emif@0 {
+ compatible = "ti,emif-4d";
+ reg = <0 0x100>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ phy-type = <1>;
+ hw-caps-read-idle-ctrl;
+ hw-caps-ll-interface;
+ hw-caps-temp-alert;
+ };
};
- emif2: emif@4d000000 {
- compatible = "ti,emif-4d";
- reg = <0x4d000000 0x100>;
- interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "emif2";
- ti,no-idle-on-init;
- phy-type = <1>;
- hw-caps-read-idle-ctrl;
- hw-caps-ll-interface;
- hw-caps-temp-alert;
+ target-module@4d000000 {
+ compatible = "ti,sysc-omap4-simple", "ti,sysc";
+ reg = <0x4d000000 0x4>;
+ reg-names = "rev";
+ clocks = <&l3_emif_clkctrl OMAP4_EMIF2_CLKCTRL 0>;
+ clock-names = "fck";
+ ti,no-idle;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x4d000000 0x1000000>;
+
+ emif2: emif@0 {
+ compatible = "ti,emif-4d";
+ reg = <0 0x100>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ phy-type = <1>;
+ hw-caps-read-idle-ctrl;
+ hw-caps-ll-interface;
+ hw-caps-temp-alert;
+ };
};
dsp: dsp {
@@ -435,6 +494,7 @@
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
+ power-domains = <&prm_gfx>;
clocks = <&l3_gfx_clkctrl OMAP4_GPU_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;
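
Regarding the "Needed early by omap4_sram_init()" note above: that early init resolves the MPU node's sram = <&ocmcram> phandle into a genalloc pool for the errata write barrier before any simple-pm-bus device probes. A minimal sketch of that lookup, assuming the of/genalloc helpers the mach-omap2 code is built on (the real function lives in arch/arm/mach-omap2/omap4-common.c; this body is illustrative only):

#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/printk.h>

static void __iomem *sram_sync;	/* barrier area carved out of OCM SRAM */

static int __init omap4_sram_init(void)
{
	struct device_node *np;
	struct gen_pool *sram_pool;

	/* The mpu node above now carries sram = <&ocmcram>. */
	np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
	if (!np) {
		pr_warn("%s: no ti,omap4-mpu node, barrier disabled\n", __func__);
		return 0;
	}

	/* Resolve the phandle into the mmio-sram genalloc pool... */
	sram_pool = of_gen_pool_get(np, "sram", 0);
	if (sram_pool)
		/* ...and reserve one page of it for the write barrier. */
		sram_sync = (void __iomem *)gen_pool_alloc(sram_pool, PAGE_SIZE);

	return 0;
}
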
diff --git a/arch/arm/boot/dts/omap4460.dtsi b/arch/arm/boot/dts/omap4460.dtsi
index 2d3e54901b6e..3d6db1db94e0 100644
--- a/arch/arm/boot/dts/omap4460.dtsi
+++ b/arch/arm/boot/dts/omap4460.dtsi
@@ -26,13 +26,6 @@
};
};
- pmu {
- compatible = "arm,cortex-a9-pmu";
- interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
- ti,hwmods = "debugss";
- };
-
thermal-zones {
#include "omap4-cpu-thermal.dtsi"
};
@@ -128,4 +121,10 @@
<0x00030000 0x00030000 0x00010000>;
};
+&pmu {
+ compatible = "arm,cortex-a9-pmu";
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+};
+
/include/ "omap446x-clocks.dtsi"
diff --git a/arch/arm/boot/dts/omap44xx-clocks.dtsi b/arch/arm/boot/dts/omap44xx-clocks.dtsi
index 532868591107..1f1c04d8f472 100644
--- a/arch/arm/boot/dts/omap44xx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap44xx-clocks.dtsi
@@ -770,14 +770,6 @@
ti,max-div = <2>;
};
- sha2md5_fck: sha2md5_fck@15c8 {
- #clock-cells = <0>;
- compatible = "ti,gate-clock";
- clocks = <&l3_div_ck>;
- ti,bit-shift = <1>;
- reg = <0x15c8>;
- };
-
usb_phy_cm_clk32k: usb_phy_cm_clk32k@640 {
#clock-cells = <0>;
compatible = "ti,gate-clock";
diff --git a/arch/arm/boot/dts/omap5-l4.dtsi b/arch/arm/boot/dts/omap5-l4.dtsi
index 887b3359dd5a..b148b289e830 100644
--- a/arch/arm/boot/dts/omap5-l4.dtsi
+++ b/arch/arm/boot/dts/omap5-l4.dtsi
@@ -1,5 +1,8 @@
&l4_cfg { /* 0x4a000000 */
- compatible = "ti,omap5-l4-cfg", "simple-bus";
+ compatible = "ti,omap5-l4-cfg", "simple-pm-bus";
+ power-domains = <&prm_core>;
+ clocks = <&l4cfg_clkctrl OMAP5_L4_CFG_CLKCTRL 0>;
+ clock-names = "fck";
reg = <0x4a000000 0x800>,
<0x4a000800 0x800>,
<0x4a001000 0x1000>;
@@ -15,7 +18,7 @@
<0x00300000 0x4a300000 0x080000>; /* segment 6 */
segment@0 { /* 0x4a000000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */
@@ -391,7 +394,7 @@
};
segment@80000 { /* 0x4a080000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00059000 0x000d9000 0x001000>, /* ap 13 */
@@ -654,7 +657,7 @@
};
segment@100000 { /* 0x4a100000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00002000 0x00102000 0x001000>, /* ap 59 */
@@ -691,22 +694,44 @@
};
target-module@40000 { /* 0x4a140000, ap 101 16.0 */
- compatible = "ti,sysc";
- status = "disabled";
- #address-cells = <1>;
+ compatible = "ti,sysc-omap4", "ti,sysc";
+ reg = <0x400fc 4>,
+ <0x41100 4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-midle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>,
+ <SYSC_IDLE_SMART_WKUP>;
+ power-domains = <&prm_l3init>;
+ clocks = <&l3init_clkctrl OMAP5_SATA_CLKCTRL 0>;
+ clock-names = "fck";
#size-cells = <1>;
+ #address-cells = <1>;
ranges = <0x0 0x40000 0x10000>;
+
+ sata: sata@0 {
+ compatible = "snps,dwc-ahci";
+ reg = <0 0x1100>, <0x1100 0x8>;
+ interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&sata_phy>;
+ phy-names = "sata-phy";
+ clocks = <&l3init_clkctrl OMAP5_SATA_CLKCTRL 8>;
+ ports-implemented = <0x1>;
+ };
};
};
segment@180000 { /* 0x4a180000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
};
segment@200000 { /* 0x4a200000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0001e000 0x0021e000 0x001000>, /* ap 29 */
@@ -912,20 +937,23 @@
};
segment@280000 { /* 0x4a280000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
};
segment@300000 { /* 0x4a300000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
};
};
&l4_per { /* 0x48000000 */
- compatible = "ti,omap5-l4-per", "simple-bus";
+ compatible = "ti,omap5-l4-per", "simple-pm-bus";
+ power-domains = <&prm_core>;
+ clocks = <&l4per_clkctrl OMAP5_L4_PER_CLKCTRL 0>;
+ clock-names = "fck";
reg = <0x48000000 0x800>,
<0x48000800 0x800>,
<0x48001000 0x400>,
@@ -939,7 +967,7 @@
<0x00200000 0x48200000 0x200000>; /* segment 1 */
segment@0 { /* 0x48000000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */
@@ -2148,14 +2176,17 @@
};
segment@200000 { /* 0x48200000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
};
};
&l4_wkup { /* 0x4ae00000 */
- compatible = "ti,omap5-l4-wkup", "simple-bus";
+ compatible = "ti,omap5-l4-wkup", "simple-pm-bus";
+ power-domains = <&prm_wkupaon>;
+ clocks = <&wkupaon_clkctrl OMAP5_L4_WKUP_CLKCTRL 0>;
+ clock-names = "fck";
reg = <0x4ae00000 0x800>,
<0x4ae00800 0x800>,
<0x4ae01000 0x1000>;
@@ -2167,7 +2198,7 @@
<0x00020000 0x4ae20000 0x010000>; /* segment 2 */
segment@0 { /* 0x4ae00000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00000000 0x000800>, /* ap 0 */
@@ -2296,7 +2327,7 @@
};
segment@10000 { /* 0x4ae10000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00000000 0x00010000 0x001000>, /* ap 5 */
@@ -2423,7 +2454,7 @@
};
segment@20000 { /* 0x4ae20000 */
- compatible = "simple-bus";
+ compatible = "simple-pm-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x00006000 0x00026000 0x001000>, /* ap 13 */
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index e025b7c9a357..bac6fa838793 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -25,6 +25,11 @@
i2c2 = &i2c3;
i2c3 = &i2c4;
i2c4 = &i2c5;
+ mmc0 = &mmc1;
+ mmc1 = &mmc2;
+ mmc2 = &mmc3;
+ mmc3 = &mmc4;
+ mmc4 = &mmc5;
serial0 = &uart1;
serial1 = &uart2;
serial2 = &uart3;
@@ -101,6 +106,15 @@
<GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>;
};
+ /*
+ * Needed early by omap4_sram_init() for the barrier; do not move this
+ * node under the l3 interconnect, as simple-pm-bus probes at
+ * module_init() time.
+ */
+ ocmcram: sram@40300000 {
+ compatible = "mmio-sram";
+ reg = <0 0x40300000 0 0x20000>; /* 128k */
+ };
+
gic: interrupt-controller@48211000 {
compatible = "arm,cortex-a15-gic";
interrupt-controller;
@@ -121,19 +135,6 @@
};
/*
- * The soc node represents the soc top level view. It is used for IPs
- * that are not memory mapped in the MPU view or for the MPU itself.
- */
- soc {
- compatible = "ti,omap-infra";
- mpu {
- compatible = "ti,omap4-mpu";
- ti,hwmods = "mpu";
- sram = <&ocmcram>;
- };
- };
-
- /*
* XXX: Use a flat representation of the OMAP3 interconnect.
* The real OMAP interconnect network is quite complex.
* Since it will not bring real advantage to represent that in DT for
@@ -141,17 +142,24 @@
* hierarchy.
*/
ocp {
- compatible = "ti,omap5-l3-noc", "simple-bus";
+ compatible = "simple-pm-bus";
+ power-domains = <&prm_core>;
+ clocks = <&l3main1_clkctrl OMAP5_L3_MAIN_1_CLKCTRL 0>,
+ <&l3main2_clkctrl OMAP5_L3_MAIN_2_CLKCTRL 0>,
+ <&l3instr_clkctrl OMAP5_L3_MAIN_3_CLKCTRL 0>;
#address-cells = <1>;
#size-cells = <1>;
ranges = <0 0 0 0xc0000000>;
dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>;
- ti,hwmods = "l3_main_1", "l3_main_2", "l3_main_3";
- reg = <0 0x44000000 0 0x2000>,
- <0 0x44800000 0 0x3000>,
- <0 0x45000000 0 0x4000>;
- interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+
+ l3-noc@44000000 {
+ compatible = "ti,omap5-l3-noc";
+ reg = <0x44000000 0x2000>,
+ <0x44800000 0x3000>,
+ <0x45000000 0x4000>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ };
l4_wkup: interconnect@4ae00000 {
};
@@ -162,31 +170,58 @@
l4_per: interconnect@48000000 {
};
- l4_abe: interconnect@40100000 {
+ target-module@48210000 {
+ compatible = "ti,sysc-omap4-simple", "ti,sysc";
+ power-domains = <&prm_mpu>;
+ clocks = <&mpu_clkctrl OMAP5_MPU_CLKCTRL 0>;
+ clock-names = "fck";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x48210000 0x1f0000>;
+
+ mpu {
+ compatible = "ti,omap4-mpu";
+ sram = <&ocmcram>;
+ };
};
- ocmcram: sram@40300000 {
- compatible = "mmio-sram";
- reg = <0x40300000 0x20000>; /* 128k */
+ l4_abe: interconnect@40100000 {
};
- gpmc: gpmc@50000000 {
- compatible = "ti,omap4430-gpmc";
- reg = <0x50000000 0x1000>;
- #address-cells = <2>;
- #size-cells = <1>;
- interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
- dmas = <&sdma 4>;
- dma-names = "rxtx";
- gpmc,num-cs = <8>;
- gpmc,num-waitpins = <4>;
- ti,hwmods = "gpmc";
- clocks = <&l3_iclk_div>;
+ target-module@50000000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x50000000 4>,
+ <0x50000010 4>,
+ <0x50000014 4>;
+ reg-names = "rev", "sysc", "syss";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ti,syss-mask = <1>;
+ ti,no-idle-on-init;
+ clocks = <&l3main2_clkctrl OMAP5_L3_MAIN_2_GPMC_CLKCTRL 0>;
clock-names = "fck";
- interrupt-controller;
- #interrupt-cells = <2>;
- gpio-controller;
- #gpio-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x50000000 0x50000000 0x00001000>, /* regs */
+ <0x00000000 0x00000000 0x40000000>; /* data */
+
+ gpmc: gpmc@50000000 {
+ compatible = "ti,omap4430-gpmc";
+ reg = <0x50000000 0x1000>;
+ #address-cells = <2>;
+ #size-cells = <1>;
+ interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+ dmas = <&sdma 4>;
+ dma-names = "rxtx";
+ gpmc,num-cs = <8>;
+ gpmc,num-waitpins = <4>;
+ clock-names = "fck";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
};
target-module@55082000 {
@@ -241,35 +276,67 @@
status = "disabled";
};
- dmm@4e000000 {
- compatible = "ti,omap5-dmm";
- reg = <0x4e000000 0x800>;
- interrupts = <0 113 0x4>;
- ti,hwmods = "dmm";
+ target-module@4e000000 {
+ compatible = "ti,sysc-omap2", "ti,sysc";
+ reg = <0x4e000000 0x4>,
+ <0x4e000010 0x4>;
+ reg-names = "rev", "sysc";
+ ti,sysc-sidle = <SYSC_IDLE_FORCE>,
+ <SYSC_IDLE_NO>,
+ <SYSC_IDLE_SMART>;
+ ranges = <0x0 0x4e000000 0x2000000>;
+ #size-cells = <1>;
+ #address-cells = <1>;
+
+ dmm@0 {
+ compatible = "ti,omap5-dmm";
+ reg = <0 0x800>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
+ };
};
- emif1: emif@4c000000 {
- compatible = "ti,emif-4d5";
- ti,hwmods = "emif1";
- ti,no-idle-on-init;
- phy-type = <2>; /* DDR PHY type: Intelli PHY */
- reg = <0x4c000000 0x400>;
- interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
- hw-caps-read-idle-ctrl;
- hw-caps-ll-interface;
- hw-caps-temp-alert;
+ target-module@4c000000 {
+ compatible = "ti,sysc-omap4-simple", "ti,sysc";
+ reg = <0x4c000000 0x4>;
+ reg-names = "rev";
+ clocks = <&emif_clkctrl OMAP5_EMIF1_CLKCTRL 0>;
+ clock-names = "fck";
+ ti,no-idle;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x4c000000 0x1000000>;
+
+ emif1: emif@0 {
+ compatible = "ti,emif-4d5";
+ reg = <0 0x400>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+ phy-type = <2>; /* DDR PHY type: Intelli PHY */
+ hw-caps-read-idle-ctrl;
+ hw-caps-ll-interface;
+ hw-caps-temp-alert;
+ };
};
- emif2: emif@4d000000 {
- compatible = "ti,emif-4d5";
- ti,hwmods = "emif2";
- ti,no-idle-on-init;
- phy-type = <2>; /* DDR PHY type: Intelli PHY */
- reg = <0x4d000000 0x400>;
- interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
- hw-caps-read-idle-ctrl;
- hw-caps-ll-interface;
- hw-caps-temp-alert;
+ target-module@4d000000 {
+ compatible = "ti,sysc-omap4-simple", "ti,sysc";
+ reg = <0x4d000000 0x4>;
+ reg-names = "rev";
+ clocks = <&emif_clkctrl OMAP5_EMIF2_CLKCTRL 0>;
+ clock-names = "fck";
+ ti,no-idle;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x4d000000 0x1000000>;
+
+ emif2: emif@0 {
+ compatible = "ti,emif-4d5";
+ reg = <0 0x400>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
+ phy-type = <2>; /* DDR PHY type: Intelli PHY */
+ hw-caps-read-idle-ctrl;
+ hw-caps-ll-interface;
+ hw-caps-temp-alert;
+ };
};
aes1_target: target-module@4b501000 {
@@ -369,18 +436,6 @@
#thermal-sensor-cells = <1>;
};
- /* OCP2SCP3 */
- sata: sata@4a141100 {
- compatible = "snps,dwc-ahci";
- reg = <0x4a140000 0x1100>, <0x4a141100 0x7>;
- interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
- phys = <&sata_phy>;
- phy-names = "sata-phy";
- clocks = <&l3init_clkctrl OMAP5_SATA_CLKCTRL 8>;
- ti,hwmods = "sata";
- ports-implemented = <0x1>;
- };
-
target-module@56000000 {
compatible = "ti,sysc-omap4", "ti,sysc";
reg = <0x5600fe00 0x4>,
diff --git a/arch/arm/boot/dts/owl-s500-roseapplepi.dts b/arch/arm/boot/dts/owl-s500-roseapplepi.dts
index ff91561ca99c..b8c5db2344aa 100644
--- a/arch/arm/boot/dts/owl-s500-roseapplepi.dts
+++ b/arch/arm/boot/dts/owl-s500-roseapplepi.dts
@@ -2,7 +2,7 @@
/*
* Roseapple Pi
*
- * Copyright (C) 2020 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
+ * Copyright (C) 2020-2021 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
*/
/dts-v1/;
@@ -27,20 +27,140 @@
reg = <0x0 0x80000000>; /* 2GB */
};
- /* Fixed regulator used in the absence of PMIC */
- sd_vcc: sd-vcc {
+ syspwr: regulator-5v0 {
compatible = "regulator-fixed";
- regulator-name = "fixed-3.1V";
- regulator-min-microvolt = <3100000>;
- regulator-max-microvolt = <3100000>;
+ regulator-name = "SYSPWR";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
regulator-always-on;
};
};
+&cpu0 {
+ cpu0-supply = <&vdd_cpu>;
+};
+
&i2c0 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
+
+ atc260x: pmic@65 {
+ compatible = "actions,atc2603c";
+ reg = <0x65>;
+ interrupt-parent = <&sirq>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+
+ reset-time-sec = <6>;
+
+ regulators {
+ compatible = "actions,atc2603c-regulator";
+
+ dcdc1-supply = <&syspwr>;
+ dcdc2-supply = <&syspwr>;
+ dcdc3-supply = <&syspwr>;
+ ldo1-supply = <&syspwr>;
+ ldo2-supply = <&syspwr>;
+ ldo3-supply = <&syspwr>;
+ ldo5-supply = <&syspwr>;
+ ldo6-supply = <&syspwr>;
+ ldo7-supply = <&syspwr>;
+ ldo8-supply = <&syspwr>;
+ ldo11-supply = <&syspwr>;
+ ldo12-supply = <&syspwr>;
+ switchldo1-supply = <&vcc>;
+
+ vdd_cpu: dcdc1 {
+ regulator-name = "VDD_CPU";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ };
+
+ vddq: dcdc2 {
+ regulator-name = "VDDQ";
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <2150000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc: dcdc3 {
+ regulator-name = "VCC";
+ regulator-min-microvolt = <2600000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vcc_3v3: ldo1 {
+ regulator-name = "VCC_3V3";
+ regulator-min-microvolt = <2600000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ avcc: ldo2 {
+ regulator-name = "AVCC";
+ regulator-min-microvolt = <2600000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vdd_1v8: ldo3 {
+ regulator-name = "VDD_1V8";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-always-on;
+ };
+
+ vcc_3v1: ldo5 {
+ regulator-name = "VCC_3V1";
+ regulator-min-microvolt = <2600000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ avdd: ldo6 {
+ regulator-name = "AVDD";
+ regulator-min-microvolt = <700000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-always-on;
+ };
+
+ sens_1v8: ldo7 {
+ regulator-name = "SENS_1V8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ ldo8: ldo8 {
+ regulator-name = "LDO8";
+ regulator-min-microvolt = <2300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ svcc: ldo11 {
+ regulator-name = "SVCC";
+ regulator-min-microvolt = <2600000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ rtc_vdd: ldo12 {
+ regulator-name = "RTC_VDD";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ sd_vcc: switchldo1 {
+ regulator-name = "SD_VCC";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
+ };
};
&i2c1 {
diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
index 7bf1da916f25..ff1bdb10ad19 100644
--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
+++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
@@ -480,7 +480,7 @@
status = "disabled";
};
- nand: qpic-nand@79b0000 {
+ nand: nand-controller@79b0000 {
compatible = "qcom,ipq4019-nand";
reg = <0x79b0000 0x1000>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
index 0cda654371ae..30ee913faae6 100644
--- a/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-lge-nexus5-hammerhead.dts
@@ -529,6 +529,10 @@
pinctrl-names = "default";
pinctrl-0 = <&mpu6515_pin>;
+ mount-matrix = "0", "-1", "0",
+ "-1", "0", "0",
+ "0", "0", "1";
+
i2c-gate {
#address-cells = <1>;
#size-cells = <0>;
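
The mount-matrix added above is the standard IIO 3x3 rotation that maps raw sensor axes onto device axes; the property's nine values are read row-major. A small worked illustration of the transform it encodes (plain integer math; the helper name is hypothetical, and the kernel actually parses the values as strings):

/* out = M * in, with M stored row-major as in the DT property. */
static void apply_mount_matrix(const int m[9], const int in[3], int out[3])
{
	int i;

	for (i = 0; i < 3; i++)
		out[i] = m[3 * i + 0] * in[0] +
			 m[3 * i + 1] * in[1] +
			 m[3 * i + 2] * in[2];
}

/*
 * For m = { 0, -1, 0,  -1, 0, 0,  0, 0, 1 } this gives
 * out = { -in[1], -in[0], in[2] }: X and Y are swapped and inverted
 * relative to the phone body, Z is unchanged.
 */
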
@@ -575,7 +579,7 @@
maxim,rcomp = /bits/ 8 <0x4d>;
interrupt-parent = <&msmgpio>;
- interrupts = <9 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&fuelgauge_pin>;
diff --git a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
index a0f7f461f48c..d737de7173cf 100644
--- a/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
+++ b/arch/arm/boot/dts/qcom-msm8974-samsung-klte.dts
@@ -322,6 +322,27 @@
status = "okay";
};
+ /* blsp2_uart8 */
+ serial@f995e000 {
+ status = "okay";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp2_uart8_pins_active>;
+ pinctrl-1 = <&blsp2_uart8_pins_sleep>;
+
+ bluetooth {
+ compatible = "brcm,bcm43540-bt";
+ max-speed = <3000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_pins>;
+ device-wakeup-gpios = <&msmgpio 91 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpio_expander 9 GPIO_ACTIVE_HIGH>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <75 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wakeup";
+ };
+ };
+
gpio-keys {
compatible = "gpio-keys";
input-name = "gpio-keys";
@@ -356,6 +377,35 @@
};
pinctrl@fd510000 {
+ blsp2_uart8_pins_active: blsp2-uart8-pins-active {
+ pins = "gpio45", "gpio46", "gpio47", "gpio48";
+ function = "blsp_uart8";
+ drive-strength = <8>;
+ bias-disable;
+ };
+
+ blsp2_uart8_pins_sleep: blsp2-uart8-pins-sleep {
+ pins = "gpio45", "gpio46", "gpio47", "gpio48";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+
+ bt_pins: bt-pins {
+ hostwake {
+ pins = "gpio75";
+ function = "gpio";
+ drive-strength = <16>;
+ input-enable;
+ };
+
+ devwake {
+ pins = "gpio91";
+ function = "gpio";
+ drive-strength = <2>;
+ };
+ };
+
sdhc1_pin_a: sdhc1-pin-active {
clk {
pins = "sdc1_clk";
@@ -717,7 +767,7 @@
maxim,rcomp = /bits/ 8 <0x56>;
interrupt-parent = <&pma8084_gpios>;
- interrupts = <21 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&fuelgauge_pin>;
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index c65d33591efa..db4c06bf7888 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -715,6 +715,15 @@
status = "disabled";
};
+ blsp2_uart8: serial@f995e000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0xf995e000 0x1000>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
blsp2_uart10: serial@f9960000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0xf9960000 0x1000>;
diff --git a/arch/arm/boot/dts/qcom-sdx55-t55.dts b/arch/arm/boot/dts/qcom-sdx55-t55.dts
new file mode 100644
index 000000000000..ddcd53aa533d
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-sdx55-t55.dts
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Linaro Ltd.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "qcom-sdx55.dtsi"
+#include "qcom-pmx55.dtsi"
+
+/ {
+ model = "Thundercomm T55 Development Kit";
+ compatible = "qcom,sdx55-t55", "qcom,sdx55";
+ qcom,board-id = <0xb010008 0x4>;
+
+ aliases {
+ serial0 = &blsp1_uart3;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mpss_debug_mem: memory@8ef00000 {
+ no-map;
+ reg = <0x8ef00000 0x800000>;
+ };
+
+ ipa_fw_mem: memory@8fced000 {
+ no-map;
+ reg = <0x8fced000 0x10000>;
+ };
+
+ mpss_adsp_mem: memory@90800000 {
+ no-map;
+ reg = <0x90800000 0xf800000>;
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ };
+
+ vreg_bob_3p3: pmx55-bob {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_bob_3p3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+
+ vin-supply = <&vph_pwr>;
+ };
+
+ vreg_s7e_mx_0p752: pmx55-s7e {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_s7e_mx_0p752";
+ regulator-min-microvolt = <752000>;
+ regulator-max-microvolt = <752000>;
+
+ vin-supply = <&vph_pwr>;
+ };
+
+ vreg_sd_vdd: sd-vdd {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_sd_vdd";
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+
+ vin-supply = <&vreg_vddpx_2>;
+ };
+
+ vreg_vddpx_2: vddpx-2 {
+ compatible = "regulator-gpio";
+ regulator-name = "vreg_vddpx_2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2850000>;
+ enable-gpios = <&tlmm 98 GPIO_ACTIVE_HIGH>;
+ gpios = <&tlmm 100 GPIO_ACTIVE_HIGH>;
+ states = <1800000 0>, <2850000 1>;
+ startup-delay-us = <200000>;
+ enable-active-high;
+ regulator-boot-on;
+
+ vin-supply = <&vph_pwr>;
+ };
+};
+
+&apps_rsc {
+ pmx55-rpmh-regulators {
+ compatible = "qcom,pmx55-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-l1-l2-supply = <&vreg_s2e_1p224>;
+ vdd-l3-l9-supply = <&vreg_s3e_0p824>;
+ vdd-l4-l12-supply = <&vreg_s4e_1p904>;
+ vdd-l5-l6-supply = <&vreg_s4e_1p904>;
+ vdd-l7-l8-supply = <&vreg_s3e_0p824>;
+ vdd-l10-l11-l13-supply = <&vreg_bob_3p3>;
+ vdd-l14-supply = <&vreg_s7e_mx_0p752>;
+ vdd-l15-supply = <&vreg_s2e_1p224>;
+ vdd-l16-supply = <&vreg_s4e_1p904>;
+
+ vreg_s2e_1p224: smps2 {
+ regulator-min-microvolt = <1280000>;
+ regulator-max-microvolt = <1400000>;
+ };
+
+ vreg_s3e_0p824: smps3 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ vreg_s4e_1p904: smps4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1960000>;
+ };
+
+ vreg_l1e_bb_1p2: ldo1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo2 {
+ regulator-min-microvolt = <1128000>;
+ regulator-max-microvolt = <1128000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo3 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ vreg_l4e_bb_0p875: ldo4 {
+ regulator-min-microvolt = <872000>;
+ regulator-max-microvolt = <872000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ vreg_l5e_bb_1p7: ldo5 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo7 {
+ regulator-min-microvolt = <480000>;
+ regulator-max-microvolt = <900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo8 {
+ regulator-min-microvolt = <480000>;
+ regulator-max-microvolt = <900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo9 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ vreg_l10e_3p1: ldo10 {
+ regulator-min-microvolt = <3088000>;
+ regulator-max-microvolt = <3088000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo11 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo12 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo13 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo14 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo15 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo16 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <1904000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+ };
+};
+
+&blsp1_uart3 {
+ status = "ok";
+};
+
+&qpic_bam {
+ status = "ok";
+};
+
+&qpic_nand {
+ status = "ok";
+
+ nand@0 {
+ reg = <0>;
+
+ nand-ecc-strength = <8>;
+ nand-ecc-step-size = <512>;
+ nand-bus-width = <8>;
+ /* efs2 partition is secured */
+ secure-regions = <0x500000 0xb00000>;
+ };
+};
+
+&remoteproc_mpss {
+ status = "okay";
+ memory-region = <&mpss_adsp_mem>;
+};
+
+&usb_hsphy {
+ status = "okay";
+ vdda-pll-supply = <&vreg_l4e_bb_0p875>;
+ vdda33-supply = <&vreg_l10e_3p1>;
+ vdda18-supply = <&vreg_l5e_bb_1p7>;
+};
+
+&usb_qmpphy {
+ status = "okay";
+ vdda-phy-supply = <&vreg_l4e_bb_0p875>;
+ vdda-pll-supply = <&vreg_l1e_bb_1p2>;
+};
+
+&usb {
+ status = "okay";
+};
+
+&usb_dwc3 {
+ dr_mode = "peripheral";
+};
diff --git a/arch/arm/boot/dts/qcom-sdx55-telit-fn980-tlb.dts b/arch/arm/boot/dts/qcom-sdx55-telit-fn980-tlb.dts
new file mode 100644
index 000000000000..3065f84634b8
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-sdx55-telit-fn980-tlb.dts
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Linaro Ltd.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "qcom-sdx55.dtsi"
+#include "qcom-pmx55.dtsi"
+
+/ {
+ model = "Telit FN980 TLB";
+ compatible = "qcom,sdx55-telit-fn980-tlb", "qcom,sdx55";
+ qcom,board-id = <0xb010008 0x0>;
+
+ aliases {
+ serial0 = &blsp1_uart3;
+ };
+
+ chosen {
+ stdout-path = "serial0:921600n8";
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mpss_debug_mem: memory@8ef00000 {
+ no-map;
+ reg = <0x8ef00000 0x800000>;
+ };
+
+ ipa_fw_mem: memory@8fced000 {
+ no-map;
+ reg = <0x8fced000 0x10000>;
+ };
+
+ mpss_adsp_mem: memory@90800000 {
+ no-map;
+ reg = <0x90800000 0xf800000>;
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ };
+
+ vreg_bob_3p3: pmx55-bob {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_bob_3p3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+
+ vin-supply = <&vph_pwr>;
+ };
+
+ vreg_s7e_mx_0p752: pmx55-s7e {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_s7e_mx_0p752";
+ regulator-min-microvolt = <752000>;
+ regulator-max-microvolt = <752000>;
+
+ vin-supply = <&vph_pwr>;
+ };
+
+ vreg_sd_vdd: sd-vdd {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_sd_vdd";
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+
+ vin-supply = <&vreg_vddpx_2>;
+ };
+
+ vreg_vddpx_2: vddpx-2 {
+ compatible = "regulator-gpio";
+ regulator-name = "vreg_vddpx_2";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2850000>;
+ enable-gpios = <&tlmm 98 GPIO_ACTIVE_HIGH>;
+ gpios = <&tlmm 100 GPIO_ACTIVE_HIGH>;
+ states = <1800000 0>, <2850000 1>;
+ startup-delay-us = <200000>;
+ enable-active-high;
+ regulator-boot-on;
+
+ vin-supply = <&vph_pwr>;
+ };
+};
+
+&apps_rsc {
+ pmx55-rpmh-regulators {
+ compatible = "qcom,pmx55-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-l1-l2-supply = <&vreg_s2e_1p224>;
+ vdd-l3-l9-supply = <&vreg_s3e_0p824>;
+ vdd-l4-l12-supply = <&vreg_s4e_1p904>;
+ vdd-l5-l6-supply = <&vreg_s4e_1p904>;
+ vdd-l7-l8-supply = <&vreg_s3e_0p824>;
+ vdd-l10-l11-l13-supply = <&vreg_bob_3p3>;
+ vdd-l14-supply = <&vreg_s7e_mx_0p752>;
+ vdd-l15-supply = <&vreg_s2e_1p224>;
+ vdd-l16-supply = <&vreg_s4e_1p904>;
+
+ vreg_s2e_1p224: smps2 {
+ regulator-min-microvolt = <1280000>;
+ regulator-max-microvolt = <1400000>;
+ };
+
+ vreg_s3e_0p824: smps3 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1000000>;
+ };
+
+ vreg_s4e_1p904: smps4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1960000>;
+ };
+
+ vreg_l1e_bb_1p2: ldo1 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo2 {
+ regulator-min-microvolt = <1128000>;
+ regulator-max-microvolt = <1128000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo3 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ vreg_l4e_bb_0p875: ldo4 {
+ regulator-min-microvolt = <872000>;
+ regulator-max-microvolt = <872000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ vreg_l5e_bb_1p7: ldo5 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <1900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo7 {
+ regulator-min-microvolt = <480000>;
+ regulator-max-microvolt = <900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo8 {
+ regulator-min-microvolt = <480000>;
+ regulator-max-microvolt = <900000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo9 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ vreg_l10e_3p1: ldo10 {
+ regulator-min-microvolt = <3088000>;
+ regulator-max-microvolt = <3088000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo11 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo12 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo13 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo14 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo15 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+
+ ldo16 {
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <1904000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_LPM>;
+ };
+ };
+};
+
+&blsp1_uart3 {
+ status = "ok";
+};
+
+&qpic_bam {
+ status = "ok";
+};
+
+&qpic_nand {
+ status = "ok";
+
+ nand@0 {
+ reg = <0>;
+
+ nand-ecc-strength = <8>;
+ nand-ecc-step-size = <512>;
+ nand-bus-width = <8>;
+ /* ico and efs2 partitions are secured */
+ secure-regions = <0x500000 0x500000
+ 0xa00000 0xb00000>;
+ };
+};
+
+&remoteproc_mpss {
+ status = "okay";
+ memory-region = <&mpss_adsp_mem>;
+};
+
+&usb_hsphy {
+ status = "okay";
+ vdda-pll-supply = <&vreg_l4e_bb_0p875>;
+ vdda33-supply = <&vreg_l10e_3p1>;
+ vdda18-supply = <&vreg_l5e_bb_1p7>;
+};
+
+&usb_qmpphy {
+ status = "okay";
+ vdda-phy-supply = <&vreg_l4e_bb_0p875>;
+ vdda-pll-supply = <&vreg_l1e_bb_1p2>;
+};
+
+&usb {
+ status = "okay";
+};
+
+&usb_dwc3 {
+ dr_mode = "peripheral";
+};
diff --git a/arch/arm/boot/dts/qcom-sdx55.dtsi b/arch/arm/boot/dts/qcom-sdx55.dtsi
index e4180bbc4655..0057c7c04d31 100644
--- a/arch/arm/boot/dts/qcom-sdx55.dtsi
+++ b/arch/arm/boot/dts/qcom-sdx55.dtsi
@@ -8,6 +8,7 @@
#include <dt-bindings/clock/qcom,gcc-sdx55.h>
#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interconnect/qcom,sdx55.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
@@ -53,6 +54,41 @@
compatible = "arm,cortex-a7";
reg = <0x0>;
enable-method = "psci";
+ clocks = <&apcs>;
+ power-domains = <&rpmhpd SDX55_CX>;
+ power-domain-names = "rpmhpd";
+ operating-points-v2 = <&cpu_opp_table>;
+ };
+ };
+
+ cpu_opp_table: cpu-opp-table {
+ compatible = "operating-points-v2";
+ opp-shared;
+
+ opp-345600000 {
+ opp-hz = /bits/ 64 <345600000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-576000000 {
+ opp-hz = /bits/ 64 <576000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-1094400000 {
+ opp-hz = /bits/ 64 <1094400000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+
+ opp-1555200000 {
+ opp-hz = /bits/ 64 <1555200000>;
+ required-opps = <&rpmhpd_opp_turbo>;
+ };
+ };
+
+ firmware {
+ scm {
+ compatible = "qcom,scm-sdx55", "qcom,scm";
};
};
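
The cpu-opp-table above pairs each opp-hz with a required-opps phandle into rpmhpd, so a CPU frequency change also votes the CX power domain to a matching corner. A hedged sketch of what one transition reduces to through the generic OPP core (the cpufreq driver wiring itself is outside this patch):

#include <linux/device.h>
#include <linux/pm_opp.h>

/*
 * dev_pm_opp_set_rate() looks up the OPP covering target_hz, applies its
 * required-opps entry as a genpd performance state on the attached rpmhpd
 * domain, then sets the CPU clock (the &apcs clock above) to that rate.
 */
static int sdx55_cpu_set_freq(struct device *cpu_dev, unsigned long target_hz)
{
	return dev_pm_opp_set_rate(cpu_dev, target_hz);
}
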
@@ -119,6 +155,37 @@
hwlocks = <&tcsr_mutex 3>;
};
+ smp2p-mpss {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&apcs 14>;
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ modem_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ modem_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ ipa_smp2p_out: ipa-ap-to-modem {
+ qcom,entry-name = "ipa";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ ipa_smp2p_in: ipa-modem-to-ap {
+ qcom,entry-name = "ipa";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -187,6 +254,34 @@
};
};
+ mc_virt: interconnect@1100000 {
+ compatible = "qcom,sdx55-mc-virt";
+ reg = <0x01100000 0x400000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mem_noc: interconnect@9680000 {
+ compatible = "qcom,sdx55-mem-noc";
+ reg = <0x09680000 0x40000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ system_noc: interconnect@162c000 {
+ compatible = "qcom,sdx55-system-noc";
+ reg = <0x0162c000 0x31200>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ ipa_virt: interconnect@1e00000 {
+ compatible = "qcom,sdx55-ipa-virt";
+ reg = <0x01e00000 0x100000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
qpic_bam: dma-controller@1b04000 {
compatible = "qcom,bam-v1.7.0";
reg = <0x01b04000 0x1c000>;
@@ -199,7 +294,7 @@
status = "disabled";
};
- qpic_nand: nand@1b30000 {
+ qpic_nand: nand-controller@1b30000 {
compatible = "qcom,sdx55-nand";
reg = <0x01b30000 0x10000>;
#address-cells = <1>;
@@ -215,6 +310,47 @@
status = "disabled";
};
+ ipa: ipa@1e40000 {
+ compatible = "qcom,sdx55-ipa";
+
+ iommus = <&apps_smmu 0x5e0 0x0>,
+ <&apps_smmu 0x5e2 0x0>;
+ reg = <0x1e40000 0x7000>,
+ <0x1e50000 0x4b20>,
+ <0x1e04000 0x2c000>;
+ reg-names = "ipa-reg",
+ "ipa-shared",
+ "gsi";
+
+ interrupts-extended = <&intc GIC_SPI 241 IRQ_TYPE_EDGE_RISING>,
+ <&intc GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+ <&ipa_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&ipa_smp2p_in 1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "ipa",
+ "gsi",
+ "ipa-clock-query",
+ "ipa-setup-ready";
+
+ clocks = <&rpmhcc RPMH_IPA_CLK>;
+ clock-names = "core";
+
+ interconnects = <&system_noc MASTER_IPA &system_noc SLAVE_SNOC_MEM_NOC_GC>,
+ <&mem_noc MASTER_SNOC_GC_MEM_NOC &mc_virt SLAVE_EBI_CH0>,
+ <&system_noc MASTER_IPA &system_noc SLAVE_OCIMEM>,
+ <&mem_noc MASTER_AMPSS_M0 &system_noc SLAVE_IPA_CFG>;
+ interconnect-names = "memory-a",
+ "memory-b",
+ "imem",
+ "config";
+
+ qcom,smem-states = <&ipa_smp2p_out 0>,
+ <&ipa_smp2p_out 1>;
+ qcom,smem-state-names = "ipa-clock-enabled-valid",
+ "ipa-clock-enabled";
+
+ status = "disabled";
+ };
+
tcsr_mutex: hwlock@1f40000 {
compatible = "qcom,tcsr-mutex";
reg = <0x01f40000 0x40000>;
@@ -233,6 +369,39 @@
status = "disabled";
};
+ remoteproc_mpss: remoteproc@4080000 {
+ compatible = "qcom,sdx55-mpss-pas";
+ reg = <0x04080000 0x4040>;
+
+ interrupts-extended = <&intc GIC_SPI 250 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 3 IRQ_TYPE_EDGE_RISING>,
+ <&modem_smp2p_in 7 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready", "handover",
+ "stop-ack", "shutdown-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&rpmhpd SDX55_CX>,
+ <&rpmhpd SDX55_MSS>;
+ power-domain-names = "cx", "mss";
+
+ qcom,smem-states = <&modem_smp2p_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts = <GIC_SPI 114 IRQ_TYPE_EDGE_RISING>;
+ label = "mpss";
+ qcom,remote-pid = <1>;
+ mboxes = <&apcs 15>;
+ };
+ };
+
usb: usb@a6f8800 {
compatible = "qcom,sdx55-dwc3", "qcom,dwc3";
reg = <0x0a6f8800 0x400>;
@@ -319,6 +488,21 @@
#interrupt-cells = <2>;
};
+ imem@1468f000 {
+ compatible = "simple-mfd";
+ reg = <0x1468f000 0x1000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ranges = <0x0 0x1468f000 0x1000>;
+
+ pil-reloc@94c {
+ compatible = "qcom,pil-reloc-info";
+ reg = <0x94c 0x200>;
+ };
+ };
+
apps_smmu: iommu@15000000 {
compatible = "qcom,sdx55-smmu-500", "arm,mmu-500";
reg = <0x15000000 0x20000>;
@@ -352,6 +536,23 @@
<0x17802000 0x1000>;
};
+ a7pll: clock@17808000 {
+ compatible = "qcom,sdx55-a7pll";
+ reg = <0x17808000 0x1000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "bi_tcxo";
+ #clock-cells = <0>;
+ };
+
+ apcs: mailbox@17810000 {
+ compatible = "qcom,sdx55-apcs-gcc", "syscon";
+ reg = <0x17810000 0x2000>;
+ #mbox-cells = <1>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&a7pll>, <&gcc GPLL0>;
+ clock-names = "ref", "pll", "aux";
+ #clock-cells = <0>;
+ };
+
watchdog@17817000 {
compatible = "qcom,apss-wdt-sdx55", "qcom,kpss-wdt";
reg = <0x17817000 0x1000>;
@@ -491,6 +692,10 @@
};
};
};
+
+ apps_bcm_voter: bcm_voter {
+ compatible = "qcom,bcm-voter";
+ };
};
};
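
On the smp2p-mpss node and the qcom,smem-states consumers above: smp2p gives each side a small bank of outbound bits, and client drivers drive them through the generic smem-state API. A rough sketch of how the IPA driver's "ipa-clock-enabled" bit would be toggled (illustrative; not the literal driver code):

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/soc/qcom/smem_state.h>

/* Resolve "ipa-clock-enabled" -> <&ipa_smp2p_out 1> and set/clear the bit. */
static int signal_ipa_clock_state(struct device *dev, bool on)
{
	struct qcom_smem_state *state;
	unsigned int bit;

	state = qcom_smem_state_get(dev, "ipa-clock-enabled", &bit);
	if (IS_ERR(state))
		return PTR_ERR(state);

	return qcom_smem_state_update_bits(state, BIT(bit), on ? BIT(bit) : 0);
}
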
diff --git a/arch/arm/boot/dts/r7s9210-rza2mevb.dts b/arch/arm/boot/dts/r7s9210-rza2mevb.dts
index d062d02865e7..68498ce2aec0 100644
--- a/arch/arm/boot/dts/r7s9210-rza2mevb.dts
+++ b/arch/arm/boot/dts/r7s9210-rza2mevb.dts
@@ -4,6 +4,28 @@
*
* Copyright (C) 2018 Renesas Electronics
*
+ * As upstream Linux does not support XIP, it cannot run in 8 MiB of HyperRAM.
+ * Hence the 64 MiB of SDRAM on the sub-board needs to be enabled, which has
+ * the following ramifications:
+ * - SCIF4 connected to the on-board USB-serial can no longer be used as the
+ * serial console,
+ * - Instead, SCIF2 is used as the serial console, by connecting a 3.3V TTL
+ * USB-to-Serial adapter to the CMOS camera connector:
+ * - RXD = CN17-9,
+ * - TXD = CN17-10,
+ * - GND = CN17-2 or CN17-17,
+ * - The first Ethernet channel can no longer be used,
+ * - USB Channel 1 loses the overcurrent input signal.
+ *
+ * Please make sure your sub-board matches the following switch settings:
+ *
+ * SW6 SW6-1 set to SDRAM
+ * ON SW6-2 set to Audio
+ * +---------------------+ SW6-3 set to DRP
+ * | = = = = = | SW6-4 set to CEU
+ * | = = | SW6-5 set to Ether2
+ * | 1 2 3 4 5 6 7 8 9 0 | SW6-6 set to VDC6
+ * +---------------------+ SW6-7 set to VDC6
*/
/dts-v1/;
@@ -17,9 +39,8 @@
compatible = "renesas,rza2mevb", "renesas,r7s9210";
aliases {
- serial0 = &scif4;
- ethernet0 = &ether0;
- ethernet1 = &ether1;
+ serial0 = &scif2;
+ ethernet0 = &ether1;
};
chosen {
@@ -58,9 +79,9 @@
};
};
- memory@40000000 {
+ memory@c000000 {
device_type = "memory";
- reg = <0x40000000 0x00800000>; /* HyperRAM */
+ reg = <0x0c000000 0x04000000>; /* SDRAM */
};
};
@@ -72,17 +93,6 @@
status = "okay";
};
-&ether0 {
- pinctrl-names = "default";
- pinctrl-0 = <&eth0_pins>;
- status = "okay";
- renesas,no-ether-link;
- phy-handle = <&phy0>;
- phy0: ethernet-phy@0 {
- reg = <0>;
- };
-};
-
&ether1 {
pinctrl-names = "default";
pinctrl-0 = <&eth1_pins>;
@@ -142,9 +152,9 @@
};
/* Serial Console */
- scif4_pins: serial4 {
- pinmux = <RZA2_PINMUX(PORT9, 0, 4)>, /* TxD4 */
- <RZA2_PINMUX(PORT9, 1, 4)>; /* RxD4 */
+ scif2_pins: serial2 {
+ pinmux = <RZA2_PINMUX(PORTE, 2, 3)>, /* TxD2 */
+ <RZA2_PINMUX(PORTE, 1, 3)>; /* RxD2 */
};
sdhi0_pins: sdhi0 {
@@ -165,8 +175,7 @@
usb1_pins: usb1 {
pinmux = <RZA2_PINMUX(PORTC, 0, 1)>, /* VBUSIN1 */
- <RZA2_PINMUX(PORTC, 5, 1)>, /* VBUSEN1 */
- <RZA2_PINMUX(PORT7, 5, 5)>; /* OVRCUR1 */
+ <RZA2_PINMUX(PORTC, 5, 1)>; /* VBUSEN1 */
};
};
@@ -176,9 +185,9 @@
};
/* Serial Console */
-&scif4 {
+&scif2 {
pinctrl-names = "default";
- pinctrl-0 = <&scif4_pins>;
+ pinctrl-0 = <&scif2_pins>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ca.dts b/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ca.dts
index 98c3fbd89fa6..2bcb229844ab 100644
--- a/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ca.dts
+++ b/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ca.dts
@@ -91,92 +91,20 @@
status = "okay";
};
-&i2c0 {
- ov5640@3c {
- compatible = "ovti,ov5640";
- reg = <0x3c>;
- clocks = <&mclk_cam1>;
- clock-names = "xclk";
-
- port {
- ov5640_0: endpoint {
- bus-width = <8>;
- data-shift = <2>;
- bus-type = <6>;
- pclk-sample = <1>;
- remote-endpoint = <&vin0ep>;
- };
- };
- };
-};
-
&i2c1 {
pinctrl-0 = <&i2c1_pins>;
pinctrl-names = "default";
- status = "okay";
+ /* status set to "okay" when needed by camera configuration below */
clock-frequency = <400000>;
-
- ov5640@3c {
- compatible = "ovti,ov5640";
- reg = <0x3c>;
- clocks = <&mclk_cam2>;
- clock-names = "xclk";
-
- port {
- ov5640_1: endpoint {
- bus-width = <8>;
- data-shift = <2>;
- bus-type = <6>;
- pclk-sample = <1>;
- remote-endpoint = <&vin1ep>;
- };
- };
- };
-};
-
-&i2c2 {
- ov5640@3c {
- compatible = "ovti,ov5640";
- reg = <0x3c>;
- clocks = <&mclk_cam3>;
- clock-names = "xclk";
-
- port {
- ov5640_2: endpoint {
- bus-width = <8>;
- data-shift = <2>;
- bus-type = <6>;
- pclk-sample = <1>;
- remote-endpoint = <&vin2ep>;
- };
- };
- };
};
&i2c3 {
pinctrl-0 = <&i2c3_pins>;
pinctrl-names = "default";
- status = "okay";
+ /* status set to "okay" when needed by camera configuration below */
clock-frequency = <400000>;
-
- ov5640@3c {
- compatible = "ovti,ov5640";
- reg = <0x3c>;
- clocks = <&mclk_cam4>;
- clock-names = "xclk";
-
- port {
- ov5640_3: endpoint {
- bus-width = <8>;
- data-shift = <2>;
- bus-type = <6>;
- pclk-sample = <1>;
- remote-endpoint = <&vin3ep>;
- };
- };
- };
};
&pfc {
@@ -267,6 +195,22 @@
cts-gpios = <&gpio4 17 GPIO_ACTIVE_LOW>;
};
+/*
+ * The configuration below ties the VINx endpoints to the ov5640/ov7725
+ * camera endpoints.
+ *
+ * (Un)comment the #include statements to change the configuration.
+ */
+
+/* 8bit CMOS Camera 1 (J13) */
+#define CAM_PARENT_I2C i2c0
+#define MCLK_CAM mclk_cam1
+#define CAM_EP cam0ep
+#define VIN_EP vin0ep
+#undef CAM_ENABLED
+#include "r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi"
+//#include "r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi"
+
+#ifdef CAM_ENABLED
&vin0 {
/*
* Set SW2 switch on the SOM to 'ON'
@@ -278,13 +222,29 @@
port {
vin0ep: endpoint {
- remote-endpoint = <&ov5640_0>;
+ remote-endpoint = <&cam0ep>;
bus-width = <8>;
bus-type = <6>;
};
};
};
-
+#endif /* CAM_ENABLED */
+
+#undef CAM_PARENT_I2C
+#undef MCLK_CAM
+#undef CAM_EP
+#undef VIN_EP
+
+/* 8bit CMOS Camera 2 (J14) */
+#define CAM_PARENT_I2C i2c1
+#define MCLK_CAM mclk_cam2
+#define CAM_EP cam1ep
+#define VIN_EP vin1ep
+#undef CAM_ENABLED
+#include "r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi"
+//#include "r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi"
+
+#ifdef CAM_ENABLED
&vin1 {
/* Set SW1 switch on the SOM to 'ON' */
status = "okay";
@@ -293,13 +253,30 @@
port {
vin1ep: endpoint {
- remote-endpoint = <&ov5640_1>;
+ remote-endpoint = <&cam1ep>;
bus-width = <8>;
bus-type = <6>;
};
};
};
+#endif /* CAM_ENABLED */
+
+#undef CAM_PARENT_I2C
+#undef MCLK_CAM
+#undef CAM_EP
+#undef VIN_EP
+
+/* 8bit CMOS Camera 3 (J12) */
+#define CAM_PARENT_I2C i2c2
+#define MCLK_CAM mclk_cam3
+#define CAM_EP cam2ep
+#define VIN_EP vin2ep
+#undef CAM_ENABLED
+#include "r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi"
+//#include "r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi"
+
+#ifdef CAM_ENABLED
&vin2 {
status = "okay";
pinctrl-0 = <&vin2_pins>;
@@ -307,14 +284,30 @@
port {
vin2ep: endpoint {
- remote-endpoint = <&ov5640_2>;
+ remote-endpoint = <&cam2ep>;
bus-width = <8>;
data-shift = <8>;
bus-type = <6>;
};
};
};
-
+#endif /* CAM_ENABLED */
+
+#undef CAM_PARENT_I2C
+#undef MCLK_CAM
+#undef CAM_EP
+#undef VIN_EP
+
+/* 8bit CMOS Camera 4 (J11) */
+#define CAM_PARENT_I2C i2c3
+#define MCLK_CAM mclk_cam4
+#define CAM_EP cam3ep
+#define VIN_EP vin3ep
+#undef CAM_ENABLED
+#include "r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi"
+//#include "r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi"
+
+#ifdef CAM_ENABLED
&vin3 {
status = "okay";
pinctrl-0 = <&vin3_pins>;
@@ -322,9 +315,15 @@
port {
vin3ep: endpoint {
- remote-endpoint = <&ov5640_3>;
+ remote-endpoint = <&cam3ep>;
bus-width = <8>;
bus-type = <6>;
};
};
};
+#endif /* CAM_ENABLED */
+
+#undef CAM_PARENT_I2C
+#undef MCLK_CAM
+#undef CAM_EP
+#undef VIN_EP
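A note on the mechanism above, since it is unusual for a board DTS: kernel device trees are run through the C preprocessor before dtc compiles them, so each camera slot defines CAM_PARENT_I2C, MCLK_CAM, CAM_EP and VIN_EP, and the r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi added just below consumes those macros and defines CAM_ENABLED, which gates the matching #ifdef'd &vinN override. As a rough sketch (properties trimmed), the first ov5640 include effectively expands to:

    &i2c0 {
            status = "okay";

            ov5640@3c {
                    compatible = "ovti,ov5640";
                    reg = <0x3c>;
                    clocks = <&mclk_cam1>;
                    clock-names = "xclk";

                    port {
                            cam0ep: endpoint {
                                    remote-endpoint = <&vin0ep>;
                            };
                    };
            };
    };

If neither include is active for a slot, CAM_ENABLED stays undefined and the whole #ifdef CAM_ENABLED block for that VIN channel compiles away, leaving the corresponding &vinN node untouched.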
diff --git a/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi b/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi
new file mode 100644
index 000000000000..70c72ba4fe72
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ov5640-single.dtsi
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This include file ties a VIN interface with a single ov5640 sensor on
+ * the iWave-RZ/G1H Qseven board development platform connected with the
+ * camera daughter board.
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+#define CAM_ENABLED 1
+
+&CAM_PARENT_I2C {
+ status = "okay";
+
+ ov5640@3c {
+ compatible = "ovti,ov5640";
+ reg = <0x3c>;
+ clocks = <&MCLK_CAM>;
+ clock-names = "xclk";
+ status = "okay";
+
+ port {
+ CAM_EP: endpoint {
+ bus-width = <8>;
+ data-shift = <2>;
+ bus-type = <6>;
+ pclk-sample = <1>;
+ remote-endpoint = <&VIN_EP>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi b/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi
new file mode 100644
index 000000000000..f5e77f024251
--- /dev/null
+++ b/arch/arm/boot/dts/r8a7742-iwg21d-q7-dbcm-ov7725-single.dtsi
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This include file ties a VIN interface with a single ov7725 sensor on
+ * the iWave-RZ/G1H Qseven board development platform connected with the
+ * camera daughter board.
+ *
+ * Copyright (C) 2020 Renesas Electronics Corp.
+ */
+
+#define CAM_ENABLED 1
+
+&CAM_PARENT_I2C {
+ status = "okay";
+
+ ov7725@21 {
+ compatible = "ovti,ov7725";
+ reg = <0x21>;
+ clocks = <&MCLK_CAM>;
+ status = "okay";
+
+ port {
+ CAM_EP: endpoint {
+ bus-width = <8>;
+ bus-type = <6>;
+ remote-endpoint = <&VIN_EP>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/r8a7742-iwg21d-q7.dts b/arch/arm/boot/dts/r8a7742-iwg21d-q7.dts
index 0063ef92f50e..94bf8a116b52 100644
--- a/arch/arm/boot/dts/r8a7742-iwg21d-q7.dts
+++ b/arch/arm/boot/dts/r8a7742-iwg21d-q7.dts
@@ -387,8 +387,8 @@
rcar_sound,dai {
dai0 {
- playback = <&ssi4 &src4 &dvc1>;
- capture = <&ssi3 &src3 &dvc0>;
+ playback = <&ssi4>, <&src4>, <&dvc1>;
+ capture = <&ssi3>, <&src3>, <&dvc0>;
};
};
};
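A note on the bracketing change: to dtc, <&ssi4 &src4 &dvc1> and <&ssi4>, <&src4>, <&dvc1> encode exactly the same flat cell array, so the rewrite is stylistic. It makes explicit that the property is a list of single-phandle entries (a phandle-array in DT schema terms) rather than one opaque multi-cell value, which is likely what DT schema validation expects. The same normalization is applied to the cpus, pinctrl-0 and playback/capture lists throughout the diffs below. A minimal illustration:

    /* Both lines produce an identical property value in the compiled dtb: */
    playback = <&ssi4 &src4 &dvc1>;
    playback = <&ssi4>, <&src4>, <&dvc1>;   /* preferred: one entry per <> */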
diff --git a/arch/arm/boot/dts/r8a7742.dtsi b/arch/arm/boot/dts/r8a7742.dtsi
index 6a78c813057b..dd1b976d2a6c 100644
--- a/arch/arm/boot/dts/r8a7742.dtsi
+++ b/arch/arm/boot/dts/r8a7742.dtsi
@@ -367,13 +367,13 @@
apmu@e6151000 {
compatible = "renesas,r8a7742-apmu", "renesas,apmu";
reg = <0 0xe6151000 0 0x188>;
- cpus = <&cpu4 &cpu5 &cpu6 &cpu7>;
+ cpus = <&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>;
};
apmu@e6152000 {
compatible = "renesas,r8a7742-apmu", "renesas,apmu";
reg = <0 0xe6152000 0 0x188>;
- cpus = <&cpu0 &cpu1 &cpu2 &cpu3>;
+ cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
};
rst: reset-controller@e6160000 {
diff --git a/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts b/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts
index 807e7d0d6b62..4ace117470e8 100644
--- a/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts
+++ b/arch/arm/boot/dts/r8a7743-sk-rzg1m.dts
@@ -61,7 +61,7 @@
};
&ether {
- pinctrl-0 = <&ether_pins &phy1_pins>;
+ pinctrl-0 = <&ether_pins>, <&phy1_pins>;
pinctrl-names = "default";
phy-handle = <&phy1>;
diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi
index f444e418f408..6e37b8da278b 100644
--- a/arch/arm/boot/dts/r8a7743.dtsi
+++ b/arch/arm/boot/dts/r8a7743.dtsi
@@ -293,7 +293,7 @@
apmu@e6152000 {
compatible = "renesas,r8a7743-apmu", "renesas,apmu";
reg = <0 0xe6152000 0 0x188>;
- cpus = <&cpu0 &cpu1>;
+ cpus = <&cpu0>, <&cpu1>;
};
rst: reset-controller@e6160000 {
diff --git a/arch/arm/boot/dts/r8a7744.dtsi b/arch/arm/boot/dts/r8a7744.dtsi
index 0442aad4f9db..ace20861c0c4 100644
--- a/arch/arm/boot/dts/r8a7744.dtsi
+++ b/arch/arm/boot/dts/r8a7744.dtsi
@@ -293,7 +293,7 @@
apmu@e6152000 {
compatible = "renesas,r8a7744-apmu", "renesas,apmu";
reg = <0 0xe6152000 0 0x188>;
- cpus = <&cpu0 &cpu1>;
+ cpus = <&cpu0>, <&cpu1>;
};
rst: reset-controller@e6160000 {
diff --git a/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts b/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts
index 1c7b37a01f0a..73bd62d8a929 100644
--- a/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts
+++ b/arch/arm/boot/dts/r8a7745-iwg22d-sodimm.dts
@@ -289,8 +289,8 @@
rcar_sound,dai {
dai0 {
- playback = <&ssi3 &src3 &dvc0>;
- capture = <&ssi4 &src4 &dvc1>;
+ playback = <&ssi3>, <&src3>, <&dvc0>;
+ capture = <&ssi4>, <&src4>, <&dvc1>;
};
};
};
diff --git a/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts b/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts
index db72a801abe5..59d1a9bfab05 100644
--- a/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts
+++ b/arch/arm/boot/dts/r8a7745-sk-rzg1e.dts
@@ -56,7 +56,7 @@
};
&ether {
- pinctrl-0 = <&ether_pins &phy1_pins>;
+ pinctrl-0 = <&ether_pins>, <&phy1_pins>;
pinctrl-names = "default";
phy-handle = <&phy1>;
diff --git a/arch/arm/boot/dts/r8a7745.dtsi b/arch/arm/boot/dts/r8a7745.dtsi
index 0f14ac22921d..be33bdabe452 100644
--- a/arch/arm/boot/dts/r8a7745.dtsi
+++ b/arch/arm/boot/dts/r8a7745.dtsi
@@ -258,7 +258,7 @@
apmu@e6151000 {
compatible = "renesas,r8a7745-apmu", "renesas,apmu";
reg = <0 0xe6151000 0 0x188>;
- cpus = <&cpu0 &cpu1>;
+ cpus = <&cpu0>, <&cpu1>;
};
rst: reset-controller@e6160000 {
diff --git a/arch/arm/boot/dts/r8a77470.dtsi b/arch/arm/boot/dts/r8a77470.dtsi
index 691b1a131c87..a1d7f6e7a2e3 100644
--- a/arch/arm/boot/dts/r8a77470.dtsi
+++ b/arch/arm/boot/dts/r8a77470.dtsi
@@ -205,7 +205,7 @@
apmu@e6151000 {
compatible = "renesas,r8a77470-apmu", "renesas,apmu";
reg = <0 0xe6151000 0 0x188>;
- cpus = <&cpu0 &cpu1>;
+ cpus = <&cpu0>, <&cpu1>;
};
rst: reset-controller@e6160000 {
diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
index 09a152b91557..2dad0742d2ba 100644
--- a/arch/arm/boot/dts/r8a7790-lager.dts
+++ b/arch/arm/boot/dts/r8a7790-lager.dts
@@ -53,6 +53,9 @@
i2c11 = &i2cexio1;
i2c12 = &i2chdmi;
i2c13 = &i2cpwr;
+ mmc0 = &mmcif1;
+ mmc1 = &sdhi0;
+ mmc2 = &sdhi2;
};
chosen {
@@ -659,7 +662,7 @@
};
&ether {
- pinctrl-0 = <&ether_pins &phy1_pins>;
+ pinctrl-0 = <&ether_pins>, <&phy1_pins>;
pinctrl-names = "default";
phy-handle = <&phy1>;
@@ -908,7 +911,7 @@
};
&rcar_sound {
- pinctrl-0 = <&sound_pins &sound_clk_pins>;
+ pinctrl-0 = <&sound_pins>, <&sound_clk_pins>;
pinctrl-names = "default";
/* Single DAI */
@@ -918,8 +921,8 @@
rcar_sound,dai {
dai0 {
- playback = <&ssi0 &src2 &dvc0>;
- capture = <&ssi1 &src3 &dvc1>;
+ playback = <&ssi0>, <&src2>, <&dvc0>;
+ capture = <&ssi1>, <&src3>, <&dvc1>;
};
};
};
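The new mmc aliases pin down the mmcN host numbering, which is otherwise assigned in probe order; in practice this also stabilizes /dev/mmcblk* naming across kernel versions. The aliases belong in the board files rather than the SoC .dtsi because only the board knows which controllers are actually wired up (the same pattern recurs in the Rockchip diffs further below). Sketch of the pattern, with the controller roles in the comments as an assumption for illustration:

    aliases {
            mmc0 = &mmcif1;   /* e.g. on-board eMMC */
            mmc1 = &sdhi0;    /* e.g. SD card slot */
            mmc2 = &sdhi2;
    };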
diff --git a/arch/arm/boot/dts/r8a7790-stout.dts b/arch/arm/boot/dts/r8a7790-stout.dts
index 6a457bc9280a..d51f23572d7f 100644
--- a/arch/arm/boot/dts/r8a7790-stout.dts
+++ b/arch/arm/boot/dts/r8a7790-stout.dts
@@ -191,7 +191,7 @@
};
&ether {
- pinctrl-0 = <&ether_pins &phy1_pins>;
+ pinctrl-0 = <&ether_pins>, <&phy1_pins>;
pinctrl-names = "default";
phy-handle = <&phy1>;
@@ -321,7 +321,7 @@
&iic3 {
pinctrl-names = "default";
- pinctrl-0 = <&iic3_pins &pmic_irq_pins>;
+ pinctrl-0 = <&iic3_pins>, <&pmic_irq_pins>;
status = "okay";
pmic@58 {
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index b0569b4ea5c8..de29394eed63 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -381,13 +381,13 @@
apmu@e6151000 {
compatible = "renesas,r8a7790-apmu", "renesas,apmu";
reg = <0 0xe6151000 0 0x188>;
- cpus = <&cpu4 &cpu5 &cpu6 &cpu7>;
+ cpus = <&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>;
};
apmu@e6152000 {
compatible = "renesas,r8a7790-apmu", "renesas,apmu";
reg = <0 0xe6152000 0 0x188>;
- cpus = <&cpu0 &cpu1 &cpu2 &cpu3>;
+ cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
};
rst: reset-controller@e6160000 {
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index f603cba5441f..61e881bbbf6e 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -53,6 +53,9 @@
i2c12 = &i2cexio1;
i2c13 = &i2chdmi;
i2c14 = &i2cexio4;
+ mmc0 = &sdhi0;
+ mmc1 = &sdhi1;
+ mmc2 = &sdhi2;
};
chosen {
@@ -78,6 +81,9 @@
keyboard {
compatible = "gpio-keys";
+ pinctrl-0 = <&sw2_pins>;
+ pinctrl-names = "default";
+
key-1 {
gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
linux,code = <KEY_1>;
@@ -615,10 +621,15 @@
groups = "audio_clk_a";
function = "audio_clk";
};
+
+ sw2_pins: sw2 {
+ pins = "GP_5_0", "GP_5_1", "GP_5_2", "GP_5_3";
+ bias-pull-up;
+ };
};
&ether {
- pinctrl-0 = <&ether_pins &phy1_pins>;
+ pinctrl-0 = <&ether_pins>, <&phy1_pins>;
pinctrl-names = "default";
phy-handle = <&phy1>;
@@ -878,7 +889,7 @@
};
&rcar_sound {
- pinctrl-0 = <&sound_pins &sound_clk_pins>;
+ pinctrl-0 = <&sound_pins>, <&sound_clk_pins>;
pinctrl-names = "default";
/* Single DAI */
@@ -888,8 +899,8 @@
rcar_sound,dai {
dai0 {
- playback = <&ssi0 &src2 &dvc0>;
- capture = <&ssi1 &src3 &dvc1>;
+ playback = <&ssi0>, <&src2>, <&dvc0>;
+ capture = <&ssi1>, <&src3>, <&dvc1>;
};
};
};
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
index c6d563fb7ec7..c6ef636965c1 100644
--- a/arch/arm/boot/dts/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/r8a7791-porter.dts
@@ -28,6 +28,8 @@
serial0 = &scif0;
i2c9 = &gpioi2c2;
i2c10 = &i2chdmi;
+ mmc0 = &sdhi0;
+ mmc1 = &sdhi2;
};
chosen {
@@ -292,7 +294,7 @@
};
&ether {
- pinctrl-0 = <&ether_pins &phy1_pins>;
+ pinctrl-0 = <&ether_pins>, <&phy1_pins>;
pinctrl-names = "default";
phy-handle = <&phy1>;
@@ -494,7 +496,7 @@
};
&rcar_sound {
- pinctrl-0 = <&ssi_pins &audio_clk_pins>;
+ pinctrl-0 = <&ssi_pins>, <&audio_clk_pins>;
pinctrl-names = "default";
status = "okay";
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 87f0d6dc3e5a..9d8320f71a6a 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -315,7 +315,7 @@
apmu@e6152000 {
compatible = "renesas,r8a7791-apmu", "renesas,apmu";
reg = <0 0xe6152000 0 0x188>;
- cpus = <&cpu0 &cpu1>;
+ cpus = <&cpu0>, <&cpu1>;
};
rst: reset-controller@e6160000 {
diff --git a/arch/arm/boot/dts/r8a7792-blanche.dts b/arch/arm/boot/dts/r8a7792-blanche.dts
index 9368ac2cf508..c100ae903a46 100644
--- a/arch/arm/boot/dts/r8a7792-blanche.dts
+++ b/arch/arm/boot/dts/r8a7792-blanche.dts
@@ -334,7 +334,7 @@
};
&du {
- pinctrl-0 = <&du0_pins &du1_pins>;
+ pinctrl-0 = <&du0_pins>, <&du1_pins>;
pinctrl-names = "default";
clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, <&x1_clk>, <&x2_clk>;
diff --git a/arch/arm/boot/dts/r8a7792-wheat.dts b/arch/arm/boot/dts/r8a7792-wheat.dts
index ba2d2a589012..434e4655be9d 100644
--- a/arch/arm/boot/dts/r8a7792-wheat.dts
+++ b/arch/arm/boot/dts/r8a7792-wheat.dts
@@ -307,7 +307,7 @@
};
&du {
- pinctrl-0 = <&du0_pins &du1_pins>;
+ pinctrl-0 = <&du0_pins>, <&du1_pins>;
pinctrl-names = "default";
clocks = <&cpg CPG_MOD 724>, <&cpg CPG_MOD 723>, <&osc2_clk>;
diff --git a/arch/arm/boot/dts/r8a7792.dtsi b/arch/arm/boot/dts/r8a7792.dtsi
index f5b299bfcb23..253e8bf643d1 100644
--- a/arch/arm/boot/dts/r8a7792.dtsi
+++ b/arch/arm/boot/dts/r8a7792.dtsi
@@ -314,7 +314,7 @@
apmu@e6152000 {
compatible = "renesas,r8a7792-apmu", "renesas,apmu";
reg = <0 0xe6152000 0 0x188>;
- cpus = <&cpu0 &cpu1>;
+ cpus = <&cpu0>, <&cpu1>;
};
rst: reset-controller@e6160000 {
diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
index abf487e8fe0f..87fa57a99399 100644
--- a/arch/arm/boot/dts/r8a7793-gose.dts
+++ b/arch/arm/boot/dts/r8a7793-gose.dts
@@ -49,6 +49,9 @@
i2c10 = &gpioi2c4;
i2c11 = &i2chdmi;
i2c12 = &i2cexio4;
+ mmc0 = &sdhi0;
+ mmc1 = &sdhi1;
+ mmc2 = &sdhi2;
};
chosen {
@@ -576,7 +579,7 @@
};
&ether {
- pinctrl-0 = <&ether_pins &phy1_pins>;
+ pinctrl-0 = <&ether_pins>, <&phy1_pins>;
pinctrl-names = "default";
phy-handle = <&phy1>;
@@ -751,7 +754,7 @@
};
&rcar_sound {
- pinctrl-0 = <&sound_pins &sound_clk_pins>;
+ pinctrl-0 = <&sound_pins>, <&sound_clk_pins>;
pinctrl-names = "default";
/* Single DAI */
@@ -761,8 +764,8 @@
rcar_sound,dai {
dai0 {
- playback = <&ssi0 &src2 &dvc0>;
- capture = <&ssi1 &src3 &dvc1>;
+ playback = <&ssi0>, <&src2>, <&dvc0>;
+ capture = <&ssi1>, <&src3>, <&dvc1>;
};
};
};
diff --git a/arch/arm/boot/dts/r8a7793.dtsi b/arch/arm/boot/dts/r8a7793.dtsi
index f930f69f7bcc..6d74475030ed 100644
--- a/arch/arm/boot/dts/r8a7793.dtsi
+++ b/arch/arm/boot/dts/r8a7793.dtsi
@@ -290,7 +290,7 @@
apmu@e6152000 {
compatible = "renesas,r8a7793-apmu", "renesas,apmu";
reg = <0 0xe6152000 0 0x188>;
- cpus = <&cpu0 &cpu1>;
+ cpus = <&cpu0>, <&cpu1>;
};
rst: reset-controller@e6160000 {
diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
index 3f1cc5bbf329..f9dba5688d3f 100644
--- a/arch/arm/boot/dts/r8a7794-alt.dts
+++ b/arch/arm/boot/dts/r8a7794-alt.dts
@@ -19,6 +19,9 @@
i2c10 = &gpioi2c4;
i2c11 = &i2chdmi;
i2c12 = &i2cexio4;
+ mmc0 = &mmcif0;
+ mmc1 = &sdhi0;
+ mmc2 = &sdhi1;
};
chosen {
@@ -330,7 +333,7 @@
};
&ether {
- pinctrl-0 = <&ether_pins &phy1_pins>;
+ pinctrl-0 = <&ether_pins>, <&phy1_pins>;
pinctrl-names = "default";
phy-handle = <&phy1>;
diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
index 677596f6c9c9..eb89a27a6ed0 100644
--- a/arch/arm/boot/dts/r8a7794-silk.dts
+++ b/arch/arm/boot/dts/r8a7794-silk.dts
@@ -31,6 +31,8 @@
serial0 = &scif2;
i2c9 = &gpioi2c1;
i2c10 = &i2chdmi;
+ mmc0 = &mmcif0;
+ mmc1 = &sdhi1;
};
chosen {
@@ -379,7 +381,7 @@
};
&ether {
- pinctrl-0 = <&ether_pins &phy1_pins>;
+ pinctrl-0 = <&ether_pins>, <&phy1_pins>;
pinctrl-names = "default";
phy-handle = <&phy1>;
@@ -518,7 +520,7 @@
};
&du {
- pinctrl-0 = <&du0_pins &du1_pins>;
+ pinctrl-0 = <&du0_pins>, <&du1_pins>;
pinctrl-names = "default";
status = "okay";
@@ -541,7 +543,7 @@
};
&rcar_sound {
- pinctrl-0 = <&ssi_pins &audio_clk_pins>;
+ pinctrl-0 = <&ssi_pins>, <&audio_clk_pins>;
pinctrl-names = "default";
status = "okay";
diff --git a/arch/arm/boot/dts/r8a7794.dtsi b/arch/arm/boot/dts/r8a7794.dtsi
index cd5e2904068a..330dc516ecd1 100644
--- a/arch/arm/boot/dts/r8a7794.dtsi
+++ b/arch/arm/boot/dts/r8a7794.dtsi
@@ -256,7 +256,7 @@
apmu@e6151000 {
compatible = "renesas,r8a7794-apmu", "renesas,apmu";
reg = <0 0xe6151000 0 0x188>;
- cpus = <&cpu0 &cpu1>;
+ cpus = <&cpu0>, <&cpu1>;
};
rst: reset-controller@e6160000 {
diff --git a/arch/arm/boot/dts/rk3036.dtsi b/arch/arm/boot/dts/rk3036.dtsi
index 47a787a12e55..e24230d50a78 100644
--- a/arch/arm/boot/dts/rk3036.dtsi
+++ b/arch/arm/boot/dts/rk3036.dtsi
@@ -355,7 +355,6 @@
reg = <0x20050000 0x10>;
#pwm-cells = <3>;
clocks = <&cru PCLK_PWM>;
- clock-names = "pwm";
pinctrl-names = "default";
pinctrl-0 = <&pwm0_pin>;
status = "disabled";
@@ -366,7 +365,6 @@
reg = <0x20050010 0x10>;
#pwm-cells = <3>;
clocks = <&cru PCLK_PWM>;
- clock-names = "pwm";
pinctrl-names = "default";
pinctrl-0 = <&pwm1_pin>;
status = "disabled";
@@ -377,7 +375,6 @@
reg = <0x20050020 0x10>;
#pwm-cells = <3>;
clocks = <&cru PCLK_PWM>;
- clock-names = "pwm";
pinctrl-names = "default";
pinctrl-0 = <&pwm2_pin>;
status = "disabled";
@@ -388,7 +385,6 @@
reg = <0x20050030 0x10>;
#pwm-cells = <2>;
clocks = <&cru PCLK_PWM>;
- clock-names = "pwm";
pinctrl-names = "default";
pinctrl-0 = <&pwm3_pin>;
status = "disabled";
diff --git a/arch/arm/boot/dts/rk3228-evb.dts b/arch/arm/boot/dts/rk3228-evb.dts
index aed879db6c15..69a5e239ed1a 100644
--- a/arch/arm/boot/dts/rk3228-evb.dts
+++ b/arch/arm/boot/dts/rk3228-evb.dts
@@ -8,6 +8,10 @@
model = "Rockchip RK3228 Evaluation board";
compatible = "rockchip,rk3228-evb", "rockchip,rk3228";
+ aliases {
+ mmc0 = &emmc;
+ };
+
memory@60000000 {
device_type = "memory";
reg = <0x60000000 0x40000000>;
diff --git a/arch/arm/boot/dts/rk3229-evb.dts b/arch/arm/boot/dts/rk3229-evb.dts
index 350497a3ca86..797476e8bef1 100644
--- a/arch/arm/boot/dts/rk3229-evb.dts
+++ b/arch/arm/boot/dts/rk3229-evb.dts
@@ -9,6 +9,10 @@
model = "Rockchip RK3229 Evaluation board";
compatible = "rockchip,rk3229-evb", "rockchip,rk3229";
+ aliases {
+ mmc0 = &emmc;
+ };
+
memory@60000000 {
device_type = "memory";
reg = <0x60000000 0x40000000>;
diff --git a/arch/arm/boot/dts/rk3229-xms6.dts b/arch/arm/boot/dts/rk3229-xms6.dts
index 263393ac4fa6..7bfbfd11fb55 100644
--- a/arch/arm/boot/dts/rk3229-xms6.dts
+++ b/arch/arm/boot/dts/rk3229-xms6.dts
@@ -9,6 +9,12 @@
model = "Mecer Xtreme Mini S6";
compatible = "mecer,xms6", "rockchip,rk3229";
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &sdio;
+ mmc2 = &emmc;
+ };
+
memory@60000000 {
device_type = "memory";
reg = <0x60000000 0x40000000>;
diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi
index a4dd50aaf3fc..208f21245095 100644
--- a/arch/arm/boot/dts/rk322x.dtsi
+++ b/arch/arm/boot/dts/rk322x.dtsi
@@ -14,9 +14,6 @@
interrupt-parent = <&gic>;
aliases {
- mmc0 = &sdmmc;
- mmc1 = &sdio;
- mmc2 = &emmc;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -370,7 +367,7 @@
};
wdt: watchdog@110a0000 {
- compatible = "snps,dw-wdt";
+ compatible = "rockchip,rk3228-wdt", "snps,dw-wdt";
reg = <0x110a0000 0x100>;
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru PCLK_CPU>;
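Two related cleanups in this file: the mmc aliases move out of the SoC .dtsi (the board files above add them back per board), and the watchdog gains a SoC-specific compatible ahead of the generic one. Compatible strings are matched most-specific first, so existing kernels keep binding the generic dw_wdt driver through the "snps,dw-wdt" fallback, while a future driver or errata quirk can key on the Rockchip string without another DT change. The shape of the pattern:

    wdt: watchdog@110a0000 {
            /* most specific first, generic fallback last */
            compatible = "rockchip,rk3228-wdt", "snps,dw-wdt";
            reg = <0x110a0000 0x100>;
            clocks = <&cru PCLK_CPU>;
    };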
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index ea7416c31f9b..05557ad02b33 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -679,7 +679,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pwm0_pin>;
clocks = <&cru PCLK_RKPWM>;
- clock-names = "pwm";
status = "disabled";
};
@@ -690,7 +689,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pwm1_pin>;
clocks = <&cru PCLK_RKPWM>;
- clock-names = "pwm";
status = "disabled";
};
@@ -701,7 +699,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pwm2_pin>;
clocks = <&cru PCLK_RKPWM>;
- clock-names = "pwm";
status = "disabled";
};
@@ -712,7 +709,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pwm3_pin>;
clocks = <&cru PCLK_RKPWM>;
- clock-names = "pwm";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rv1108-elgin-r1.dts b/arch/arm/boot/dts/rv1108-elgin-r1.dts
index b1db924710c8..f62c9f7af79d 100644
--- a/arch/arm/boot/dts/rv1108-elgin-r1.dts
+++ b/arch/arm/boot/dts/rv1108-elgin-r1.dts
@@ -12,6 +12,10 @@
model = "Elgin RV1108 R1 board";
compatible = "elgin,rv1108-r1", "rockchip,rv1108";
+ aliases {
+ mmc0 = &emmc;
+ };
+
memory@60000000 {
device_type = "memory";
reg = <0x60000000 0x08000000>;
diff --git a/arch/arm/boot/dts/rv1108-evb.dts b/arch/arm/boot/dts/rv1108-evb.dts
index 30f3d0470ad9..fe5fc9bf75c9 100644
--- a/arch/arm/boot/dts/rv1108-evb.dts
+++ b/arch/arm/boot/dts/rv1108-evb.dts
@@ -8,6 +8,10 @@
model = "Rockchip RV1108 Evaluation board";
compatible = "rockchip,rv1108-evb", "rockchip,rv1108";
+ aliases {
+ mmc0 = &sdmmc;
+ };
+
memory@60000000 {
device_type = "memory";
reg = <0x60000000 0x08000000>;
diff --git a/arch/arm/boot/dts/rv1108.dtsi b/arch/arm/boot/dts/rv1108.dtsi
index 7319a2473b80..884872ca5207 100644
--- a/arch/arm/boot/dts/rv1108.dtsi
+++ b/arch/arm/boot/dts/rv1108.dtsi
@@ -19,9 +19,6 @@
i2c1 = &i2c1;
i2c2 = &i2c2;
i2c3 = &i2c3;
- mmc0 = &emmc;
- mmc1 = &sdio;
- mmc2 = &sdmmc;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -303,11 +300,10 @@
};
watchdog: watchdog@10360000 {
- compatible = "snps,dw-wdt";
+ compatible = "rockchip,rv1108-wdt", "snps,dw-wdt";
reg = <0x10360000 0x100>;
interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru PCLK_WDT>;
- clock-names = "pclk_wdt";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/s5pv210-fascinate4g.dts b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
index ca064359dd30..b47d8300e536 100644
--- a/arch/arm/boot/dts/s5pv210-fascinate4g.dts
+++ b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
@@ -115,7 +115,7 @@
compatible = "maxim,max77836-battery";
interrupt-parent = <&gph3>;
- interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&fg_irq>;
diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi
index 84066c1298df..ec45ced3cde6 100644
--- a/arch/arm/boot/dts/sam9x60.dtsi
+++ b/arch/arm/boot/dts/sam9x60.dtsi
@@ -606,6 +606,15 @@
compatible = "microchip,sam9x60-pinctrl", "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
ranges = <0xfffff400 0xfffff400 0x800>;
+ /* mux-mask corresponding to sam9x60 SoC in TFBGA228L package */
+ atmel,mux-mask = <
+ /* A B C */
+ 0xffffffff 0xffe03fff 0xef00019d /* pioA */
+ 0x03ffffff 0x02fc7e7f 0x00780000 /* pioB */
+ 0xffffffff 0xffffffff 0xf83fffff /* pioC */
+ 0x003fffff 0x003f8000 0x00000000 /* pioD */
+ >;
+
pioA: gpio@fffff400 {
compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x200>;
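For readers unfamiliar with the at91 pinctrl binding: atmel,mux-mask has one row per PIO bank and one 32-bit word per peripheral-function column (A/B/C here); bit n set in a word means line n of that bank may be muxed to that function, letting the pin controller reject invalid board muxing at parse time. Reading the pioB row as a worked example:

    /* pioB row: <0x03ffffff 0x02fc7e7f 0x00780000>
     *   function A: 0x03ffffff -> lines PB0..PB25 can use peripheral A
     *   function B: 0x02fc7e7f -> bit 0 set, so PB0 may use peripheral B;
     *                             bit 7 clear, so PB7 may not
     *   function C: 0x00780000 -> only PB19..PB22 can use peripheral C
     */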
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 2c4952427296..801969c113d6 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -40,7 +40,7 @@
interrupts = <2 IRQ_TYPE_LEVEL_HIGH 0>;
};
- etb {
+ etb@740000 {
compatible = "arm,coresight-etb10", "arm,primecell";
reg = <0x740000 0x1000>;
@@ -56,9 +56,9 @@
};
};
- etm {
+ etm@73c000 {
compatible = "arm,coresight-etm3x", "arm,primecell";
- reg = <0x73C000 0x1000>;
+ reg = <0x73c000 0x1000>;
clocks = <&pmc PMC_TYPE_CORE PMC_MCK>;
clock-names = "apb_pclk";
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 7c979652f330..d1841bffe3c5 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -709,7 +709,7 @@
atmel,pins =
<AT91_PIOD 5 AT91_PERIPH_A AT91_PINCTRL_PULL_UP /* PD5 periph A MCI0_DA4 with pullup, conflicts with TIOA0, PWMH2 */
AT91_PIOD 6 AT91_PERIPH_A AT91_PINCTRL_PULL_UP /* PD6 periph A MCI0_DA5 with pullup, conflicts with TIOB0, PWML2 */
- AT91_PIOD 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP /* PD7 periph A MCI0_DA6 with pullup, conlicts with TCLK0, PWMH3 */
+ AT91_PIOD 7 AT91_PERIPH_A AT91_PINCTRL_PULL_UP /* PD7 periph A MCI0_DA6 with pullup, conflicts with TCLK0, PWMH3 */
AT91_PIOD 8 AT91_PERIPH_A AT91_PINCTRL_PULL_UP>; /* PD8 periph A MCI0_DA7 with pullup, conflicts with PWML3 */
};
};
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 05c55875835d..e47e1ca63043 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -101,6 +101,13 @@
ranges = <0 0x100000 0x2400>;
};
+ vdec0: vdec@300000 {
+ compatible = "microchip,sama5d4-vdec";
+ reg = <0x00300000 0x100000>;
+ interrupts = <19 IRQ_TYPE_LEVEL_HIGH 4>;
+ clocks = <&pmc PMC_TYPE_PERIPHERAL 19>;
+ };
+
usb0: gadget@400000 {
compatible = "atmel,sama5d3-udc";
reg = <0x00400000 0x100000
diff --git a/arch/arm/boot/dts/ste-ab8500.dtsi b/arch/arm/boot/dts/ste-ab8500.dtsi
index 4fd09997a2b9..a16a00fb5fa5 100644
--- a/arch/arm/boot/dts/ste-ab8500.dtsi
+++ b/arch/arm/boot/dts/ste-ab8500.dtsi
@@ -317,8 +317,8 @@
// supplies to the display/camera
ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
- regulator-min-microvolt = <2500000>;
- regulator-max-microvolt = <2900000>;
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3300000>;
regulator-boot-on;
/* BUG: If turned off MMC will be affected. */
regulator-always-on;
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi
index e024520f4d47..8d59202cebd6 100644
--- a/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi
+++ b/arch/arm/boot/dts/ste-href-tvk1281618-r2.dtsi
@@ -1,14 +1,89 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Device Tree for the TVK1281618 R2 UIB
+ * Device Tree for the TVK1281618 R2 user interface board (UIB)
*/
-#include "ste-href-tvk1281618.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/input/input.h>
/ {
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ vdd-supply = <&ab8500_ldo_aux1_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&prox_tvk_mode>, <&hall_tvk_mode>;
+
+ button@139 {
+ /* Proximity sensor */
+ gpios = <&gpio6 25 GPIO_ACTIVE_HIGH>;
+ linux,code = <11>; /* SW_FRONT_PROXIMITY */
+ label = "SFH7741 Proximity Sensor";
+ };
+ button@145 {
+ /* Hall sensor */
+ gpios = <&gpio4 17 GPIO_ACTIVE_HIGH>;
+ linux,code = <0>; /* SW_LID */
+ label = "HED54XXU11 Hall Effect Sensor";
+ };
+ };
+
soc {
+ i2c@80004000 {
+ tc35893@44 {
+ compatible = "toshiba,tc35893";
+ reg = <0x44>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <26 IRQ_TYPE_EDGE_RISING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tc35893_tvk_mode>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ status = "disabled";
+
+ tc3589x_gpio {
+ compatible = "toshiba,tc3589x-gpio";
+ interrupts = <0>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ tc3589x_keypad {
+ compatible = "toshiba,tc3589x-keypad";
+ interrupts = <6>;
+ debounce-delay-ms = <4>;
+ keypad,num-columns = <8>;
+ keypad,num-rows = <8>;
+ linux,no-autorepeat;
+ wakeup-source;
+ linux,keymap = <MATRIX_KEY(3, 1, KEY_END)>,
+ <MATRIX_KEY(4, 1, KEY_HOME)>,
+ <MATRIX_KEY(6, 4, KEY_VOLUMEDOWN)>,
+ <MATRIX_KEY(4, 2, KEY_EMAIL)>,
+ <MATRIX_KEY(3, 3, KEY_RIGHT)>,
+ <MATRIX_KEY(2, 5, KEY_BACKSPACE)>,
+ <MATRIX_KEY(6, 7, KEY_MENU)>,
+ <MATRIX_KEY(5, 0, KEY_ENTER)>,
+ <MATRIX_KEY(4, 3, KEY_0)>,
+ <MATRIX_KEY(3, 4, KEY_DOT)>,
+ <MATRIX_KEY(5, 2, KEY_UP)>,
+ <MATRIX_KEY(3, 5, KEY_DOWN)>,
+ <MATRIX_KEY(4, 5, KEY_SEND)>,
+ <MATRIX_KEY(0, 5, KEY_BACK)>,
+ <MATRIX_KEY(6, 2, KEY_VOLUMEUP)>,
+ <MATRIX_KEY(1, 3, KEY_SPACE)>,
+ <MATRIX_KEY(7, 6, KEY_LEFT)>,
+ <MATRIX_KEY(5, 5, KEY_SEARCH)>;
+ };
+ };
+ };
+
i2c@80128000 {
- lsm303dlh@18 {
+ accelerometer@18 {
/* Accelerometer */
compatible = "st,lsm303dlh-accel";
st,drdy-int-pin = <1>;
@@ -30,7 +105,7 @@
* <&gpio2 19 IRQ_TYPE_EDGE_FALLING>;
*/
};
- lsm303dlh@1e {
+ magnetometer@1e {
/* Magnetometer */
compatible = "st,lsm303dlh-magn";
reg = <0x1e>;
@@ -48,7 +123,7 @@
* <&gpio2 19 IRQ_TYPE_EDGE_FALLING>;
*/
};
- lis331dl@1c {
+ accelerometer@1c {
/* Accelerometer */
compatible = "st,lis331dl-accel";
st,drdy-int-pin = <1>;
@@ -62,6 +137,72 @@
interrupts = <18 IRQ_TYPE_EDGE_RISING>,
<19 IRQ_TYPE_EDGE_RISING>;
};
+ magnetometer@f {
+ /* Magnetometer */
+ compatible = "asahi-kasei,ak8974";
+ reg = <0x0f>;
+ avdd-supply = <&ab8500_ldo_aux1_reg>;
+ dvdd-supply = <&db8500_vsmps2_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gyro_magn_tvk_mode>;
+ /*
+ * These interrupts cannot be used: the other component, the
+ * ST-Micro L3G4200D gyro that is connected to the same lines,
+ * cannot set its DRDY line to open drain, so the lines cannot be
+ * shared with other peripherals. They should be defined for
+ * the falling edge if they could be wired together.
+ *
+ * interrupts-extended =
+ * <&gpio1 0 IRQ_TYPE_EDGE_FALLING>,
+ * <&gpio0 31 IRQ_TYPE_EDGE_FALLING>;
+ */
+ };
+ gyroscope@68 {
+ /* Gyroscope */
+ compatible = "st,l3g4200d-gyro";
+ st,drdy-int-pin = <2>;
+ reg = <0x68>;
+ vdd-supply = <&ab8500_ldo_aux1_reg>;
+ vddio-supply = <&db8500_vsmps2_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&gyro_magn_tvk_mode>;
+ interrupts-extended =
+ <&gpio1 0 IRQ_TYPE_EDGE_RISING>,
+ <&gpio0 31 IRQ_TYPE_EDGE_RISING>;
+ };
+ pressure@5c {
+ /* Barometer/pressure sensor */
+ compatible = "st,lps001wp-press";
+ reg = <0x5c>;
+ vdd-supply = <&ab8500_ldo_aux1_reg>;
+ vddio-supply = <&db8500_vsmps2_reg>;
+ };
+ };
+ i2c@80110000 {
+ synaptics@4b {
+ /* Synaptics RMI4 TM1217 touchscreen */
+ compatible = "syna,rmi4-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x4b>;
+ vdd-supply = <&ab8500_ldo_aux1_reg>;
+ vddio-supply = <&db8500_vsmps2_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&synaptics_tvk_mode>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <20 IRQ_TYPE_EDGE_FALLING>;
+
+ rmi4-f01@1 {
+ reg = <0x1>;
+ syna,nosleep = <1>;
+ };
+ rmi4-f11@11 {
+ reg = <0x11>;
+ syna,sensor-type = <1>;
+ /* This is a landscape display */
+ touchscreen-swapped-x-y;
+ };
+ };
};
mcde@a0350000 {
status = "okay";
@@ -75,5 +216,68 @@
};
};
};
+ pinctrl {
+ prox {
+ prox_tvk_mode: prox_tvk {
+ tvk_cfg {
+ pins = "GPIO217_AH12";
+ ste,config = <&gpio_in_pu>;
+ };
+ };
+ };
+ hall {
+ hall_tvk_mode: hall_tvk {
+ tvk_cfg {
+ pins = "GPIO145_C13";
+ ste,config = <&gpio_in_pu>;
+ };
+ };
+ };
+ tc35893 {
+ /* IRQ from the TC35893 */
+ tc35893_tvk_mode: tc35893_tvk {
+ tvk_cfg {
+ pins = "GPIO218_AH11";
+ ste,config = <&gpio_in_pu>;
+ };
+ };
+ };
+ accelerometer {
+ accel_tvk_mode: accel_tvk {
+ /* Accelerometer interrupt lines 1 & 2 */
+ tvk_cfg {
+ pins = "GPIO82_C1", "GPIO83_D3";
+ ste,config = <&gpio_in_pd>;
+ };
+ };
+ };
+ gyroscope {
+ /*
+ * These lines are shared between the L3G4200D gyroscope
+ * and the AK8974 magnetometer.
+ */
+ gyro_magn_tvk_mode: gyro_magn_tvk {
+ /* GPIO 31 used for INT, pull the line down */
+ tvk_cfg1 {
+ pins = "GPIO31_V3";
+ ste,config = <&gpio_in_pd>;
+ };
+ /* GPIO 32 used for DRDY, pull this down */
+ tvk_cfg2 {
+ pins = "GPIO32_V2";
+ ste,config = <&gpio_in_pd>;
+ };
+ };
+ };
+ synaptics {
+ synaptics_tvk_mode: synaptics_tvk {
+ /* Touchscreen uses GPIO 84 */
+ tvk_cfg1 {
+ pins = "GPIO84_C2";
+ ste,config = <&gpio_in_pu>;
+ };
+ };
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
index cb3677f0a1cb..70f058352efc 100644
--- a/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
+++ b/arch/arm/boot/dts/ste-href-tvk1281618-r3.dtsi
@@ -1,44 +1,152 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Device Tree for the TVK1281618 R2 UIB
+ * Device Tree for the TVK1281618 R3 user interface board (UIB)
+ * also known as the "CYTTSP board"
*/
-#include "ste-href-tvk1281618.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/input/input.h>
/ {
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ vdd-supply = <&ab8500_ldo_aux1_reg>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hall_tvk_mode>;
+
+ button@145 {
+ /* Hall sensor */
+ gpios = <&gpio4 17 GPIO_ACTIVE_HIGH>;
+ linux,code = <0>; /* SW_LID */
+ label = "HED54XXU11 Hall Effect Sensor";
+ };
+ };
+
soc {
- i2c@80128000 {
- /* Marked:
- * 129
- * M35
- * L3GD20
- */
- l3gd20@6a {
- /* Gyroscope */
- compatible = "st,l3gd20";
+ i2c@80004000 {
+ tc35893@44 {
+ compatible = "toshiba,tc35893";
+ reg = <0x44>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tc35893_tvk_mode>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
status = "disabled";
+
+ tc3589x_gpio {
+ compatible = "toshiba,tc3589x-gpio";
+ interrupts = <0>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ tc3589x_keypad {
+ compatible = "toshiba,tc3589x-keypad";
+ interrupts = <6>;
+ debounce-delay-ms = <4>;
+ keypad,num-columns = <8>;
+ keypad,num-rows = <8>;
+ linux,no-autorepeat;
+ wakeup-source;
+ linux,keymap = <MATRIX_KEY(3, 1, KEY_END)>,
+ <MATRIX_KEY(4, 1, KEY_HOME)>,
+ <MATRIX_KEY(6, 4, KEY_VOLUMEDOWN)>,
+ <MATRIX_KEY(4, 2, KEY_EMAIL)>,
+ <MATRIX_KEY(3, 3, KEY_RIGHT)>,
+ <MATRIX_KEY(2, 5, KEY_BACKSPACE)>,
+ <MATRIX_KEY(6, 7, KEY_MENU)>,
+ <MATRIX_KEY(5, 0, KEY_ENTER)>,
+ <MATRIX_KEY(4, 3, KEY_0)>,
+ <MATRIX_KEY(3, 4, KEY_DOT)>,
+ <MATRIX_KEY(5, 2, KEY_UP)>,
+ <MATRIX_KEY(3, 5, KEY_DOWN)>,
+ <MATRIX_KEY(4, 5, KEY_SEND)>,
+ <MATRIX_KEY(0, 5, KEY_BACK)>,
+ <MATRIX_KEY(6, 2, KEY_VOLUMEUP)>,
+ <MATRIX_KEY(1, 3, KEY_SPACE)>,
+ <MATRIX_KEY(7, 6, KEY_LEFT)>,
+ <MATRIX_KEY(5, 5, KEY_SEARCH)>;
+ };
+ };
+ };
+
+ i2c@80128000 {
+ accelerometer@19 {
+ compatible = "st,lsm303dlhc-accel";
st,drdy-int-pin = <1>;
- drive-open-drain;
- reg = <0x6a>; // 0x6a or 0x6b
+ reg = <0x19>;
vdd-supply = <&ab8500_ldo_aux1_reg>;
vddio-supply = <&db8500_vsmps2_reg>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <18 IRQ_TYPE_EDGE_RISING>,
+ <19 IRQ_TYPE_EDGE_RISING>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&accel_tvk_mode>;
};
- /*
- * Marked:
- * 2122
- * C3H
- * DQEEE
- * LIS3DH?
- */
- lis3dh@18 {
- /* Accelerometer */
- compatible = "st,lis3dh-accel";
+ magnetometer@1e {
+ compatible = "st,lsm303dlm-magn";
st,drdy-int-pin = <1>;
- reg = <0x18>;
+ reg = <0x1e>;
vdd-supply = <&ab8500_ldo_aux1_reg>;
vddio-supply = <&db8500_vsmps2_reg>;
+ // This interrupt is not properly working with the driver
+ // interrupt-parent = <&gpio1>;
+ // interrupts = <0 IRQ_TYPE_EDGE_RISING>;
pinctrl-names = "default";
- pinctrl-0 = <&accel_tvk_mode>;
+ pinctrl-0 = <&magn_tvk_mode>;
+ };
+ gyroscope@68 {
+ /* Gyroscope */
+ compatible = "st,l3g4200d-gyro";
+ reg = <0x68>;
+ vdd-supply = <&ab8500_ldo_aux1_reg>;
+ vddio-supply = <&db8500_vsmps2_reg>;
+ };
+ pressure@5c {
+ /* Barometer/pressure sensor */
+ compatible = "st,lps001wp-press";
+ reg = <0x5c>;
+ vdd-supply = <&ab8500_ldo_aux1_reg>;
+ vddio-supply = <&db8500_vsmps2_reg>;
+ };
+ };
+
+ spi@80111000 {
+ num-cs = <1>;
+ cs-gpios = <&gpio6 24 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_default_mode>;
+ status = "okay";
+
+ touchscreen@0 {
+ compatible = "cypress,cy8ctma340";
+ /*
+ * Actually the max frequency is 6 MHz, but above 2 MHz the
+ * data rate needs to be restricted to at most 2 Mbps, which
+ * the SPI framework cannot handle.
+ */
+ spi-max-frequency = <2000000>;
+ reg = <0>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <20 IRQ_TYPE_EDGE_FALLING>;
+ vcpin-supply = <&ab8500_ldo_aux1_reg>;
+ vdd-supply = <&db8500_vsmps2_reg>;
+ reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
+ touchscreen-size-x = <480>;
+ touchscreen-size-y = <854>;
+ active-interval-ms = <0>;
+ touch-timeout-ms = <255>;
+ lowpower-interval-ms = <10>;
+ bootloader-key = /bits/ 8 <0x00 0x01 0x02 0x03 0x04 0x05 0x06 0x07>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cyttsp_tvk_mode>;
};
};
@@ -54,5 +162,57 @@
};
};
};
+
+ pinctrl {
+ hall {
+ hall_tvk_mode: hall_tvk {
+ tvk_cfg {
+ pins = "GPIO145_C13";
+ ste,config = <&gpio_in_pu>;
+ };
+ };
+ };
+ tc35893 {
+ /* IRQ from the TC35893 */
+ tc35893_tvk_mode: tc35893_tvk {
+ tvk_cfg {
+ pins = "GPIO64_F3";
+ ste,config = <&gpio_in_pu>;
+ };
+ };
+ };
+ accelerometer {
+ accel_tvk_mode: accel_tvk {
+ /* Accelerometer interrupt lines 1 & 2 */
+ tvk_cfg {
+ pins = "GPIO82_C1", "GPIO83_D3";
+ ste,config = <&gpio_in_pd>;
+ };
+ };
+ };
+ magnetometer {
+ magn_tvk_mode: magn_tvk {
+ /* GPIO 32 used for DRDY, pull this down */
+ tvk_cfg {
+ pins = "GPIO32_V2";
+ ste,config = <&gpio_in_pd>;
+ };
+ };
+ };
+ cyttsp {
+ cyttsp_tvk_mode: cyttsp_tvk {
+ /* Touchscreen uses GPIO84 for IRQ */
+ tvk_cfg1 {
+ pins = "GPIO84_C2";
+ ste,config = <&gpio_in_pu>;
+ };
+ /* GPIO143 is reset */
+ tvk_cfg2 {
+ pins = "GPIO143_D12";
+ ste,config = <&gpio_out_hi>;
+ };
+ };
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi b/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
deleted file mode 100644
index e1dbfae22595..000000000000
--- a/arch/arm/boot/dts/ste-href-tvk1281618.dtsi
+++ /dev/null
@@ -1,218 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2012 ST-Ericsson AB
- *
- * Device Tree for the TVK1281618 family of UIBs
- */
-
-#include <dt-bindings/interrupt-controller/irq.h>
-
-/ {
- gpio_keys {
- compatible = "gpio-keys";
- #address-cells = <1>;
- #size-cells = <0>;
- vdd-supply = <&ab8500_ldo_aux1_reg>;
- pinctrl-names = "default";
- pinctrl-0 = <&prox_tvk_mode>, <&hall_tvk_mode>;
-
- button@139 {
- /* Proximity sensor */
- gpios = <&gpio6 25 GPIO_ACTIVE_HIGH>;
- linux,code = <11>; /* SW_FRONT_PROXIMITY */
- label = "SFH7741 Proximity Sensor";
- };
- button@145 {
- /* Hall sensor */
- gpios = <&gpio4 17 GPIO_ACTIVE_HIGH>;
- linux,code = <0>; /* SW_LID */
- label = "HED54XXU11 Hall Effect Sensor";
- };
- };
-
- soc {
- i2c@80004000 {
- tc35893@44 {
- compatible = "toshiba,tc35893";
- reg = <0x44>;
- interrupt-parent = <&gpio6>;
- interrupts = <26 IRQ_TYPE_EDGE_RISING>;
- pinctrl-names = "default";
- pinctrl-0 = <&tc35893_tvk_mode>;
-
- interrupt-controller;
- #interrupt-cells = <1>;
-
- tc3589x_gpio {
- compatible = "toshiba,tc3589x-gpio";
- interrupts = <0>;
-
- interrupt-controller;
- #interrupt-cells = <2>;
- gpio-controller;
- #gpio-cells = <2>;
- };
- tc3589x_keypad {
- compatible = "toshiba,tc3589x-keypad";
- interrupts = <6>;
- debounce-delay-ms = <4>;
- keypad,num-columns = <8>;
- keypad,num-rows = <8>;
- linux,no-autorepeat;
- wakeup-source;
- linux,keymap = <0x0301006b
- 0x04010066
- 0x06040072
- 0x040200d7
- 0x0303006a
- 0x0205000e
- 0x0607008b
- 0x0500001c
- 0x0403000b
- 0x03040034
- 0x05020067
- 0x0305006c
- 0x040500e7
- 0x0005009e
- 0x06020073
- 0x01030039
- 0x07060069
- 0x050500d9>;
- };
- };
- };
- /* Sensors mounted on all board variants */
- i2c@80128000 {
- ak8974@f {
- /* Magnetometer */
- compatible = "asahi-kasei,ak8974";
- reg = <0x0f>;
- avdd-supply = <&ab8500_ldo_aux1_reg>;
- dvdd-supply = <&db8500_vsmps2_reg>;
- pinctrl-names = "default";
- pinctrl-0 = <&gyro_magn_tvk_mode>;
- /*
- * These interrupts cannot be used: the other component
- * ST-Micro L3D4200D gyro that is connected to the same lines
- * cannot set its DRDY line to open drain, so it cannot be
- * shared with other peripherals. The should be defined for
- * the falling edge if they could be wired together.
- *
- * interrupts-extended =
- * <&gpio1 0 IRQ_TYPE_EDGE_FALLING>,
- * <&gpio0 31 IRQ_TYPE_EDGE_FALLING>;
- */
- };
- l3g4200d@68 {
- /* Gyroscope */
- compatible = "st,l3g4200d-gyro";
- st,drdy-int-pin = <2>;
- reg = <0x68>;
- vdd-supply = <&ab8500_ldo_aux1_reg>;
- vddio-supply = <&db8500_vsmps2_reg>;
- pinctrl-names = "default";
- pinctrl-0 = <&gyro_magn_tvk_mode>;
- interrupts-extended =
- <&gpio1 0 IRQ_TYPE_EDGE_RISING>,
- <&gpio0 31 IRQ_TYPE_EDGE_RISING>;
- };
- lsp001wm@5c {
- /* Barometer/pressure sensor */
- compatible = "st,lps001wp-press";
- reg = <0x5c>;
- vdd-supply = <&ab8500_ldo_aux1_reg>;
- vddio-supply = <&db8500_vsmps2_reg>;
- };
- };
-
- i2c@80110000 {
- synaptics@4b {
- /* Synaptics RMI4 TM1217 touchscreen */
- compatible = "syna,rmi4-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x4b>;
- vdd-supply = <&ab8500_ldo_aux1_reg>;
- vddio-supply = <&db8500_vsmps2_reg>;
- pinctrl-names = "default";
- pinctrl-0 = <&synaptics_tvk_mode>;
- interrupt-parent = <&gpio2>;
- interrupts = <20 IRQ_TYPE_EDGE_FALLING>;
-
- rmi-f01@1 {
- reg = <0x1>;
- syna,nosleep = <1>;
- };
- rmi-f11@11 {
- reg = <0x11>;
- touchscreen-inverted-x;
- syna,sensor-type = <1>;
- };
- };
- };
-
- pinctrl {
- /* Pull up this GPIO pin */
- tc35893 {
- tc35893_tvk_mode: tc35893_tvk {
- tvk_cfg {
- pins = "GPIO218_AH11";
- ste,config = <&gpio_in_pu>;
- };
- };
- };
- prox {
- prox_tvk_mode: prox_tvk {
- tvk_cfg {
- pins = "GPIO217_AH12";
- ste,config = <&gpio_in_pu>;
- };
- };
- };
- hall {
- hall_tvk_mode: hall_tvk {
- tvk_cfg {
- pins = "GPIO145_C13";
- ste,config = <&gpio_in_pu>;
- };
- };
- };
- accelerometer {
- accel_tvk_mode: accel_tvk {
- /* Accelerometer interrupt lines 1 & 2 */
- tvk_cfg {
- pins = "GPIO82_C1", "GPIO83_D3";
- ste,config = <&gpio_in_pd>;
- };
- };
- };
- gyroscope {
- /*
- * These lines are shared between Gyroscope l3g400dh
- * and AK8974 magnetometer.
- */
- gyro_magn_tvk_mode: gyro_magn_tvk {
- /* GPIO 31 used for INT pull down the line */
- tvk_cfg1 {
- pins = "GPIO31_V3";
- ste,config = <&gpio_in_pd>;
- };
- /* GPIO 32 used for DRDY, pull this down */
- tvk_cfg2 {
- pins = "GPIO32_V2";
- ste,config = <&gpio_in_pd>;
- };
- };
- };
- synaptics {
- synaptics_tvk_mode: synaptics_tvk {
- /* Touchscreen uses GPIO 84 */
- tvk_cfg1 {
- pins = "GPIO84_C2";
- ste,config = <&gpio_in_pu>;
- };
- };
- };
- };
- };
-};
diff --git a/arch/arm/boot/dts/ste-href520-tvk.dts b/arch/arm/boot/dts/ste-href520-tvk.dts
index a036a03f6718..4201547c5988 100644
--- a/arch/arm/boot/dts/ste-href520-tvk.dts
+++ b/arch/arm/boot/dts/ste-href520-tvk.dts
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * Device Tree for the HREF520 version with the TVK1281618 UIB
+ * Device Tree for the HREF520 version with the TVK1281618 R3 UIB
*/
/dts-v1/;
@@ -9,7 +9,7 @@
#include "ste-href-tvk1281618-r3.dtsi"
/ {
- model = "ST-Ericsson HREF520 and TVK1281618 UIB";
+ model = "ST-Ericsson HREF520 and TVK1281618 R3 UIB";
compatible = "st-ericsson,href520", "st-ericsson,u8500";
diff --git a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
index 4e6e4439dcff..75506339a93c 100644
--- a/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
+++ b/arch/arm/boot/dts/ste-hrefprev60-tvk.dts
@@ -9,7 +9,7 @@
#include "ste-href-tvk1281618-r2.dtsi"
/ {
- model = "ST-Ericsson HREF (pre-v60) and TVK1281618 UIB";
+ model = "ST-Ericsson HREF (pre-v60) and TVK1281618 R2 UIB";
compatible = "st-ericsson,mop500", "st-ericsson,u8500";
/* ST6G3244ME level translator for 1.8/2.9 V */
diff --git a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
index 9c2d2ee6d6d8..2db2f8be8b03 100644
--- a/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
+++ b/arch/arm/boot/dts/ste-hrefv60plus-tvk.dts
@@ -2,7 +2,7 @@
/*
* Copyright 2012 ST-Ericsson AB
*
- * Device Tree for the HREF version 60 or later with the TVK1281618 UIB
+ * Device Tree for the HREF version 60 or later with the TVK1281618 R2 UIB
*/
/dts-v1/;
@@ -11,7 +11,7 @@
#include "ste-href-tvk1281618-r2.dtsi"
/ {
- model = "ST-Ericsson HREF (v60+) and TVK1281618 UIB";
+ model = "ST-Ericsson HREF (v60+) and TVK1281618 R2 UIB";
compatible = "st-ericsson,hrefv60+", "st-ericsson,u8500";
/* ST6G3244ME level translator for 1.8/2.9 V */
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-janice.dts b/arch/arm/boot/dts/ste-ux500-samsung-janice.dts
index 7411bfeda285..f24369873ce2 100644
--- a/arch/arm/boot/dts/ste-ux500-samsung-janice.dts
+++ b/arch/arm/boot/dts/ste-ux500-samsung-janice.dts
@@ -135,21 +135,22 @@
/*
* This regulator is a GPIO line that drives the Broadcom WLAN
- * line BT_VREG_EN high and enables the internal regulators
- * inside the chip.
+ * line WL_REG_ON high and enables the internal regulators
+ * inside the chip. Unfortunately it is erroneously named
+ * WLAN_RST_N on the schematic but it is not a reset line.
*
* The voltage specified here is only used to determine the OCR mask,
* for the SDIO connector; the chip is actually connected
* directly to VBAT.
*/
- wl_bt_reg: regulator-gpio-wlan {
+ wl_reg: regulator-gpio-wlan {
compatible = "regulator-fixed";
- regulator-name = "BT_VREG_EN";
+ regulator-name = "WL_REG_ON";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
startup-delay-us = <100000>;
- /* GPIO222 (BT_VREG_EN) */
- gpio = <&gpio6 30 GPIO_ACTIVE_HIGH>;
+ /* GPIO215 (WLAN_RST_N to WL_REG_ON) */
+ gpio = <&gpio6 23 GPIO_ACTIVE_HIGH>;
enable-active-high;
pinctrl-names = "default";
pinctrl-0 = <&wlan_ldo_en_default>;
@@ -390,11 +391,10 @@
pinctrl-1 = <&mc1_a_2_sleep>;
/*
* GPIO-controlled voltage enablement: this drives
- * the BT_VREG_EN line high when we use this device.
- * Represented as regulator to fill OCR mask and to
- * be usable in parallel with the Bluetooth chip.
+ * the WL_REG_ON line high when we use this device.
+ * Represented as regulator to fill OCR mask.
*/
- vmmc-supply = <&wl_bt_reg>;
+ vmmc-supply = <&wl_reg>;
#address-cells = <1>;
#size-cells = <0>;
@@ -408,9 +408,6 @@
interrupt-parent = <&gpio6>;
interrupts = <24 IRQ_TYPE_EDGE_FALLING>;
interrupt-names = "host-wake";
- /* GPIO215 WLAN_RST_N */
- /* FIXME: kernel does not use this assert/deassert */
- reset-gpios = <&gpio6 23 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&wlan_default_mode>;
};
@@ -440,15 +437,8 @@
bluetooth {
compatible = "brcm,bcm4330-bt";
- /*
- * We actually have shutdown-gpios, BT_VREG_EN on GPIO222,
- * but since this GPIO is shared with the WLAN chip, we need
- * to reference the regulator instead. The regulator
- * framework will reference count the GPIO usage and
- * make sure we can use the same GPIO for several supplies.
- */
- // shutdown-gpios = <&gpio6 30 GPIO_ACTIVE_HIGH>;
- vbat-supply = <&wl_bt_reg>;
+ /* GPIO222 rail BT_VREG_EN to BT_REG_ON */
+ shutdown-gpios = <&gpio6 30 GPIO_ACTIVE_HIGH>;
/* BT_WAKE on GPIO199 */
device-wakeup-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>;
/* BT_HOST_WAKE on GPIO97 */
@@ -759,9 +749,9 @@
/* GPIO that enables the WLAN internal LDO regulators */
wlan-ldo {
wlan_ldo_en_default: wlan_ldo_default {
- /* GPIO222 BT_VREG_ON */
+ /* GPIO215 named WLAN_RST_N */
janice_cfg1 {
- pins = "GPIO222_AJ9";
+ pins = "GPIO215_AH13";
ste,config = <&gpio_out_lo>;
};
};
@@ -875,11 +865,6 @@
};
wlan {
wlan_default_mode: wlan_default {
- /* GPIO215 used for RESET_N */
- janice_cfg1 {
- pins = "GPIO215_AH13";
- ste,config = <&gpio_out_lo>;
- };
/* GPIO216 for WL_HOST_WAKE */
janice_cfg2 {
pins = "GPIO216_AG12";
@@ -889,14 +874,17 @@
};
bluetooth {
bluetooth_default_mode: bluetooth_default {
+ /* GPIO199 BT_WAKE and GPIO222 BT_VREG_ON */
janice_cfg1 {
- pins = "GPIO199_AH23";
+ pins = "GPIO199_AH23", "GPIO222_AJ9";
ste,config = <&gpio_out_lo>;
};
+ /* GPIO97 BT_HOST_WAKE */
janice_cfg2 {
pins = "GPIO97_D9";
ste,config = <&gpio_in_nopull>;
};
+ /* GPIO209 BT_RST_N */
janice_cfg3 {
pins = "GPIO209_AG15";
ste,config = <&gpio_out_hi>;
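Summarizing the Janice WLAN/Bluetooth power rework for readers following the hunks above: previously a single GPIO-backed regulator (BT_VREG_EN on GPIO222) stood in for both chips' power controls, with the WLAN reset line modeled separately. The corrected wiring gives the SDIO-attached WLAN its own fixed regulator on GPIO215 (the line the schematic misleadingly labels WLAN_RST_N, actually WL_REG_ON), while Bluetooth drives GPIO222 (BT_REG_ON) directly through shutdown-gpios, so the two rails no longer need to be shared through the regulator framework. Condensed from the hunks above:

    wl_reg: regulator-gpio-wlan {
            compatible = "regulator-fixed";
            regulator-name = "WL_REG_ON";
            gpio = <&gpio6 23 GPIO_ACTIVE_HIGH>;    /* GPIO215 */
            enable-active-high;
    };

    bluetooth {
            compatible = "brcm,bcm4330-bt";
            shutdown-gpios = <&gpio6 30 GPIO_ACTIVE_HIGH>;  /* GPIO222, BT_REG_ON */
    };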
diff --git a/arch/arm/boot/dts/stm32h7-pinctrl.dtsi b/arch/arm/boot/dts/stm32h7-pinctrl.dtsi
new file mode 100644
index 000000000000..aa1bc3e10a49
--- /dev/null
+++ b/arch/arm/boot/dts/stm32h7-pinctrl.dtsi
@@ -0,0 +1,275 @@
+/*
+ * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/pinctrl/stm32-pinfunc.h>
+
+&pinctrl {
+
+ i2c1_pins_a: i2c1-0 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 6, AF4)>, /* I2C1_SCL */
+ <STM32_PINMUX('B', 7, AF4)>; /* I2C1_SDA */
+ bias-disable;
+ drive-open-drain;
+ slew-rate = <0>;
+ };
+ };
+
+ ethernet_rmii: rmii-0 {
+ pins {
+ pinmux = <STM32_PINMUX('G', 11, AF11)>,
+ <STM32_PINMUX('G', 13, AF11)>,
+ <STM32_PINMUX('G', 12, AF11)>,
+ <STM32_PINMUX('C', 4, AF11)>,
+ <STM32_PINMUX('C', 5, AF11)>,
+ <STM32_PINMUX('A', 7, AF11)>,
+ <STM32_PINMUX('C', 1, AF11)>,
+ <STM32_PINMUX('A', 2, AF11)>,
+ <STM32_PINMUX('A', 1, AF11)>;
+ slew-rate = <2>;
+ };
+ };
+
+ sdmmc1_b4_pins_a: sdmmc1-b4-0 {
+ pins {
+ pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
+ <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
+ <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */
+ <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */
+ <STM32_PINMUX('C', 12, AF12)>, /* SDMMC1_CK */
+ <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
+ slew-rate = <3>;
+ drive-push-pull;
+ bias-disable;
+ };
+ };
+
+ sdmmc1_b4_od_pins_a: sdmmc1-b4-od-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
+ <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
+ <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */
+ <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */
+ <STM32_PINMUX('C', 12, AF12)>; /* SDMMC1_CK */
+ slew-rate = <3>;
+ drive-push-pull;
+ bias-disable;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
+ slew-rate = <3>;
+ drive-open-drain;
+ bias-disable;
+ };
+ };
+
+ sdmmc1_b4_sleep_pins_a: sdmmc1-b4-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('C', 8, ANALOG)>, /* SDMMC1_D0 */
+ <STM32_PINMUX('C', 9, ANALOG)>, /* SDMMC1_D1 */
+ <STM32_PINMUX('C', 10, ANALOG)>, /* SDMMC1_D2 */
+ <STM32_PINMUX('C', 11, ANALOG)>, /* SDMMC1_D3 */
+ <STM32_PINMUX('C', 12, ANALOG)>, /* SDMMC1_CK */
+ <STM32_PINMUX('D', 2, ANALOG)>; /* SDMMC1_CMD */
+ };
+ };
+
+ sdmmc1_dir_pins_a: sdmmc1-dir-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('C', 6, AF8)>, /* SDMMC1_D0DIR */
+ <STM32_PINMUX('C', 7, AF8)>, /* SDMMC1_D123DIR */
+ <STM32_PINMUX('B', 9, AF7)>; /* SDMMC1_CDIR */
+ slew-rate = <3>;
+ drive-push-pull;
+ bias-pull-up;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 8, AF7)>; /* SDMMC1_CKIN */
+ bias-pull-up;
+ };
+ };
+
+ sdmmc1_dir_sleep_pins_a: sdmmc1-dir-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('C', 6, ANALOG)>, /* SDMMC1_D0DIR */
+ <STM32_PINMUX('C', 7, ANALOG)>, /* SDMMC1_D123DIR */
+ <STM32_PINMUX('B', 9, ANALOG)>, /* SDMMC1_CDIR */
+ <STM32_PINMUX('B', 8, ANALOG)>; /* SDMMC1_CKIN */
+ };
+ };
+
+ sdmmc2_b4_pins_a: sdmmc2-b4-0 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
+ <STM32_PINMUX('B', 15, AF9)>, /* SDMMC2_D1 */
+ <STM32_PINMUX('B', 3, AF9)>, /* SDMMC2_D2 */
+ <STM32_PINMUX('B', 4, AF9)>, /* SDMMC2_D3 */
+ <STM32_PINMUX('D', 6, AF11)>, /* SDMMC2_CK */
+ <STM32_PINMUX('D', 7, AF11)>; /* SDMMC2_CMD */
+ slew-rate = <3>;
+ drive-push-pull;
+ bias-disable;
+ };
+ };
+
+ sdmmc2_b4_od_pins_a: sdmmc2-b4-od-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 14, AF9)>, /* SDMMC2_D0 */
+ <STM32_PINMUX('B', 15, AF9)>, /* SDMMC2_D1 */
+ <STM32_PINMUX('B', 3, AF9)>, /* SDMMC2_D2 */
+ <STM32_PINMUX('B', 4, AF9)>, /* SDMMC2_D3 */
+ <STM32_PINMUX('D', 6, AF11)>; /* SDMMC2_CK */
+ slew-rate = <3>;
+ drive-push-pull;
+ bias-disable;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('D', 7, AF11)>; /* SDMMC2_CMD */
+ slew-rate = <3>;
+ drive-open-drain;
+ bias-disable;
+ };
+ };
+
+ sdmmc2_b4_sleep_pins_a: sdmmc2-b4-sleep-0 {
+ pins {
+ pinmux = <STM32_PINMUX('B', 14, ANALOG)>, /* SDMMC2_D0 */
+ <STM32_PINMUX('B', 15, ANALOG)>, /* SDMMC2_D1 */
+ <STM32_PINMUX('B', 3, ANALOG)>, /* SDMMC2_D2 */
+ <STM32_PINMUX('B', 4, ANALOG)>, /* SDMMC2_D3 */
+ <STM32_PINMUX('D', 6, ANALOG)>, /* SDMMC2_CK */
+ <STM32_PINMUX('D', 7, ANALOG)>; /* SDMMC2_CMD */
+ };
+ };
+
+ spi1_pins: spi1-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('A', 5, AF5)>,
+ /* SPI1_CLK */
+ <STM32_PINMUX('B', 5, AF5)>;
+ /* SPI1_MOSI */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('G', 9, AF5)>;
+ /* SPI1_MISO */
+ bias-disable;
+ };
+ };
+
+ uart4_pins: uart4-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('A', 0, AF8)>; /* UART4_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('I', 9, AF8)>; /* UART4_RX */
+ bias-disable;
+ };
+ };
+
+ usart1_pins: usart1-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 14, AF4)>; /* USART1_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 15, AF4)>; /* USART1_RX */
+ bias-disable;
+ };
+ };
+
+ usart2_pins: usart2-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('D', 5, AF7)>; /* USART2_TX */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('D', 6, AF7)>; /* USART2_RX */
+ bias-disable;
+ };
+ };
+
+ usart3_pins: usart3-0 {
+ pins1 {
+ pinmux = <STM32_PINMUX('B', 10, AF7)>, /* USART3_TX */
+ <STM32_PINMUX('D', 12, AF7)>; /* USART3_RTS_DE */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins2 {
+ pinmux = <STM32_PINMUX('B', 11, AF7)>, /* USART3_RX */
+ <STM32_PINMUX('D', 11, AF7)>; /* USART3_CTS_NSS */
+ bias-disable;
+ };
+ };
+
+ usbotg_hs_pins_a: usbotg-hs-0 {
+ pins {
+ pinmux = <STM32_PINMUX('H', 4, AF10)>, /* ULPI_NXT */
+ <STM32_PINMUX('I', 11, AF10)>, /* ULPI_DIR */
+ <STM32_PINMUX('C', 0, AF10)>, /* ULPI_STP */
+ <STM32_PINMUX('A', 5, AF10)>, /* ULPI_CK */
+ <STM32_PINMUX('A', 3, AF10)>, /* ULPI_D0 */
+ <STM32_PINMUX('B', 0, AF10)>, /* ULPI_D1 */
+ <STM32_PINMUX('B', 1, AF10)>, /* ULPI_D2 */
+ <STM32_PINMUX('B', 10, AF10)>, /* ULPI_D3 */
+ <STM32_PINMUX('B', 11, AF10)>, /* ULPI_D4 */
+ <STM32_PINMUX('B', 12, AF10)>, /* ULPI_D5 */
+ <STM32_PINMUX('B', 13, AF10)>, /* ULPI_D6 */
+ <STM32_PINMUX('B', 5, AF10)>; /* ULPI_D7 */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <2>;
+ };
+ };
+};
+
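For reference, the STM32_PINMUX() cells used throughout this file come from dt-bindings/pinctrl/stm32-pinfunc.h and pack the GPIO bank, line and mode into a single integer. Roughly (paraphrasing the header, so treat the exact encoding as an assumption):

    /* PIN_NO(port, line)             = ((port) - 'A') * 0x10 + (line)
     * STM32_PINMUX(port, line, mode) = (PIN_NO(port, line) << 8) | (mode)
     *
     * e.g. STM32_PINMUX('B', 6, AF4) selects pin PB6 in alternate
     * function 4 (the I2C1_SCL mux on this SoC, per the comment above).
     */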
diff --git a/arch/arm/boot/dts/stm32h743-pinctrl.dtsi b/arch/arm/boot/dts/stm32h743-pinctrl.dtsi
deleted file mode 100644
index fa5dcb6a5fdd..000000000000
--- a/arch/arm/boot/dts/stm32h743-pinctrl.dtsi
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
- *
- * This file is dual-licensed: you can use it either under the terms
- * of the GPL or the X11 license, at your option. Note that this dual
- * licensing only applies to this file, and not this project as a
- * whole.
- *
- * a) This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Or, alternatively,
- *
- * b) Permission is hereby granted, free of charge, to any person
- * obtaining a copy of this software and associated documentation
- * files (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following
- * conditions:
- *
- * The above copyright notice and this permission notice shall be
- * included in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
- * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
- * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <dt-bindings/pinctrl/stm32-pinfunc.h>
-
-/ {
- soc {
- pin-controller {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "st,stm32h743-pinctrl";
- ranges = <0 0x58020000 0x3000>;
- interrupt-parent = <&exti>;
- st,syscfg = <&syscfg 0x8>;
- pins-are-numbered;
-
- gpioa: gpio@58020000 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0x0 0x400>;
- clocks = <&rcc GPIOA_CK>;
- st,bank-name = "GPIOA";
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpiob: gpio@58020400 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0x400 0x400>;
- clocks = <&rcc GPIOB_CK>;
- st,bank-name = "GPIOB";
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpioc: gpio@58020800 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0x800 0x400>;
- clocks = <&rcc GPIOC_CK>;
- st,bank-name = "GPIOC";
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpiod: gpio@58020c00 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0xc00 0x400>;
- clocks = <&rcc GPIOD_CK>;
- st,bank-name = "GPIOD";
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpioe: gpio@58021000 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0x1000 0x400>;
- clocks = <&rcc GPIOE_CK>;
- st,bank-name = "GPIOE";
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpiof: gpio@58021400 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0x1400 0x400>;
- clocks = <&rcc GPIOF_CK>;
- st,bank-name = "GPIOF";
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpiog: gpio@58021800 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0x1800 0x400>;
- clocks = <&rcc GPIOG_CK>;
- st,bank-name = "GPIOG";
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpioh: gpio@58021c00 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0x1c00 0x400>;
- clocks = <&rcc GPIOH_CK>;
- st,bank-name = "GPIOH";
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpioi: gpio@58022000 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0x2000 0x400>;
- clocks = <&rcc GPIOI_CK>;
- st,bank-name = "GPIOI";
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpioj: gpio@58022400 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0x2400 0x400>;
- clocks = <&rcc GPIOJ_CK>;
- st,bank-name = "GPIOJ";
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- gpiok: gpio@58022800 {
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0x2800 0x400>;
- clocks = <&rcc GPIOK_CK>;
- st,bank-name = "GPIOK";
- interrupt-controller;
- #interrupt-cells = <2>;
- };
-
- i2c1_pins_a: i2c1-0 {
- pins {
- pinmux = <STM32_PINMUX('B', 6, AF4)>, /* I2C1_SCL */
- <STM32_PINMUX('B', 7, AF4)>; /* I2C1_SDA */
- bias-disable;
- drive-open-drain;
- slew-rate = <0>;
- };
- };
-
- ethernet_rmii: rmii-0 {
- pins {
- pinmux = <STM32_PINMUX('G', 11, AF11)>,
- <STM32_PINMUX('G', 13, AF11)>,
- <STM32_PINMUX('G', 12, AF11)>,
- <STM32_PINMUX('C', 4, AF11)>,
- <STM32_PINMUX('C', 5, AF11)>,
- <STM32_PINMUX('A', 7, AF11)>,
- <STM32_PINMUX('C', 1, AF11)>,
- <STM32_PINMUX('A', 2, AF11)>,
- <STM32_PINMUX('A', 1, AF11)>;
- slew-rate = <2>;
- };
- };
-
- sdmmc1_b4_pins_a: sdmmc1-b4-0 {
- pins {
- pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
- <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
- <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */
- <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */
- <STM32_PINMUX('C', 12, AF12)>, /* SDMMC1_CK */
- <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
- slew-rate = <3>;
- drive-push-pull;
- bias-disable;
- };
- };
-
- sdmmc1_b4_od_pins_a: sdmmc1-b4-od-0 {
- pins1 {
- pinmux = <STM32_PINMUX('C', 8, AF12)>, /* SDMMC1_D0 */
- <STM32_PINMUX('C', 9, AF12)>, /* SDMMC1_D1 */
- <STM32_PINMUX('C', 10, AF12)>, /* SDMMC1_D2 */
- <STM32_PINMUX('C', 11, AF12)>, /* SDMMC1_D3 */
- <STM32_PINMUX('C', 12, AF12)>; /* SDMMC1_CK */
- slew-rate = <3>;
- drive-push-pull;
- bias-disable;
- };
- pins2{
- pinmux = <STM32_PINMUX('D', 2, AF12)>; /* SDMMC1_CMD */
- slew-rate = <3>;
- drive-open-drain;
- bias-disable;
- };
- };
-
- sdmmc1_b4_sleep_pins_a: sdmmc1-b4-sleep-0 {
- pins {
- pinmux = <STM32_PINMUX('C', 8, ANALOG)>, /* SDMMC1_D0 */
- <STM32_PINMUX('C', 9, ANALOG)>, /* SDMMC1_D1 */
- <STM32_PINMUX('C', 10, ANALOG)>, /* SDMMC1_D2 */
- <STM32_PINMUX('C', 11, ANALOG)>, /* SDMMC1_D3 */
- <STM32_PINMUX('C', 12, ANALOG)>, /* SDMMC1_CK */
- <STM32_PINMUX('D', 2, ANALOG)>; /* SDMMC1_CMD */
- };
- };
-
- sdmmc1_dir_pins_a: sdmmc1-dir-0 {
- pins1 {
- pinmux = <STM32_PINMUX('C', 6, AF8)>, /* SDMMC1_D0DIR */
- <STM32_PINMUX('C', 7, AF8)>, /* SDMMC1_D123DIR */
- <STM32_PINMUX('B', 9, AF7)>; /* SDMMC1_CDIR */
- slew-rate = <3>;
- drive-push-pull;
- bias-pull-up;
- };
- pins2{
- pinmux = <STM32_PINMUX('B', 8, AF7)>; /* SDMMC1_CKIN */
- bias-pull-up;
- };
- };
-
- sdmmc1_dir_sleep_pins_a: sdmmc1-dir-sleep-0 {
- pins {
- pinmux = <STM32_PINMUX('C', 6, ANALOG)>, /* SDMMC1_D0DIR */
- <STM32_PINMUX('C', 7, ANALOG)>, /* SDMMC1_D123DIR */
- <STM32_PINMUX('B', 9, ANALOG)>, /* SDMMC1_CDIR */
- <STM32_PINMUX('B', 8, ANALOG)>; /* SDMMC1_CKIN */
- };
- };
-
- usart1_pins: usart1-0 {
- pins1 {
- pinmux = <STM32_PINMUX('B', 14, AF4)>; /* USART1_TX */
- bias-disable;
- drive-push-pull;
- slew-rate = <0>;
- };
- pins2 {
- pinmux = <STM32_PINMUX('B', 15, AF4)>; /* USART1_RX */
- bias-disable;
- };
- };
-
- usart2_pins: usart2-0 {
- pins1 {
- pinmux = <STM32_PINMUX('D', 5, AF7)>; /* USART2_TX */
- bias-disable;
- drive-push-pull;
- slew-rate = <0>;
- };
- pins2 {
- pinmux = <STM32_PINMUX('D', 6, AF7)>; /* USART2_RX */
- bias-disable;
- };
- };
-
- usbotg_hs_pins_a: usbotg-hs-0 {
- pins {
- pinmux = <STM32_PINMUX('H', 4, AF10)>, /* ULPI_NXT */
- <STM32_PINMUX('I', 11, AF10)>, /* ULPI_DIR */
- <STM32_PINMUX('C', 0, AF10)>, /* ULPI_STP */
- <STM32_PINMUX('A', 5, AF10)>, /* ULPI_CK */
- <STM32_PINMUX('A', 3, AF10)>, /* ULPI_D0 */
- <STM32_PINMUX('B', 0, AF10)>, /* ULPI_D1 */
- <STM32_PINMUX('B', 1, AF10)>, /* ULPI_D2 */
- <STM32_PINMUX('B', 10, AF10)>, /* ULPI_D3 */
- <STM32_PINMUX('B', 11, AF10)>, /* ULPI_D4 */
- <STM32_PINMUX('B', 12, AF10)>, /* ULPI_D5 */
- <STM32_PINMUX('B', 13, AF10)>, /* ULPI_D6 */
- <STM32_PINMUX('B', 5, AF10)>; /* ULPI_D7 */
- bias-disable;
- drive-push-pull;
- slew-rate = <2>;
- };
- };
- };
- };
-};
diff --git a/arch/arm/boot/dts/stm32h743.dtsi b/arch/arm/boot/dts/stm32h743.dtsi
index 4ebffb0a45a3..05ecdf9ff03a 100644
--- a/arch/arm/boot/dts/stm32h743.dtsi
+++ b/arch/arm/boot/dts/stm32h743.dtsi
@@ -135,6 +135,22 @@
clocks = <&rcc USART2_CK>;
};
+ usart3: serial@40004800 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40004800 0x400>;
+ interrupts = <39>;
+ status = "disabled";
+ clocks = <&rcc USART3_CK>;
+ };
+
+ uart4: serial@40004c00 {
+ compatible = "st,stm32h7-uart";
+ reg = <0x40004c00 0x400>;
+ interrupts = <52>;
+ status = "disabled";
+ clocks = <&rcc UART4_CK>;
+ };
+
i2c1: i2c@40005400 {
compatible = "st,stm32f7-i2c";
#address-cells = <1>;
@@ -159,7 +175,7 @@
status = "disabled";
};
- i2c3: i2c@40005C00 {
+ i2c3: i2c@40005c00 {
compatible = "st,stm32f7-i2c";
#address-cells = <1>;
#size-cells = <0>;
@@ -368,6 +384,21 @@
max-frequency = <120000000>;
};
+ sdmmc2: mmc@48022400 {
+ compatible = "arm,pl18x", "arm,primecell";
+ arm,primecell-periphid = <0x10153180>;
+ reg = <0x48022400 0x400>;
+ interrupts = <124>;
+ interrupt-names = "cmd_irq";
+ clocks = <&rcc SDMMC2_CK>;
+ clock-names = "apb_pclk";
+ resets = <&rcc STM32H7_AHB2_RESET(SDMMC2)>;
+ cap-sd-highspeed;
+ cap-mmc-highspeed;
+ max-frequency = <120000000>;
+ status = "disabled";
+ };
+
exti: interrupt-controller@58000000 {
compatible = "st,stm32h7-exti";
interrupt-controller;
@@ -392,7 +423,7 @@
status = "disabled";
};
- i2c4: i2c@58001C00 {
+ i2c4: i2c@58001c00 {
compatible = "st,stm32f7-i2c";
#address-cells = <1>;
#size-cells = <0>;
@@ -555,6 +586,148 @@
snps,pbl = <8>;
status = "disabled";
};
+
+ pinctrl: pin-controller@58020000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "st,stm32h743-pinctrl";
+ ranges = <0 0x58020000 0x3000>;
+ interrupt-parent = <&exti>;
+ st,syscfg = <&syscfg 0x8>;
+ pins-are-numbered;
+
+ gpioa: gpio@58020000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x0 0x400>;
+ clocks = <&rcc GPIOA_CK>;
+ st,bank-name = "GPIOA";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ngpios = <16>;
+ gpio-ranges = <&pinctrl 0 0 16>;
+ };
+
+ gpiob: gpio@58020400 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x400 0x400>;
+ clocks = <&rcc GPIOB_CK>;
+ st,bank-name = "GPIOB";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ngpios = <16>;
+ gpio-ranges = <&pinctrl 0 16 16>;
+ };
+
+ gpioc: gpio@58020800 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x800 0x400>;
+ clocks = <&rcc GPIOC_CK>;
+ st,bank-name = "GPIOC";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ngpios = <16>;
+ gpio-ranges = <&pinctrl 0 32 16>;
+ };
+
+ gpiod: gpio@58020c00 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0xc00 0x400>;
+ clocks = <&rcc GPIOD_CK>;
+ st,bank-name = "GPIOD";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ngpios = <16>;
+ gpio-ranges = <&pinctrl 0 48 16>;
+ };
+
+ gpioe: gpio@58021000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x1000 0x400>;
+ clocks = <&rcc GPIOE_CK>;
+ st,bank-name = "GPIOE";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ngpios = <16>;
+ gpio-ranges = <&pinctrl 0 64 16>;
+ };
+
+ gpiof: gpio@58021400 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x1400 0x400>;
+ clocks = <&rcc GPIOF_CK>;
+ st,bank-name = "GPIOF";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ngpios = <16>;
+ gpio-ranges = <&pinctrl 0 80 16>;
+ };
+
+ gpiog: gpio@58021800 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x1800 0x400>;
+ clocks = <&rcc GPIOG_CK>;
+ st,bank-name = "GPIOG";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ngpios = <16>;
+ gpio-ranges = <&pinctrl 0 96 16>;
+ };
+
+ gpioh: gpio@58021c00 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x1c00 0x400>;
+ clocks = <&rcc GPIOH_CK>;
+ st,bank-name = "GPIOH";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ngpios = <16>;
+ gpio-ranges = <&pinctrl 0 112 16>;
+ };
+
+ gpioi: gpio@58022000 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x2000 0x400>;
+ clocks = <&rcc GPIOI_CK>;
+ st,bank-name = "GPIOI";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ngpios = <16>;
+ gpio-ranges = <&pinctrl 0 128 16>;
+ };
+
+ gpioj: gpio@58022400 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x2400 0x400>;
+ clocks = <&rcc GPIOJ_CK>;
+ st,bank-name = "GPIOJ";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ngpios = <16>;
+ gpio-ranges = <&pinctrl 0 144 16>;
+ };
+
+ gpiok: gpio@58022800 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x2800 0x400>;
+ clocks = <&rcc GPIOK_CK>;
+ st,bank-name = "GPIOK";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ngpios = <8>;
+ gpio-ranges = <&pinctrl 0 160 8>;
+ };
+ };
};
};
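The new ngpios/gpio-ranges properties tie each GPIO bank back into the pin controller's global pin numbering. The generic binding is gpio-ranges = <phandle gpio-offset pin-offset count>, so each 16-line bank claims one 16-pin window and GPIOK, with only 8 lines, maps just 8. A worked reading of the gpiob entry above, as a sketch:

/* gpio-ranges = <&pinctrl 0 16 16>;
 *   bank-local lines 0..15 (PB0..PB15) map to pinctrl pins 16..31,
 *   so e.g. PB3 resolves to global pin 16 + 3 = 19.
 */
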
diff --git a/arch/arm/boot/dts/stm32h743i-disco.dts b/arch/arm/boot/dts/stm32h743i-disco.dts
index e446d311c520..59e01ce10318 100644
--- a/arch/arm/boot/dts/stm32h743i-disco.dts
+++ b/arch/arm/boot/dts/stm32h743i-disco.dts
@@ -42,7 +42,7 @@
/dts-v1/;
#include "stm32h743.dtsi"
-#include "stm32h743-pinctrl.dtsi"
+#include "stm32h7-pinctrl.dtsi"
/ {
model = "STMicroelectronics STM32H743i-Discovery board";
diff --git a/arch/arm/boot/dts/stm32h743i-eval.dts b/arch/arm/boot/dts/stm32h743i-eval.dts
index 8f398178f5e5..38cc7faf6884 100644
--- a/arch/arm/boot/dts/stm32h743i-eval.dts
+++ b/arch/arm/boot/dts/stm32h743i-eval.dts
@@ -42,7 +42,7 @@
/dts-v1/;
#include "stm32h743.dtsi"
-#include "stm32h743-pinctrl.dtsi"
+#include "stm32h7-pinctrl.dtsi"
/ {
model = "STMicroelectronics STM32H743i-EVAL board";
diff --git a/arch/arm/boot/dts/stm32h750.dtsi b/arch/arm/boot/dts/stm32h750.dtsi
new file mode 100644
index 000000000000..41e3b1e3a874
--- /dev/null
+++ b/arch/arm/boot/dts/stm32h750.dtsi
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright (C) STMicroelectronics 2021 - All Rights Reserved */
+
+#include "stm32h743.dtsi"
+
+
diff --git a/arch/arm/boot/dts/stm32h750i-art-pi.dts b/arch/arm/boot/dts/stm32h750i-art-pi.dts
new file mode 100644
index 000000000000..9bb73bb61901
--- /dev/null
+++ b/arch/arm/boot/dts/stm32h750i-art-pi.dts
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2021 - Dillon Min <dillon.minfei@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * For art-pi board resources, you can refer to link:
+ * https://art-pi.gitee.io/website/
+ */
+
+/dts-v1/;
+#include "stm32h750.dtsi"
+#include "stm32h7-pinctrl.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "RT-Thread STM32H750i-ART-PI board";
+ compatible = "st,stm32h750i-art-pi", "st,stm32h750";
+
+ chosen {
+ bootargs = "root=/dev/ram";
+ stdout-path = "serial0:2000000n8";
+ };
+
+ memory@c0000000 {
+ device_type = "memory";
+ reg = <0xc0000000 0x2000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ linux,cma {
+ compatible = "shared-dma-pool";
+ no-map;
+ size = <0x100000>;
+ linux,dma-default;
+ };
+ };
+
+ aliases {
+ serial0 = &uart4;
+ serial1 = &usart3;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ led-red {
+ gpios = <&gpioi 8 0>;
+ };
+ led-green {
+ gpios = <&gpioc 15 0>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ v3v3: regulator-v3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ wlan_pwr: regulator-wlan {
+ compatible = "regulator-fixed";
+
+ regulator-name = "wl-reg";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+
+ gpios = <&gpioc 13 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&clk_hse {
+ clock-frequency = <25000000>;
+};
+
+&dma1 {
+ status = "okay";
+};
+
+&dma2 {
+ status = "okay";
+};
+
+&mac {
+ status = "disabled";
+ pinctrl-0 = <&ethernet_rmii>;
+ pinctrl-names = "default";
+ phy-mode = "rmii";
+ phy-handle = <&phy0>;
+
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+};
+
+&sdmmc1 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+ broken-cd;
+ st,neg-edge;
+ bus-width = <4>;
+ vmmc-supply = <&v3v3>;
+ status = "okay";
+};
+
+&sdmmc2 {
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc2_b4_pins_a>;
+ pinctrl-1 = <&sdmmc2_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc2_b4_sleep_pins_a>;
+ broken-cd;
+ non-removable;
+ st,neg-edge;
+ bus-width = <4>;
+ vmmc-supply = <&wlan_pwr>;
+ status = "okay";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ brcmf: brcmf@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ };
+};
+
+&spi1 {
+ status = "okay";
+ pinctrl-0 = <&spi1_pins>;
+ pinctrl-names = "default";
+ cs-gpios = <&gpioa 4 GPIO_ACTIVE_LOW>;
+ dmas = <&dmamux1 37 0x400 0x05>,
+ <&dmamux1 38 0x400 0x05>;
+ dma-names = "rx", "tx";
+
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "winbond,w25q128", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <80000000>;
+
+ partition@0 {
+ label = "root filesystem";
+ reg = <0 0x1000000>;
+ };
+ };
+};
+
+&usart2 {
+ pinctrl-0 = <&usart2_pins>;
+ pinctrl-names = "default";
+ status = "disabled";
+};
+
+&usart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usart3_pins>;
+ dmas = <&dmamux1 45 0x400 0x05>,
+ <&dmamux1 46 0x400 0x05>;
+ dma-names = "rx", "tx";
+ st,hw-flow-ctrl;
+ status = "okay";
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ host-wakeup-gpios = <&gpioc 0 GPIO_ACTIVE_HIGH>;
+ device-wakeup-gpios = <&gpioi 10 GPIO_ACTIVE_HIGH>;
+ shutdown-gpios = <&gpioi 11 GPIO_ACTIVE_HIGH>;
+ max-speed = <115200>;
+ };
+};
+
+&uart4 {
+ pinctrl-0 = <&uart4_pins>;
+ pinctrl-names = "default";
+ status = "okay";
+};
+
+
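The flash@0 node above uses the legacy layout with the partition placed directly under the flash node. The same layout can also be expressed with the fixed-partitions container binding; a sketch, assuming nothing beyond the node shown:

flash@0 {
	compatible = "winbond,w25q128", "jedec,spi-nor";
	reg = <0>;
	spi-max-frequency = <80000000>;

	partitions {
		compatible = "fixed-partitions";
		#address-cells = <1>;
		#size-cells = <1>;

		partition@0 {
			label = "root filesystem";
			reg = <0x0 0x1000000>;	/* 16 MiB */
		};
	};
};
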
diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
index 7b4249ed1983..060baa8b7e9d 100644
--- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
@@ -1891,10 +1891,15 @@
usart2_idle_pins_c: usart2-idle-2 {
pins1 {
pinmux = <STM32_PINMUX('D', 5, ANALOG)>, /* USART2_TX */
- <STM32_PINMUX('D', 4, ANALOG)>, /* USART2_RTS */
<STM32_PINMUX('D', 3, ANALOG)>; /* USART2_CTS_NSS */
};
pins2 {
+ pinmux = <STM32_PINMUX('D', 4, AF7)>; /* USART2_RTS */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <3>;
+ };
+ pins3 {
pinmux = <STM32_PINMUX('D', 6, AF7)>; /* USART2_RX */
bias-disable;
};
@@ -1940,10 +1945,15 @@
usart3_idle_pins_b: usart3-idle-1 {
pins1 {
pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
- <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
<STM32_PINMUX('I', 10, ANALOG)>; /* USART3_CTS_NSS */
};
pins2 {
+ pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins3 {
pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
bias-disable;
};
@@ -1976,10 +1986,15 @@
usart3_idle_pins_c: usart3-idle-2 {
pins1 {
pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
- <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
<STM32_PINMUX('B', 13, ANALOG)>; /* USART3_CTS_NSS */
};
pins2 {
+ pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <0>;
+ };
+ pins3 {
pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
bias-disable;
};
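All three hunks follow the same pattern: the RTS line is pulled out of the analog "idle" group into its own sub-group that keeps the alternate-function, push-pull configuration, so the line stays actively driven while the port idles instead of floating. A board opts into the idle state through a third pinctrl entry; a sketch, with the group labels assumed from the same _c family:

&usart2 {
	pinctrl-names = "default", "sleep", "idle";
	pinctrl-0 = <&usart2_pins_c>;
	pinctrl-1 = <&usart2_sleep_pins_c>;
	pinctrl-2 = <&usart2_idle_pins_c>;
	uart-has-rtscts;
	status = "okay";
};
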
diff --git a/arch/arm/boot/dts/stm32mp151.dtsi b/arch/arm/boot/dts/stm32mp151.dtsi
index 4b8031782555..fcd3230c469b 100644
--- a/arch/arm/boot/dts/stm32mp151.dtsi
+++ b/arch/arm/boot/dts/stm32mp151.dtsi
@@ -452,32 +452,36 @@
usart2: serial@4000e000 {
compatible = "st,stm32h7-uart";
reg = <0x4000e000 0x400>;
- interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&exti 27 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc USART2_K>;
+ wakeup-source;
status = "disabled";
};
usart3: serial@4000f000 {
compatible = "st,stm32h7-uart";
reg = <0x4000f000 0x400>;
- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&exti 28 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc USART3_K>;
+ wakeup-source;
status = "disabled";
};
uart4: serial@40010000 {
compatible = "st,stm32h7-uart";
reg = <0x40010000 0x400>;
- interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&exti 30 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc UART4_K>;
+ wakeup-source;
status = "disabled";
};
uart5: serial@40011000 {
compatible = "st,stm32h7-uart";
reg = <0x40011000 0x400>;
- interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&exti 31 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc UART5_K>;
+ wakeup-source;
status = "disabled";
};
@@ -493,6 +497,7 @@
#size-cells = <0>;
st,syscfg-fmp = <&syscfg 0x4 0x1>;
wakeup-source;
+ i2c-analog-filter;
status = "disabled";
};
@@ -508,6 +513,7 @@
#size-cells = <0>;
st,syscfg-fmp = <&syscfg 0x4 0x2>;
wakeup-source;
+ i2c-analog-filter;
status = "disabled";
};
@@ -523,6 +529,7 @@
#size-cells = <0>;
st,syscfg-fmp = <&syscfg 0x4 0x4>;
wakeup-source;
+ i2c-analog-filter;
status = "disabled";
};
@@ -538,6 +545,7 @@
#size-cells = <0>;
st,syscfg-fmp = <&syscfg 0x4 0x10>;
wakeup-source;
+ i2c-analog-filter;
status = "disabled";
};
@@ -577,16 +585,18 @@
uart7: serial@40018000 {
compatible = "st,stm32h7-uart";
reg = <0x40018000 0x400>;
- interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&exti 32 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc UART7_K>;
+ wakeup-source;
status = "disabled";
};
uart8: serial@40019000 {
compatible = "st,stm32h7-uart";
reg = <0x40019000 0x400>;
- interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&exti 33 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc UART8_K>;
+ wakeup-source;
status = "disabled";
};
@@ -665,8 +675,9 @@
usart6: serial@44003000 {
compatible = "st,stm32h7-uart";
reg = <0x44003000 0x400>;
- interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&exti 29 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc USART6_K>;
+ wakeup-source;
status = "disabled";
};
@@ -1421,11 +1432,13 @@
"mac-clk-tx",
"mac-clk-rx",
"eth-ck",
+ "ptp_ref",
"ethstp";
clocks = <&rcc ETHMAC>,
<&rcc ETHTX>,
<&rcc ETHRX>,
<&rcc ETHCK_K>,
+ <&rcc ETHPTP_K>,
<&rcc ETHSTP>;
st,syscon = <&syscfg 0x4>;
snps,mixed-burst;
@@ -1505,8 +1518,9 @@
usart1: serial@5c000000 {
compatible = "st,stm32h7-uart";
reg = <0x5c000000 0x400>;
- interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&exti 26 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc USART1_K>;
+ wakeup-source;
status = "disabled";
};
@@ -1536,6 +1550,7 @@
#size-cells = <0>;
st,syscfg-fmp = <&syscfg 0x4 0x8>;
wakeup-source;
+ i2c-analog-filter;
status = "disabled";
};
@@ -1573,6 +1588,7 @@
#size-cells = <0>;
st,syscfg-fmp = <&syscfg 0x4 0x20>;
wakeup-source;
+ i2c-analog-filter;
status = "disabled";
};
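Two changes recur through this file: the UARTs move from plain GIC interrupts to interrupts-extended entries routed through the EXTI controller and gain wakeup-source, so incoming traffic can wake the system from low-power modes, and the I2C controllers gain the generic i2c-analog-filter property. interrupts-extended simply folds the interrupt parent into each entry; the two fragments below describe the same wiring (illustrative only):

serial@4000e000 {
	interrupt-parent = <&exti>;
	interrupts = <27 IRQ_TYPE_LEVEL_HIGH>;
};

/* ...is equivalent to: */

serial@4000e000 {
	interrupts-extended = <&exti 27 IRQ_TYPE_LEVEL_HIGH>;
};
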
diff --git a/arch/arm/boot/dts/stm32mp153c-dhcom-drc02.dts b/arch/arm/boot/dts/stm32mp153c-dhcom-drc02.dts
index 02a39132958e..b4e504f026ce 100644
--- a/arch/arm/boot/dts/stm32mp153c-dhcom-drc02.dts
+++ b/arch/arm/boot/dts/stm32mp153c-dhcom-drc02.dts
@@ -20,6 +20,10 @@
"st,stm32mp153";
};
+&cryp1 {
+ status = "okay";
+};
+
&m_can1 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&m_can1_pins_a>;
diff --git a/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1-ctouch2.dts b/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1-ctouch2.dts
new file mode 100644
index 000000000000..d3058a036c74
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1-ctouch2.dts
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (c) STMicroelectronics 2019 - All Rights Reserved
+ * Copyright (c) 2020 Engicam srl
+ * Copyright (c) 2020 Amarula Solutions (India)
+ */
+
+/dts-v1/;
+#include "stm32mp157.dtsi"
+#include "stm32mp157a-icore-stm32mp1.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxaa-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Engicam i.Core STM32MP1 C.TOUCH 2.0";
+ compatible = "engicam,icore-stm32mp1-ctouch2",
+ "engicam,icore-stm32mp1", "st,stm32mp157";
+
+ aliases {
+ serial0 = &uart4;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&sdmmc1 {
+ bus-width = <4>;
+ disable-wp;
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+ st,neg-edge;
+ vmmc-supply = <&v3v3>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&uart4_pins_a>;
+ pinctrl-1 = <&uart4_sleep_pins_a>;
+ pinctrl-2 = <&uart4_idle_pins_a>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1-edimm2.2.dts b/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1-edimm2.2.dts
new file mode 100644
index 000000000000..ec9f1d1cd50f
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1-edimm2.2.dts
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (c) STMicroelectronics 2019 - All Rights Reserved
+ * Copyright (c) 2020 Engicam srl
+ * Copyright (c) 2020 Amarula Solutions (India)
+ */
+
+/dts-v1/;
+#include "stm32mp157.dtsi"
+#include "stm32mp157a-icore-stm32mp1.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxaa-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Engicam i.Core STM32MP1 EDIMM2.2 Starter Kit";
+ compatible = "engicam,icore-stm32mp1-edimm2.2",
+ "engicam,icore-stm32mp1", "st,stm32mp157";
+
+ aliases {
+ serial0 = &uart4;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&sdmmc1 {
+ bus-width = <4>;
+ disable-wp;
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+ st,neg-edge;
+ vmmc-supply = <&v3v3>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&uart4_pins_a>;
+ pinctrl-1 = <&uart4_sleep_pins_a>;
+ pinctrl-2 = <&uart4_idle_pins_a>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1.dtsi b/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1.dtsi
new file mode 100644
index 000000000000..01166ccacf2b
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157a-icore-stm32mp1.dtsi
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (c) STMicroelectronics 2019 - All Rights Reserved
+ * Copyright (c) 2020 Engicam srl
+ * Copyright (c) 2020 Amarula Solutions (India)
+ */
+
+/ {
+ compatible = "engicam,icore-stm32mp1", "st,stm32mp157";
+
+ memory@c0000000 {
+ device_type = "memory";
+ reg = <0xc0000000 0x20000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mcuram2: mcuram2@10000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10000000 0x40000>;
+ no-map;
+ };
+
+ vdev0vring0: vdev0vring0@10040000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10040000 0x1000>;
+ no-map;
+ };
+
+ vdev0vring1: vdev0vring1@10041000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10041000 0x1000>;
+ no-map;
+ };
+
+ vdev0buffer: vdev0buffer@10042000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10042000 0x4000>;
+ no-map;
+ };
+
+ mcuram: mcuram@30000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x30000000 0x40000>;
+ no-map;
+ };
+
+ retram: retram@38000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x38000000 0x10000>;
+ no-map;
+ };
+ };
+
+ vddcore: regulator-vddcore {
+ compatible = "regulator-fixed";
+ regulator-name = "vddcore";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ };
+
+ vdd: regulator-vdd {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vdd_usb: regulator-vdd-usb {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_usb";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vdda: regulator-vdda {
+ compatible = "regulator-fixed";
+ regulator-name = "vdda";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vdd_ddr: regulator-vdd-ddr {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_ddr";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ };
+
+ vtt_ddr: regulator-vtt-ddr {
+ compatible = "regulator-fixed";
+ regulator-name = "vtt_ddr";
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <675000>;
+ regulator-always-on;
+ vin-supply = <&vdd>;
+ };
+
+ vref_ddr: regulator-vref-ddr {
+ compatible = "regulator-fixed";
+ regulator-name = "vref_ddr";
+ regulator-min-microvolt = <675000>;
+ regulator-max-microvolt = <675000>;
+ regulator-always-on;
+ vin-supply = <&vdd>;
+ };
+
+ vdd_sd: regulator-vdd-sd {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_sd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ v3v3: regulator-v3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "v3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ v2v8: regulator-v2v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "v2v8";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-always-on;
+ vin-supply = <&v3v3>;
+ };
+
+ v1v8: regulator-v1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "v1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ vin-supply = <&v3v3>;
+ };
+};
+
+&dts {
+ status = "okay";
+};
+
+&i2c2 {
+ i2c-scl-falling-time-ns = <20>;
+ i2c-scl-rising-time-ns = <185>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c2_pins_a>;
+ pinctrl-1 = <&i2c2_sleep_pins_a>;
+ status = "okay";
+};
+
+&ipcc {
+ status = "okay";
+};
+
+&iwdg2 {
+ timeout-sec = <32>;
+ status = "okay";
+};
+
+&m4_rproc {
+ memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
+ <&vdev0vring1>, <&vdev0buffer>;
+ mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
+ mbox-names = "vq0", "vq1", "shutdown";
+ interrupt-parent = <&exti>;
+ interrupts = <68 1>;
+ status = "okay";
+};
+
+&rng1 {
+ status = "okay";
+};
+
+&rtc {
+ status = "okay";
+};
+
+&vrefbuf {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ vdda-supply = <&vdd>;
+ status = "okay";
+};
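The reserved-memory carve-outs above follow the usual stm32mp1 remoteproc layout: the vdev0vring*/vdev0buffer pools hold the virtio rings and buffers shared with the Cortex-M4, and no-map keeps the kernel from ever mapping them, since the coprocessor firmware owns those addresses. The m4_rproc node then consumes them by phandle, with carve-outs matched by their node names (vdev0vring0 and so on); reduced to a sketch:

&m4_rproc {
	memory-region = <&retram>, <&mcuram>, <&mcuram2>,
			<&vdev0vring0>, <&vdev0vring1>, <&vdev0buffer>;
	mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
	mbox-names = "vq0", "vq1", "shutdown";
};
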
diff --git a/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0-of7.dts b/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0-of7.dts
new file mode 100644
index 000000000000..674b2d330dc4
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0-of7.dts
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (c) STMicroelectronics 2019 - All Rights Reserved
+ * Copyright (c) 2020 Engicam srl
+ * Copyright (c) 2020 Amarula Solutions (India)
+ */
+
+/dts-v1/;
+#include "stm32mp157.dtsi"
+#include "stm32mp157a-microgea-stm32mp1.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxaa-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Engicam MicroGEA STM32MP1 MicroDev 2.0 7\" Open Frame";
+ compatible = "engicam,microgea-stm32mp1-microdev2.0-of7",
+ "engicam,microgea-stm32mp1", "st,stm32mp157";
+
+ aliases {
+ serial0 = &uart4;
+ serial1 = &uart8;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ backlight: backlight {
+ compatible = "gpio-backlight";
+ gpios = <&gpiod 13 GPIO_ACTIVE_HIGH>;
+ default-on;
+ };
+
+ lcd_3v3: regulator-lcd-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "lcd_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpiof 10 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-always-on;
+ power-supply = <&panel_pwr>;
+ };
+
+ panel_pwr: regulator-panel-pwr {
+ compatible = "regulator-fixed";
+ regulator-name = "panel_pwr";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpiob 10 GPIO_ACTIVE_HIGH>;
+ regulator-always-on;
+ };
+
+ panel {
+ compatible = "auo,b101aw03";
+ backlight = <&backlight>;
+ enable-gpios = <&gpiof 2 GPIO_ACTIVE_HIGH>;
+ power-supply = <&lcd_3v3>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&ltdc_ep0_out>;
+ };
+ };
+ };
+};
+
+&i2c2 {
+ i2c-scl-falling-time-ns = <20>;
+ i2c-scl-rising-time-ns = <185>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c2_pins_a>;
+ pinctrl-1 = <&i2c2_sleep_pins_a>;
+ status = "okay";
+};
+
+&ltdc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ltdc_pins>;
+ status = "okay";
+
+ port {
+ ltdc_ep0_out: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&panel_in>;
+ };
+ };
+};
+
+&pinctrl {
+ ltdc_pins: ltdc {
+ pins {
+ pinmux = <STM32_PINMUX('G', 10, AF14)>, /* LTDC_B2 */
+ <STM32_PINMUX('H', 12, AF14)>, /* LTDC_R6 */
+ <STM32_PINMUX('H', 11, AF14)>, /* LTDC_R5 */
+ <STM32_PINMUX('D', 10, AF14)>, /* LTDC_B3 */
+ <STM32_PINMUX('D', 9, AF14)>, /* LTDC_B0 */
+ <STM32_PINMUX('E', 5, AF14)>, /* LTDC_G0 */
+ <STM32_PINMUX('E', 6, AF14)>, /* LTDC_G1 */
+ <STM32_PINMUX('E', 13, AF14)>, /* LTDC_DE */
+ <STM32_PINMUX('E', 15, AF14)>, /* LTDC_R7 */
+ <STM32_PINMUX('G', 7, AF14)>, /* LTDC_CLK */
+ <STM32_PINMUX('G', 12, AF14)>, /* LTDC_B1 */
+ <STM32_PINMUX('H', 2, AF14)>, /* LTDC_R0 */
+ <STM32_PINMUX('H', 3, AF14)>, /* LTDC_R1 */
+ <STM32_PINMUX('H', 8, AF14)>, /* LTDC_R2 */
+ <STM32_PINMUX('H', 9, AF14)>, /* LTDC_R3 */
+ <STM32_PINMUX('H', 10, AF14)>, /* LTDC_R4 */
+ <STM32_PINMUX('H', 13, AF14)>, /* LTDC_G2 */
+ <STM32_PINMUX('H', 14, AF14)>, /* LTDC_G3 */
+ <STM32_PINMUX('H', 15, AF14)>, /* LTDC_G4 */
+ <STM32_PINMUX('I', 0, AF14)>, /* LTDC_G5 */
+ <STM32_PINMUX('I', 1, AF14)>, /* LTDC_G6 */
+ <STM32_PINMUX('I', 2, AF14)>, /* LTDC_G7 */
+ <STM32_PINMUX('I', 4, AF14)>, /* LTDC_B4 */
+ <STM32_PINMUX('I', 5, AF14)>, /* LTDC_B5 */
+ <STM32_PINMUX('B', 8, AF14)>, /* LTDC_B6 */
+ <STM32_PINMUX('I', 7, AF14)>, /* LTDC_B7 */
+ <STM32_PINMUX('I', 9, AF14)>, /* LTDC_VSYNC */
+ <STM32_PINMUX('I', 10, AF14)>; /* LTDC_HSYNC */
+ bias-disable;
+ drive-push-pull;
+ slew-rate = <3>;
+ };
+ };
+};
+
+&sdmmc1 {
+ bus-width = <4>;
+ disable-wp;
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+ st,neg-edge;
+ vmmc-supply = <&vdd>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&uart4_pins_a>;
+ pinctrl-1 = <&uart4_sleep_pins_a>;
+ pinctrl-2 = <&uart4_idle_pins_a>;
+ status = "okay";
+};
+
+/* J31: RS232 */
+&uart8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart8_pins_a>;
+ status = "okay";
+};
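The panel is wired to the display controller through the OF graph: the panel's input endpoint and the LTDC's output endpoint name each other via remote-endpoint, and the link must be symmetric for the DRM driver to bind the pipeline. Boiled down to a sketch, with the labels as in the file above:

panel {
	port {
		panel_in: endpoint {
			remote-endpoint = <&ltdc_ep0_out>;
		};
	};
};

&ltdc {
	port {
		ltdc_ep0_out: endpoint@0 {
			reg = <0>;
			remote-endpoint = <&panel_in>;
		};
	};
};
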
diff --git a/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0.dts b/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0.dts
new file mode 100644
index 000000000000..7a75868164dc
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1-microdev2.0.dts
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (c) STMicroelectronics 2019 - All Rights Reserved
+ * Copyright (c) 2020 Engicam srl
+ * Copyright (c) 2020 Amarula Solutions (India)
+ */
+
+/dts-v1/;
+#include "stm32mp157.dtsi"
+#include "stm32mp157a-microgea-stm32mp1.dtsi"
+#include "stm32mp15-pinctrl.dtsi"
+#include "stm32mp15xxaa-pinctrl.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Engicam MicroGEA STM32MP1 MicroDev 2.0 Carrier Board";
+ compatible = "engicam,microgea-stm32mp1-microdev2.0",
+ "engicam,microgea-stm32mp1", "st,stm32mp157";
+
+ aliases {
+ serial0 = &uart4;
+ serial1 = &uart8;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&sdmmc1 {
+ bus-width = <4>;
+ disable-wp;
+ pinctrl-names = "default", "opendrain", "sleep";
+ pinctrl-0 = <&sdmmc1_b4_pins_a>;
+ pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
+ pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
+ st,neg-edge;
+ vmmc-supply = <&vdd>;
+ status = "okay";
+};
+
+&uart4 {
+ pinctrl-names = "default", "sleep", "idle";
+ pinctrl-0 = <&uart4_pins_a>;
+ pinctrl-1 = <&uart4_sleep_pins_a>;
+ pinctrl-2 = <&uart4_idle_pins_a>;
+ status = "okay";
+};
+
+/* J31: RS232 */
+&uart8 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart8_pins_a>;
+ status = "okay";
+};
diff --git a/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1.dtsi b/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1.dtsi
new file mode 100644
index 000000000000..0b85175f151e
--- /dev/null
+++ b/arch/arm/boot/dts/stm32mp157a-microgea-stm32mp1.dtsi
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright (c) STMicroelectronics 2019 - All Rights Reserved
+ * Copyright (c) 2020 Engicam srl
+ * Copyright (c) 2020 Amarula Solutions (India)
+ */
+
+/ {
+ compatible = "engicam,microgea-stm32mp1", "st,stm32mp157";
+
+ memory@c0000000 {
+ device_type = "memory";
+ reg = <0xc0000000 0x10000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ mcuram2: mcuram2@10000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10000000 0x40000>;
+ no-map;
+ };
+
+ vdev0vring0: vdev0vring0@10040000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10040000 0x1000>;
+ no-map;
+ };
+
+ vdev0vring1: vdev0vring1@10041000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10041000 0x1000>;
+ no-map;
+ };
+
+ vdev0buffer: vdev0buffer@10042000 {
+ compatible = "shared-dma-pool";
+ reg = <0x10042000 0x4000>;
+ no-map;
+ };
+
+ mcuram: mcuram@30000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x30000000 0x40000>;
+ no-map;
+ };
+
+ retram: retram@38000000 {
+ compatible = "shared-dma-pool";
+ reg = <0x38000000 0x10000>;
+ no-map;
+ };
+ };
+
+ vin: regulator-vin {
+ compatible = "regulator-fixed";
+ regulator-name = "vin";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ };
+
+ vddcore: regulator-vddcore {
+ compatible = "regulator-fixed";
+ regulator-name = "vddcore";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ vin-supply = <&vin>;
+ };
+
+ vdd: regulator-vdd {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ vin-supply = <&vin>;
+ };
+
+ vddq_ddr: regulator-vddq-ddr {
+ compatible = "regulator-fixed";
+ regulator-name = "vddq_ddr";
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-always-on;
+ vin-supply = <&vin>;
+ };
+};
+
+&dts {
+ status = "okay";
+};
+
+&fmc {
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&fmc_pins_a>;
+ pinctrl-1 = <&fmc_sleep_pins_a>;
+ status = "okay";
+
+ nand-controller@4,0 {
+ status = "okay";
+
+ nand@0 {
+ reg = <0>;
+ nand-on-flash-bbt;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+};
+
+&ipcc {
+ status = "okay";
+};
+
+&iwdg2 {
+ timeout-sec = <32>;
+ status = "okay";
+};
+
+&m4_rproc {
+ memory-region = <&retram>, <&mcuram>, <&mcuram2>, <&vdev0vring0>,
+ <&vdev0vring1>, <&vdev0buffer>;
+ mboxes = <&ipcc 0>, <&ipcc 1>, <&ipcc 2>;
+ mbox-names = "vq0", "vq1", "shutdown";
+ interrupt-parent = <&exti>;
+ interrupts = <68 1>;
+ status = "okay";
+};
+
+&rng1 {
+ status = "okay";
+};
+
+&rtc {
+ status = "okay";
+};
+
+&vrefbuf {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ vdda-supply = <&vdd>;
+ status = "okay";
+};
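The fmc node enables the NAND controller behind the 4,0 unit address and lets the MTD core keep the bad-block table on flash via nand-on-flash-bbt. The #address-cells/#size-cells pair on nand@0 prepares it for partition children; a hypothetical layout, purely as a sketch:

nand@0 {
	reg = <0>;
	nand-on-flash-bbt;
	#address-cells = <1>;
	#size-cells = <1>;

	partition@0 {
		label = "boot";		/* name and size are assumptions */
		reg = <0x0 0x200000>;
	};
};
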
diff --git a/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts b/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts
index d3b81382f97c..6dd8216c235e 100644
--- a/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts
+++ b/arch/arm/boot/dts/stm32mp157c-dhcom-pdk2.dts
@@ -20,6 +20,10 @@
"st,stm32mp157";
};
+&cryp1 {
+ status = "okay";
+};
+
&m_can1 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&m_can1_pins_a>;
diff --git a/arch/arm/boot/dts/stm32mp157c-dhcom-picoitx.dts b/arch/arm/boot/dts/stm32mp157c-dhcom-picoitx.dts
index cfb8f8a0c82d..7067a860aaff 100644
--- a/arch/arm/boot/dts/stm32mp157c-dhcom-picoitx.dts
+++ b/arch/arm/boot/dts/stm32mp157c-dhcom-picoitx.dts
@@ -20,6 +20,10 @@
"st,stm32mp157";
};
+&cryp1 {
+ status = "okay";
+};
+
&m_can1 {
pinctrl-names = "default", "sleep";
pinctrl-0 = <&m_can1_pins_a>;
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi
index fad23d6f69b8..fb45c5aa878d 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-drc02.dtsi
@@ -43,15 +43,15 @@
&gpiod {
gpio-line-names = "", "", "", "",
- "", "", "", "",
- "", "", "", "Out1",
- "Out2", "", "", "";
+ "", "", "DHCOM-B", "",
+ "", "", "", "DRC02-Out1",
+ "DRC02-Out2", "", "", "";
};
&gpioi {
- gpio-line-names = "In1", "", "", "",
- "", "", "", "",
- "In2", "", "", "",
+ gpio-line-names = "DRC02-In1", "DHCOM-O", "DHCOM-H", "DHCOM-I",
+ "DHCOM-R", "DHCOM-M", "", "",
+ "DRC02-In2", "", "", "",
"", "", "", "";
/*
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi
index cd3a1798ca68..ba816ef8b9b2 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-picoitx.dtsi
@@ -57,22 +57,22 @@
&gpioc {
gpio-line-names = "", "", "", "",
- "", "", "In1", "",
+ "", "", "PicoITX-In1", "",
"", "", "", "",
"", "", "", "";
};
&gpiod {
gpio-line-names = "", "", "", "",
- "", "", "", "",
- "", "", "", "Out1",
- "Out2", "", "", "";
+ "", "", "DHCOM-B", "",
+ "", "", "", "PicoITX-Out1",
+ "PicoITX-Out2", "", "", "";
};
&gpiog {
- gpio-line-names = "In2", "", "", "",
- "", "", "", "",
+ gpio-line-names = "PicoITX-In2", "", "", "",
"", "", "", "",
+ "DHCOM-L", "", "", "",
"", "", "", "";
};
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
index 2617815e42a6..272a1a67a9ad 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
@@ -92,6 +92,10 @@
};
};
+&crc1 {
+ status = "okay";
+};
+
&dac {
pinctrl-names = "default";
pinctrl-0 = <&dac_ch1_pins_a &dac_ch2_pins_a>;
@@ -164,10 +168,70 @@
};
};
+&gpioa {
+ gpio-line-names = "", "", "", "",
+ "", "", "DHCOM-K", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&gpiob {
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "DHCOM-Q", "", "", "",
+ "", "", "", "";
+};
+
&gpioc {
+ gpio-line-names = "", "", "", "",
+ "", "", "DHCOM-E", "",
+ "", "", "", "",
+ "", "", "", "";
status = "okay";
};
+&gpiod {
+ gpio-line-names = "", "", "", "",
+ "", "", "DHCOM-B", "",
+ "", "", "", "DHCOM-F",
+ "DHCOM-D", "", "", "";
+};
+
+&gpioe {
+ gpio-line-names = "", "", "", "",
+ "", "", "DHCOM-P", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&gpiof {
+ gpio-line-names = "", "", "", "DHCOM-A",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&gpiog {
+ gpio-line-names = "DHCOM-C", "", "", "",
+ "", "", "", "",
+ "DHCOM-L", "", "", "",
+ "", "", "", "";
+};
+
+&gpioh {
+ gpio-line-names = "", "", "", "",
+ "", "", "", "DHCOM-N",
+ "DHCOM-J", "DHCOM-W", "DHCOM-V", "DHCOM-U",
+ "DHCOM-T", "", "DHCOM-S", "";
+};
+
+&gpioi {
+ gpio-line-names = "DHCOM-G", "DHCOM-O", "DHCOM-H", "DHCOM-I",
+ "DHCOM-R", "DHCOM-M", "", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
&i2c4 {
pinctrl-names = "default";
pinctrl-0 = <&i2c4_pins_a>;
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
index b09e87fe901a..64dca5b7f748 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-avenger96.dtsi
@@ -162,6 +162,41 @@
};
};
+&gpioa {
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "", "", "", "AV96-K",
+ "AV96-I", "", "AV96-A", "";
+};
+
+&gpiob {
+ gpio-line-names = "", "", "", "",
+ "", "AV96-J", "", "",
+ "", "", "", "AV96-B",
+ "", "AV96-L", "", "";
+};
+
+&gpioc {
+ gpio-line-names = "", "", "", "AV96-C",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "";
+};
+
+&gpiod {
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "AV96-D", "", "", "",
+ "", "", "AV96-E", "AV96-F";
+};
+
+&gpiof {
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "AV96-G", "AV96-H", "", "";
+};
+
&i2c1 { /* X6 I2C1 */
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins_b>;
diff --git a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
index 803eb8bc9c85..013ae369791d 100644
--- a/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
+++ b/arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
@@ -21,6 +21,10 @@
};
};
+&crc1 {
+ status = "okay";
+};
+
&dts {
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun4i-a10-topwise-a721.dts b/arch/arm/boot/dts/sun4i-a10-topwise-a721.dts
new file mode 100644
index 000000000000..3628f12d2521
--- /dev/null
+++ b/arch/arm/boot/dts/sun4i-a10-topwise-a721.dts
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2020 Pascal Roeleven <dev@pascalroeleven.nl>
+ */
+
+/dts-v1/;
+#include "sun4i-a10.dtsi"
+#include "sunxi-common-regulators.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/ {
+ model = "Topwise A721";
+ compatible = "topwise,a721", "allwinner,sun4i-a10";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ backlight: backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 0 100000 PWM_POLARITY_INVERTED>;
+ power-supply = <&reg_vbat>;
+ enable-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */
+ brightness-levels = <0 30 40 50 60 70 80 90 100>;
+ default-brightness-level = <8>;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ panel {
+ compatible = "starry,kr070pe2t";
+ backlight = <&backlight>;
+ power-supply = <&reg_lcd_power>;
+
+ port {
+ panel_input: endpoint {
+ remote-endpoint = <&tcon0_out_panel>;
+ };
+ };
+ };
+
+ reg_lcd_power: reg-lcd-power {
+ compatible = "regulator-fixed";
+ regulator-name = "reg-lcd-power";
+ gpio = <&pio 7 8 GPIO_ACTIVE_HIGH>; /* PH8 */
+ enable-active-high;
+ };
+
+ reg_vbat: reg-vbat {
+ compatible = "regulator-fixed";
+ regulator-name = "vbat";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ };
+
+};
+
+&codec {
+ status = "okay";
+};
+
+&cpu0 {
+ cpu-supply = <&reg_dcdc2>;
+};
+
+&de {
+ status = "okay";
+};
+
+&ehci0 {
+ status = "okay";
+};
+
+&ehci1 {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ axp209: pmic@34 {
+ reg = <0x34>;
+ interrupts = <0>;
+ };
+};
+
+#include "axp209.dtsi"
+
+&ac_power_supply {
+ status = "okay";
+};
+
+&battery_power_supply {
+ status = "okay";
+};
+
+&i2c1 {
+ status = "okay";
+
+ accelerometer@4c {
+ compatible = "fsl,mma7660";
+ reg = <0x4c>;
+ };
+};
+
+&i2c2 {
+ status = "okay";
+
+ touchscreen@38 {
+ compatible = "edt,edt-ft5406";
+ reg = <0x38>;
+ interrupt-parent = <&pio>;
+ interrupts = <7 21 IRQ_TYPE_EDGE_FALLING>;
+ touchscreen-size-x = <800>;
+ touchscreen-size-y = <480>;
+ vcc-supply = <&reg_vcc3v3>;
+ };
+};
+
+&lradc {
+ vref-supply = <&reg_ldo2>;
+ status = "okay";
+
+ button-571 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ channel = <0>;
+ voltage = <571428>;
+ };
+
+ button-761 {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ channel = <0>;
+ voltage = <761904>;
+ };
+};
+
+&mmc0 {
+ vmmc-supply = <&reg_vcc3v3>;
+ bus-width = <4>;
+ cd-gpios = <&pio 7 1 GPIO_ACTIVE_LOW>; /* PH01 */
+ status = "okay";
+};
+
+&ohci0 {
+ status = "okay";
+};
+
+&ohci1 {
+ status = "okay";
+};
+
+&otg_sram {
+ status = "okay";
+};
+
+&pio {
+ vcc-pb-supply = <&reg_vcc3v3>;
+ vcc-pf-supply = <&reg_vcc3v3>;
+ vcc-ph-supply = <&reg_vcc3v3>;
+};
+
+&pwm {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_pin>;
+ status = "okay";
+};
+
+&reg_dcdc2 {
+ regulator-always-on;
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc3 {
+ regulator-always-on;
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-name = "vdd-int-dll";
+};
+
+&reg_ldo1 {
+ regulator-name = "vdd-rtc";
+};
+
+&reg_ldo2 {
+ regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-name = "avcc";
+};
+
+&reg_usb0_vbus {
+ status = "okay";
+};
+
+&reg_usb1_vbus {
+ status = "okay";
+};
+
+&reg_usb2_vbus {
+ status = "okay";
+};
+
+&tcon0_out {
+ tcon0_out_panel: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&panel_input>;
+ };
+};
+
+&uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart0_pb_pins>;
+ status = "okay";
+};
+
+&usb_otg {
+ dr_mode = "otg";
+ status = "okay";
+};
+
+&usb_power_supply {
+ status = "okay";
+};
+
+&usbphy {
+ usb0_id_det-gpios = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
+ usb0_vbus_det-gpios = <&pio 7 5 GPIO_ACTIVE_HIGH>; /* PH5 */
+ usb0_vbus-supply = <&reg_usb0_vbus>;
+ usb1_vbus-supply = <&reg_usb1_vbus>;
+ usb2_vbus-supply = <&reg_usb2_vbus>;
+ status = "okay";
+};
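The lradc node implements the volume keys with Allwinner's resistor-ladder ADC: each button grounds the channel through a different divider, and the node matches the resulting voltage, given in microvolts, to a key code. Adding a key is just another child with its own threshold; a sketch with assumed values:

button-400 {
	label = "Home";			/* label, code and voltage are assumptions */
	linux,code = <KEY_HOMEPAGE>;
	channel = <0>;
	voltage = <400000>;
};
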
diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
index 486cec6f71e0..236ebfc06192 100644
--- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
@@ -227,7 +227,7 @@
compatible = "x-powers,axp221";
reg = <0x68>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
x-powers,drive-vbus-en;
};
};
diff --git a/arch/arm/boot/dts/sun6i-a31-m9.dts b/arch/arm/boot/dts/sun6i-a31-m9.dts
index e4f3415e6108..7d2eaaf5c33e 100644
--- a/arch/arm/boot/dts/sun6i-a31-m9.dts
+++ b/arch/arm/boot/dts/sun6i-a31-m9.dts
@@ -116,7 +116,7 @@
compatible = "x-powers,axp221";
reg = <0x68>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts b/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
index 7bd4bdd66a76..83611434270c 100644
--- a/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
+++ b/arch/arm/boot/dts/sun6i-a31-mele-a1000g-quad.dts
@@ -116,7 +116,7 @@
compatible = "x-powers,axp221";
reg = <0x68>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index a75033e85fcb..a31f9072bf79 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -611,6 +611,7 @@
pio: pinctrl@1c20800 {
compatible = "allwinner,sun6i-a31-pinctrl";
reg = <0x01c20800 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
@@ -802,6 +803,7 @@
lradc: lradc@1c22800 {
compatible = "allwinner,sun4i-a10-lradc-keys";
reg = <0x01c22800 0x100>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -1299,6 +1301,7 @@
#clock-cells = <1>;
compatible = "allwinner,sun6i-a31-rtc";
reg = <0x01f00000 0x54>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&osc32k>;
@@ -1308,7 +1311,7 @@
r_intc: interrupt-controller@1f00c00 {
compatible = "allwinner,sun6i-a31-r-intc";
interrupt-controller;
- #interrupt-cells = <2>;
+ #interrupt-cells = <3>;
reg = <0x01f00c00 0x400>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
};
@@ -1383,6 +1386,7 @@
r_pio: pinctrl@1f02c00 {
compatible = "allwinner,sun6i-a31-r-pinctrl";
reg = <0x01f02c00 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&apb0_gates 0>, <&osc24M>, <&rtc 0>;
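This is the binding migration driving the rest of the sunxi hunks below: r_intc moves from a private two-cell specifier to three GIC-style cells, and peripherals whose interrupts are visible through both controllers gain interrupt-parent = <&r_intc> so they can serve as wakeup sources. The NMI, formerly r_intc's local input 0, is now written with its GIC number; for the AXP PMIC consumers that reads as:

/* old, 2-cell r_intc binding (local NMI input): */
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;

/* new, 3-cell GIC-style binding for the same line (NMI = SPI 32): */
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
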
diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
index 66bc6ca77afb..b32b70ada7fd 100644
--- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
@@ -159,7 +159,7 @@
compatible = "x-powers,axp221";
reg = <0x68>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
x-powers,drive-vbus-en;
};
};
diff --git a/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi b/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi
index 7455c0db4a8a..227ad489731c 100644
--- a/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi
@@ -79,7 +79,7 @@
compatible = "x-powers,axp221";
reg = <0x68>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
index efb25b949f30..96554ab4f6d3 100644
--- a/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-sinovoip-bpi-m2.dts
@@ -149,7 +149,7 @@
compatible = "x-powers,axp221";
reg = <0x68>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
eldoin-supply = <&reg_dcdc1>;
x-powers,drive-vbus-en;
};
diff --git a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
index cadc45255d7b..0b61f5368d44 100644
--- a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
@@ -99,7 +99,7 @@
compatible = "x-powers,axp221";
reg = <0x68>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi
index 6bf3fbdd738f..f38d19c6be8c 100644
--- a/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun6i-reference-design-tablet.dtsi
@@ -80,7 +80,7 @@
compatible = "x-powers,axp221";
reg = <0x68>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
drivevbus-supply = <&reg_vcc5v0>;
x-powers,drive-vbus-en;
};
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index a42fac676b31..4461d5098b20 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -338,6 +338,7 @@
pio: pinctrl@1c20800 {
/* compatible gets set in SoC specific dtsi file */
reg = <0x01c20800 0x400>;
+ interrupt-parent = <&r_intc>;
/* interrupts get set in SoC specific dtsi file */
clocks = <&ccu CLK_BUS_PIO>, <&osc24M>, <&rtc 0>;
clock-names = "apb", "hosc", "losc";
@@ -473,6 +474,7 @@
lradc: lradc@1c22800 {
compatible = "allwinner,sun4i-a10-lradc-keys";
reg = <0x01c22800 0x100>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -709,6 +711,7 @@
rtc: rtc@1f00000 {
compatible = "allwinner,sun8i-a23-rtc";
reg = <0x01f00000 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
clock-output-names = "osc32k", "osc32k-out";
@@ -719,7 +722,7 @@
r_intc: interrupt-controller@1f00c00 {
compatible = "allwinner,sun6i-a31-r-intc";
interrupt-controller;
- #interrupt-cells = <2>;
+ #interrupt-cells = <3>;
reg = <0x01f00c00 0x400>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
};
@@ -805,6 +808,7 @@
r_pio: pinctrl@1f02c00 {
compatible = "allwinner,sun8i-a23-r-pinctrl";
reg = <0x01f02c00 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&apb0_gates 0>, <&osc24M>, <&rtc 0>;
clock-names = "apb", "hosc", "losc";
diff --git a/arch/arm/boot/dts/sun8i-a33-olinuxino.dts b/arch/arm/boot/dts/sun8i-a33-olinuxino.dts
index 8538514c8588..6fee8f133508 100644
--- a/arch/arm/boot/dts/sun8i-a33-olinuxino.dts
+++ b/arch/arm/boot/dts/sun8i-a33-olinuxino.dts
@@ -99,7 +99,7 @@
compatible = "x-powers,axp223";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
eldoin-supply = <&reg_dcdc1>;
x-powers,drive-vbus-en;
};
diff --git a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
index d54a067fc76e..0c82ff3c7cb4 100644
--- a/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
+++ b/arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
@@ -166,7 +166,7 @@
compatible = "x-powers,axp223";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
eldoin-supply = <&reg_dcdc1>;
};
};
diff --git a/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts b/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts
index 9c006fc18821..c31c97d16024 100644
--- a/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts
@@ -122,7 +122,7 @@
compatible = "x-powers,axp818", "x-powers,axp813";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
eldoin-supply = <&reg_dcdc1>;
swin-supply = <&reg_dcdc1>;
};
@@ -142,7 +142,7 @@
ac100_rtc: rtc {
compatible = "x-powers,ac100-rtc";
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
clocks = <&ac100_codec>;
#clock-cells = <1>;
clock-output-names = "cko1_rtc",
diff --git a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
index b60016a4429c..5a7e1bd5f825 100644
--- a/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
@@ -203,7 +203,7 @@
compatible = "x-powers,axp813";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
eldoin-supply = <&reg_dcdc1>;
fldoin-supply = <&reg_dcdc5>;
swin-supply = <&reg_dcdc1>;
@@ -225,7 +225,7 @@
ac100_rtc: rtc {
compatible = "x-powers,ac100-rtc";
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
clocks = <&ac100_codec>;
#clock-cells = <1>;
clock-output-names = "cko1_rtc",
diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
index e26af7cf10e0..870993393fc2 100644
--- a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
@@ -239,7 +239,7 @@
compatible = "x-powers,axp818", "x-powers,axp813";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
eldoin-supply = <&reg_dcdc1>;
swin-supply = <&reg_dcdc1>;
x-powers,drive-vbus-en;
@@ -260,7 +260,7 @@
ac100_rtc: rtc {
compatible = "x-powers,ac100-rtc";
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
clocks = <&ac100_codec>;
#clock-cells = <1>;
clock-output-names = "cko1_rtc",
diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
index 83b01b03e08e..7fe2a584ddf9 100644
--- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
+++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
@@ -263,7 +263,7 @@
compatible = "x-powers,axp813";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
swin-supply = <&reg_dcdc1>;
x-powers,drive-vbus-en;
};
@@ -283,7 +283,7 @@
ac100_rtc: rtc {
compatible = "x-powers,ac100-rtc";
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
clocks = <&ac100_codec>;
#clock-cells = <1>;
clock-output-names = "cko1_rtc",
diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi
index bd898b250e74..ac97eac91349 100644
--- a/arch/arm/boot/dts/sun8i-a83t.dtsi
+++ b/arch/arm/boot/dts/sun8i-a83t.dtsi
@@ -708,6 +708,7 @@
pio: pinctrl@1c20800 {
compatible = "allwinner,sun8i-a83t-pinctrl";
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
@@ -1111,7 +1112,7 @@
compatible = "allwinner,sun8i-a83t-r-intc",
"allwinner,sun6i-a31-r-intc";
interrupt-controller;
- #interrupt-cells = <2>;
+ #interrupt-cells = <3>;
reg = <0x01f00c00 0x400>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
};
@@ -1147,6 +1148,7 @@
r_lradc: lradc@1f03c00 {
compatible = "allwinner,sun8i-a83t-r-lradc";
reg = <0x01f03c00 0x100>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -1154,6 +1156,7 @@
r_pio: pinctrl@1f02c00 {
compatible = "allwinner,sun8i-a83t-r-pinctrl";
reg = <0x01f02c00 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&r_ccu CLK_APB0_PIO>, <&osc24M>,
<&osc16Md512>;
diff --git a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
index f3f7a2c912ab..8e8634ff2f9d 100644
--- a/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
+++ b/arch/arm/boot/dts/sun8i-h2-plus-bananapi-m2-zero.dts
@@ -26,6 +26,17 @@
stdout-path = "serial0:115200n8";
};
+ connector {
+ compatible = "hdmi-connector";
+ type = "c";
+
+ port {
+ hdmi_con_in: endpoint {
+ remote-endpoint = <&hdmi_out_con>;
+ };
+ };
+ };
+
leds {
compatible = "gpio-leds";
@@ -103,10 +114,24 @@
cpu-supply = <&reg_vdd_cpux>;
};
+&de {
+ status = "okay";
+};
+
&ehci0 {
status = "okay";
};
+&hdmi {
+ status = "okay";
+};
+
+&hdmi_out {
+ hdmi_out_con: endpoint {
+ remote-endpoint = <&hdmi_con_in>;
+ };
+};
+
&mmc0 {
vmmc-supply = <&reg_vcc3v3>;
bus-width = <4>;
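The hunk above wires the board's display pipeline through the OF graph: the HDMI controller's output endpoint and the connector's input endpoint must reference each other, and &de and &hdmi are flipped to "okay". A trimmed sketch of the reciprocal link, assuming type "c" (mini-HDMI, matching this board's port):

        connector {
                compatible = "hdmi-connector";
                type = "c";                             /* mini-HDMI */

                port {
                        hdmi_con_in: endpoint {
                                remote-endpoint = <&hdmi_out_con>;
                        };
                };
        };

        &hdmi_out {
                hdmi_out_con: endpoint {
                        /* back-reference completing the graph link */
                        remote-endpoint = <&hdmi_con_in>;
                };
        };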
diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
index 62b5280ec093..f0e591e1c771 100644
--- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
@@ -111,6 +111,17 @@
#sound-dai-cells = <0>;
compatible = "linux,spdif-dit";
};
+
+ r-gpio-keys {
+ compatible = "gpio-keys";
+
+ power {
+ label = "power";
+ linux,code = <KEY_POWER>;
+ gpios = <&r_pio 0 3 GPIO_ACTIVE_LOW>;
+ wakeup-source;
+ };
+ };
};
&de {
diff --git a/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts b/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts
index 293016d081cd..bf5b5e2f6168 100644
--- a/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts
+++ b/arch/arm/boot/dts/sun8i-r16-bananapi-m2m.dts
@@ -164,7 +164,7 @@
compatible = "x-powers,axp223";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
eldoin-supply = <&reg_dcdc1>;
x-powers,drive-vbus-en;
};
diff --git a/arch/arm/boot/dts/sun8i-r16-parrot.dts b/arch/arm/boot/dts/sun8i-r16-parrot.dts
index 2be1b76fe2f6..95543a9c2118 100644
--- a/arch/arm/boot/dts/sun8i-r16-parrot.dts
+++ b/arch/arm/boot/dts/sun8i-r16-parrot.dts
@@ -165,7 +165,7 @@
compatible = "x-powers,axp223";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
drivevbus-supply = <&reg_vcc5v0>;
x-powers,drive-vbus-en;
};
diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
index 797d61cff11e..872d56caa9ce 100644
--- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
@@ -94,7 +94,7 @@
compatible = "x-powers,axp223";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
eldoin-supply = <&reg_dcdc1>;
drivevbus-supply = <&reg_vcc5v0>;
x-powers,drive-vbus-en;
diff --git a/arch/arm/boot/dts/sunxi-h3-h5.dtsi b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
index 9be13378d4df..c7428df9469e 100644
--- a/arch/arm/boot/dts/sunxi-h3-h5.dtsi
+++ b/arch/arm/boot/dts/sunxi-h3-h5.dtsi
@@ -395,6 +395,7 @@
pio: pinctrl@1c20800 {
/* compatible is in per SoC .dtsi file */
reg = <0x01c20800 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&ccu CLK_BUS_PIO>, <&osc24M>, <&rtc 0>;
@@ -852,6 +853,7 @@
rtc: rtc@1f00000 {
/* compatible is in per SoC .dtsi file */
reg = <0x01f00000 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
clock-output-names = "osc32k", "osc32k-out", "iosc";
@@ -859,6 +861,15 @@
#clock-cells = <1>;
};
+ r_intc: interrupt-controller@1f00c00 {
+ compatible = "allwinner,sun8i-h3-r-intc",
+ "allwinner,sun6i-a31-r-intc";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0x01f00c00 0x400>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
r_ccu: clock@1f01400 {
compatible = "allwinner,sun8i-h3-r-ccu";
reg = <0x01f01400 0x100>;
@@ -900,6 +911,7 @@
r_pio: pinctrl@1f02c00 {
compatible = "allwinner,sun8i-h3-r-pinctrl";
reg = <0x01f02c00 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&r_ccu CLK_APB0_PIO>, <&osc24M>, <&rtc 0>;
clock-names = "apb", "hosc", "losc";
diff --git a/arch/arm/boot/dts/tegra124-peripherals-opp.dtsi b/arch/arm/boot/dts/tegra124-peripherals-opp.dtsi
index 49d9420a3289..781ac8601030 100644
--- a/arch/arm/boot/dts/tegra124-peripherals-opp.dtsi
+++ b/arch/arm/boot/dts/tegra124-peripherals-opp.dtsi
@@ -128,24 +128,28 @@
opp-microvolt = <800000 800000 1150000>;
opp-hz = /bits/ 64 <204000000>;
opp-supported-hw = <0x0003>;
+ opp-suspend;
};
opp@204000000,950 {
opp-microvolt = <950000 950000 1150000>;
opp-hz = /bits/ 64 <204000000>;
opp-supported-hw = <0x0008>;
+ opp-suspend;
};
opp@204000000,1050 {
opp-microvolt = <1050000 1050000 1150000>;
opp-hz = /bits/ 64 <204000000>;
opp-supported-hw = <0x0010>;
+ opp-suspend;
};
opp@204000000,1110 {
opp-microvolt = <1110000 1110000 1150000>;
opp-hz = /bits/ 64 <204000000>;
opp-supported-hw = <0x0004>;
+ opp-suspend;
};
opp@264000000,800 {
@@ -360,6 +364,7 @@
opp-hz = /bits/ 64 <204000000>;
opp-supported-hw = <0x001F>;
opp-peak-kBps = <3264000>;
+ opp-suspend;
};
opp@264000000 {
diff --git a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
index d3b99535d755..2298fc034183 100644
--- a/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
+++ b/arch/arm/boot/dts/tegra20-acer-a500-picasso.dts
@@ -448,8 +448,10 @@
reset-gpios = <&gpio TEGRA_GPIO(Q, 7) GPIO_ACTIVE_LOW>;
- avdd-supply = <&vdd_3v3_sys>;
+ vdda-supply = <&vdd_3v3_sys>;
vdd-supply = <&vdd_3v3_sys>;
+
+ atmel,wakeup-method = <1>;
};
gyroscope@68 {
@@ -575,7 +577,7 @@
vdd_core: sm0 {
regulator-name = "vdd_sm0,vdd_core";
- regulator-min-microvolt = <1200000>;
+ regulator-min-microvolt = <950000>;
regulator-max-microvolt = <1300000>;
regulator-coupled-with = <&rtc_vdd &vdd_cpu>;
regulator-coupled-max-spread = <170000 550000>;
@@ -616,7 +618,7 @@
rtc_vdd: ldo2 {
regulator-name = "vdd_ldo2,vdd_rtc";
- regulator-min-microvolt = <1200000>;
+ regulator-min-microvolt = <950000>;
regulator-max-microvolt = <1300000>;
regulator-coupled-with = <&vdd_core &vdd_cpu>;
regulator-coupled-max-spread = <170000 550000>;
@@ -838,9 +840,10 @@
#cooling-cells = <2>;
};
- cpu@1 {
+ cpu1: cpu@1 {
cpu-supply = <&vdd_cpu>;
operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
};
};
@@ -1055,7 +1058,7 @@
trip0: cpu-alert0 {
/* start throttling at 50C */
temperature = <50000>;
- hysteresis = <3000>;
+ hysteresis = <200>;
type = "passive";
};
@@ -1070,7 +1073,8 @@
cooling-maps {
map0 {
trip = <&trip0>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
};
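In the cooling-map change above, each cooling-device entry carries the device phandle plus a minimum and maximum cooling state; THERMAL_NO_LIMIT at both ends leaves the full cpufreq state range to the thermal core. Every CPU registered as a cooling device via #cooling-cells = <2> should appear in the map, roughly:

        map0 {
                trip = <&trip0>;
                /* <device min-cooling-state max-cooling-state> */
                cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
                                 <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
        };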
diff --git a/arch/arm/boot/dts/tegra20-cpu-opp.dtsi b/arch/arm/boot/dts/tegra20-cpu-opp.dtsi
index 702a635e88e7..135de316383b 100644
--- a/arch/arm/boot/dts/tegra20-cpu-opp.dtsi
+++ b/arch/arm/boot/dts/tegra20-cpu-opp.dtsi
@@ -9,12 +9,14 @@
clock-latency-ns = <400000>;
opp-supported-hw = <0x0F 0x0003>;
opp-hz = /bits/ 64 <216000000>;
+ opp-suspend;
};
opp@216000000,800 {
clock-latency-ns = <400000>;
opp-supported-hw = <0x0F 0x0004>;
opp-hz = /bits/ 64 <216000000>;
+ opp-suspend;
};
opp@312000000,750 {
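The opp-suspend flag marks the OPP that the OPP core parks the clock at while entering system suspend. Because the opp-supported-hw masks split each frequency across speedo/process bins, every 216 MHz variant is tagged so that exactly one suspend OPP matches on any given chip, e.g.:

        opp@216000000,750 {
                clock-latency-ns = <400000>;
                opp-supported-hw = <0x0F 0x0003>;  /* bin-selection mask */
                opp-hz = /bits/ 64 <216000000>;
                opp-suspend;                       /* used across suspend */
        };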
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index 7e49112cd9a1..940a9f31cd86 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -387,10 +387,10 @@
core_vdd_reg: sm0 {
regulator-name = "+1.2vs_sm0,vdd_core";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1225000>;
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1300000>;
regulator-coupled-with = <&rtc_vdd_reg &cpu_vdd_reg>;
- regulator-coupled-max-spread = <170000 450000>;
+ regulator-coupled-max-spread = <170000 550000>;
regulator-always-on;
nvidia,tegra-core-regulator;
@@ -401,7 +401,7 @@
regulator-min-microvolt = <750000>;
regulator-max-microvolt = <1100000>;
regulator-coupled-with = <&core_vdd_reg &rtc_vdd_reg>;
- regulator-coupled-max-spread = <450000 450000>;
+ regulator-coupled-max-spread = <550000 550000>;
regulator-always-on;
nvidia,tegra-cpu-regulator;
@@ -425,10 +425,10 @@
rtc_vdd_reg: ldo2 {
regulator-name = "+1.2vs_ldo2,vdd_rtc";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1225000>;
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1300000>;
regulator-coupled-with = <&core_vdd_reg &cpu_vdd_reg>;
- regulator-coupled-max-spread = <170000 450000>;
+ regulator-coupled-max-spread = <170000 550000>;
regulator-always-on;
nvidia,tegra-rtc-regulator;
diff --git a/arch/arm/boot/dts/tegra20-peripherals-opp.dtsi b/arch/arm/boot/dts/tegra20-peripherals-opp.dtsi
index b84afecea154..ef3ad2e5f270 100644
--- a/arch/arm/boot/dts/tegra20-peripherals-opp.dtsi
+++ b/arch/arm/boot/dts/tegra20-peripherals-opp.dtsi
@@ -68,6 +68,7 @@
opp-microvolt = <1000000 1000000 1300000>;
opp-hz = /bits/ 64 <216000000>;
opp-supported-hw = <0x000F>;
+ opp-suspend;
};
opp@300000000 {
diff --git a/arch/arm/boot/dts/tegra20-ventana.dts b/arch/arm/boot/dts/tegra20-ventana.dts
index 055334ae3d28..99a356c1ccec 100644
--- a/arch/arm/boot/dts/tegra20-ventana.dts
+++ b/arch/arm/boot/dts/tegra20-ventana.dts
@@ -2,8 +2,10 @@
/dts-v1/;
#include <dt-bindings/input/input.h>
+#include <dt-bindings/thermal/thermal.h>
#include "tegra20.dtsi"
#include "tegra20-cpu-opp.dtsi"
+#include "tegra20-cpu-opp-microvolt.dtsi"
/ {
model = "NVIDIA Tegra20 Ventana evaluation board";
@@ -420,18 +422,28 @@
regulator-always-on;
};
- sm0 {
+ vdd_core: sm0 {
regulator-name = "vdd_sm0,vdd_core";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-coupled-with = <&rtc_vdd &vdd_cpu>;
+ regulator-coupled-max-spread = <170000 550000>;
regulator-always-on;
+ regulator-boot-on;
+
+ nvidia,tegra-core-regulator;
};
- sm1 {
+ vdd_cpu: sm1 {
regulator-name = "vdd_sm1,vdd_cpu";
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1000000>;
+ regulator-min-microvolt = <750000>;
+ regulator-max-microvolt = <1125000>;
+ regulator-coupled-with = <&vdd_core &rtc_vdd>;
+ regulator-coupled-max-spread = <550000 550000>;
regulator-always-on;
+ regulator-boot-on;
+
+ nvidia,tegra-cpu-regulator;
};
sm2_reg: sm2 {
@@ -450,10 +462,16 @@
regulator-always-on;
};
- ldo2 {
+ rtc_vdd: ldo2 {
regulator-name = "vdd_ldo2,vdd_rtc";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <1200000>;
+ regulator-min-microvolt = <950000>;
+ regulator-max-microvolt = <1300000>;
+ regulator-coupled-with = <&vdd_core &vdd_cpu>;
+ regulator-coupled-max-spread = <170000 550000>;
+ regulator-always-on;
+ regulator-boot-on;
+
+ nvidia,tegra-rtc-regulator;
};
ldo3 {
@@ -511,9 +529,10 @@
};
};
- temperature-sensor@4c {
+ nct1008: temperature-sensor@4c {
compatible = "onnn,nct1008";
reg = <0x4c>;
+ #thermal-sensor-cells = <1>;
};
};
@@ -595,11 +614,15 @@
cpus {
cpu0: cpu@0 {
+ cpu-supply = <&vdd_cpu>;
operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
};
- cpu@1 {
+ cpu1: cpu@1 {
+ cpu-supply = <&vdd_cpu>;
operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
};
};
@@ -697,4 +720,37 @@
<&tegra_car TEGRA20_CLK_CDEV1>;
clock-names = "pll_a", "pll_a_out0", "mclk";
};
+
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <1000>; /* milliseconds */
+ polling-delay = <5000>; /* milliseconds */
+
+ thermal-sensors = <&nct1008 1>;
+
+ trips {
+ trip0: cpu-alert0 {
+ /* start throttling at 50C */
+ temperature = <50000>;
+ hysteresis = <200>;
+ type = "passive";
+ };
+
+ trip1: cpu-crit {
+ /* shut down at 60C */
+ temperature = <60000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&trip0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
};
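The regulator changes above widen the rails' operating ranges and couple them: regulator-coupled-max-spread gives, per entry in regulator-coupled-with, the maximum voltage distance (in microvolts) the regulator core may allow between the two rails while cpufreq moves VDD_CPU around. A trimmed sketch of one side of the coupling:

        vdd_core: sm0 {
                regulator-min-microvolt = <950000>;
                regulator-max-microvolt = <1300000>;
                /* stay within 170 mV of vdd_rtc and 550 mV of vdd_cpu */
                regulator-coupled-with = <&rtc_vdd &vdd_cpu>;
                regulator-coupled-max-spread = <170000 550000>;
        };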
diff --git a/arch/arm/boot/dts/tegra30-apalis.dtsi b/arch/arm/boot/dts/tegra30-apalis.dtsi
index 6544ce70b46f..b2ac51fb15b1 100644
--- a/arch/arm/boot/dts/tegra30-apalis.dtsi
+++ b/arch/arm/boot/dts/tegra30-apalis.dtsi
@@ -860,6 +860,7 @@
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
interrupt-controller;
+ wakeup-source;
ti,system-power-controller;
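Adding wakeup-source to the PMIC's interrupt-controller node (here and on the other tps65911 boards below) lets its GIC line be armed as a system wakeup, so RTC alarms and power-key presses routed through the PMIC can resume the machine. A trimmed sketch, with the unit address as used on these boards:

        pmic@2d {
                compatible = "ti,tps65911";
                interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
                #interrupt-cells = <2>;
                interrupt-controller;
                wakeup-source;
        };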
diff --git a/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-common.dtsi b/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-common.dtsi
index ac1c1a63eb0e..dc773b1bf8ee 100644
--- a/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-common.dtsi
+++ b/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-common.dtsi
@@ -1056,19 +1056,22 @@
#cooling-cells = <2>;
};
- cpu@1 {
+ cpu1: cpu@1 {
cpu-supply = <&vdd_cpu>;
operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
};
- cpu@2 {
+ cpu2: cpu@2 {
cpu-supply = <&vdd_cpu>;
operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
};
- cpu@3 {
+ cpu3: cpu@3 {
cpu-supply = <&vdd_cpu>;
operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
};
};
@@ -1281,7 +1284,10 @@
cooling-maps {
map0 {
trip = <&trip0>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
};
diff --git a/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-ti-pmic.dtsi b/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-ti-pmic.dtsi
index bfc06b988781..b97da45ebdb4 100644
--- a/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-ti-pmic.dtsi
+++ b/arch/arm/boot/dts/tegra30-asus-nexus7-grouper-ti-pmic.dtsi
@@ -12,6 +12,7 @@
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
interrupt-controller;
+ wakeup-source;
ti,en-gpio-sleep = <0 0 1 0 0 0 0 0 0>;
ti,system-power-controller;
diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts
index e0624b74fb50..e159feeedef7 100644
--- a/arch/arm/boot/dts/tegra30-beaver.dts
+++ b/arch/arm/boot/dts/tegra30-beaver.dts
@@ -1776,6 +1776,7 @@
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
interrupt-controller;
+ wakeup-source;
ti,system-power-controller;
diff --git a/arch/arm/boot/dts/tegra30-cardhu-a04.dts b/arch/arm/boot/dts/tegra30-cardhu-a04.dts
index c1c0ca628af1..a11028b8b67b 100644
--- a/arch/arm/boot/dts/tegra30-cardhu-a04.dts
+++ b/arch/arm/boot/dts/tegra30-cardhu-a04.dts
@@ -2,8 +2,6 @@
/dts-v1/;
#include "tegra30-cardhu.dtsi"
-#include "tegra30-cpu-opp.dtsi"
-#include "tegra30-cpu-opp-microvolt.dtsi"
/* This dts file supports the cardhu A04 and later versions of the board */
@@ -92,50 +90,4 @@
enable-active-high;
gpio = <&gpio TEGRA_GPIO(DD, 0) GPIO_ACTIVE_HIGH>;
};
-
- i2c@7000d000 {
- pmic: tps65911@2d {
- regulators {
- vddctrl_reg: vddctrl {
- regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <1125000>;
- regulator-coupled-with = <&vddcore_reg>;
- regulator-coupled-max-spread = <300000>;
- regulator-max-step-microvolt = <100000>;
-
- nvidia,tegra-cpu-regulator;
- };
- };
- };
-
- vddcore_reg: tps62361@60 {
- regulator-coupled-with = <&vddctrl_reg>;
- regulator-coupled-max-spread = <300000>;
- regulator-max-step-microvolt = <100000>;
-
- nvidia,tegra-core-regulator;
- };
- };
-
- cpus {
- cpu0: cpu@0 {
- cpu-supply = <&vddctrl_reg>;
- operating-points-v2 = <&cpu0_opp_table>;
- };
-
- cpu@1 {
- cpu-supply = <&vddctrl_reg>;
- operating-points-v2 = <&cpu0_opp_table>;
- };
-
- cpu@2 {
- cpu-supply = <&vddctrl_reg>;
- operating-points-v2 = <&cpu0_opp_table>;
- };
-
- cpu@3 {
- cpu-supply = <&vddctrl_reg>;
- operating-points-v2 = <&cpu0_opp_table>;
- };
- };
};
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index dab9989fa760..2dff14b87f3e 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/input/input.h>
+#include <dt-bindings/thermal/thermal.h>
#include "tegra30.dtsi"
+#include "tegra30-cpu-opp.dtsi"
+#include "tegra30-cpu-opp-microvolt.dtsi"
/**
* This file contains the common DT entries for all fab versions of Cardhu.
@@ -240,6 +243,7 @@
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
interrupt-controller;
+ wakeup-source;
ti,system-power-controller;
@@ -272,9 +276,14 @@
vddctrl_reg: vddctrl {
regulator-name = "vdd_cpu,vdd_sys";
- regulator-min-microvolt = <1000000>;
- regulator-max-microvolt = <1000000>;
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1250000>;
+ regulator-coupled-with = <&vdd_core>;
+ regulator-coupled-max-spread = <300000>;
+ regulator-max-step-microvolt = <100000>;
regulator-always-on;
+
+ nvidia,tegra-cpu-regulator;
};
vio_reg: vio {
@@ -334,25 +343,31 @@
};
};
- temperature-sensor@4c {
+ nct1008: temperature-sensor@4c {
compatible = "onnn,nct1008";
reg = <0x4c>;
vcc-supply = <&sys_3v3_reg>;
interrupt-parent = <&gpio>;
interrupts = <TEGRA_GPIO(CC, 2) IRQ_TYPE_LEVEL_LOW>;
+ #thermal-sensor-cells = <1>;
};
- tps62361@60 {
+ vdd_core: tps62361@60 {
compatible = "ti,tps62361";
reg = <0x60>;
regulator-name = "tps62361-vout";
regulator-min-microvolt = <500000>;
regulator-max-microvolt = <1500000>;
+ regulator-coupled-with = <&vddctrl_reg>;
+ regulator-coupled-max-spread = <300000>;
+ regulator-max-step-microvolt = <100000>;
regulator-boot-on;
regulator-always-on;
ti,vsel0-state-high;
ti,vsel1-state-high;
+
+ nvidia,tegra-core-regulator;
};
};
@@ -424,6 +439,32 @@
#clock-cells = <0>;
};
+ cpus {
+ cpu0: cpu@0 {
+ cpu-supply = <&vddctrl_reg>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
+ };
+
+ cpu1: cpu@1 {
+ cpu-supply = <&vddctrl_reg>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
+ };
+
+ cpu2: cpu@2 {
+ cpu-supply = <&vddctrl_reg>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
+ };
+
+ cpu3: cpu@3 {
+ cpu-supply = <&vddctrl_reg>;
+ operating-points-v2 = <&cpu0_opp_table>;
+ #cooling-cells = <2>;
+ };
+ };
+
panel: panel {
compatible = "chunghwa,claa101wb01";
ddc-i2c-bus = <&panelddc>;
@@ -603,6 +644,41 @@
<&tegra_car TEGRA30_CLK_EXTERN1>;
};
+ thermal-zones {
+ cpu-thermal {
+ polling-delay-passive = <1000>; /* milliseconds */
+ polling-delay = <5000>; /* milliseconds */
+
+ thermal-sensors = <&nct1008 1>;
+
+ trips {
+ trip0: cpu-alert0 {
+ /* throttle at 57C until temperature drops to 56.8C */
+ temperature = <57000>;
+ hysteresis = <200>;
+ type = "passive";
+ };
+
+ trip1: cpu-crit {
+ /* shut down at 60C */
+ temperature = <60000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&trip0>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
gpio-keys {
compatible = "gpio-keys";
diff --git a/arch/arm/boot/dts/tegra30-colibri.dtsi b/arch/arm/boot/dts/tegra30-colibri.dtsi
index e36aa3ce6c3d..413e35215804 100644
--- a/arch/arm/boot/dts/tegra30-colibri.dtsi
+++ b/arch/arm/boot/dts/tegra30-colibri.dtsi
@@ -737,6 +737,7 @@
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
interrupt-controller;
+ wakeup-source;
ti,system-power-controller;
diff --git a/arch/arm/boot/dts/tegra30-cpu-opp.dtsi b/arch/arm/boot/dts/tegra30-cpu-opp.dtsi
index 0f7135006d19..72f2fe26cc0e 100644
--- a/arch/arm/boot/dts/tegra30-cpu-opp.dtsi
+++ b/arch/arm/boot/dts/tegra30-cpu-opp.dtsi
@@ -45,18 +45,21 @@
clock-latency-ns = <100000>;
opp-supported-hw = <0x1F 0x31FE>;
opp-hz = /bits/ 64 <204000000>;
+ opp-suspend;
};
opp@204000000,850 {
clock-latency-ns = <100000>;
opp-supported-hw = <0x1F 0x0C01>;
opp-hz = /bits/ 64 <204000000>;
+ opp-suspend;
};
opp@204000000,912 {
clock-latency-ns = <100000>;
opp-supported-hw = <0x1F 0x0200>;
opp-hz = /bits/ 64 <204000000>;
+ opp-suspend;
};
opp@312000000,850 {
diff --git a/arch/arm/boot/dts/tegra30-ouya.dts b/arch/arm/boot/dts/tegra30-ouya.dts
index 0368b3b816ef..9a10e0d69762 100644
--- a/arch/arm/boot/dts/tegra30-ouya.dts
+++ b/arch/arm/boot/dts/tegra30-ouya.dts
@@ -139,6 +139,7 @@
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <2>;
interrupt-controller;
+ wakeup-source;
ti,en-gpio-sleep = <0 1 1 1 1 1 0 0 1>;
ti,system-power-controller;
@@ -391,19 +392,23 @@
cpu-supply = <&vdd_cpu>;
#cooling-cells = <2>;
};
- cpu@1 {
+
+ cpu1: cpu@1 {
operating-points-v2 = <&cpu0_opp_table>;
cpu-supply = <&vdd_cpu>;
+ #cooling-cells = <2>;
};
- cpu@2 {
+ cpu2: cpu@2 {
operating-points-v2 = <&cpu0_opp_table>;
cpu-supply = <&vdd_cpu>;
+ #cooling-cells = <2>;
};
- cpu@3 {
+ cpu3: cpu@3 {
operating-points-v2 = <&cpu0_opp_table>;
cpu-supply = <&vdd_cpu>;
+ #cooling-cells = <2>;
};
};
@@ -455,7 +460,10 @@
};
map1 {
trip = <&cpu_alert1>;
- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&cpu3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
};
};
};
diff --git a/arch/arm/boot/dts/tegra30-peripherals-opp.dtsi b/arch/arm/boot/dts/tegra30-peripherals-opp.dtsi
index cbe84d25e726..2c9780319725 100644
--- a/arch/arm/boot/dts/tegra30-peripherals-opp.dtsi
+++ b/arch/arm/boot/dts/tegra30-peripherals-opp.dtsi
@@ -128,12 +128,14 @@
opp-microvolt = <1000000 1000000 1350000>;
opp-hz = /bits/ 64 <204000000>;
opp-supported-hw = <0x0007>;
+ opp-suspend;
};
opp@204000000,1250 {
opp-microvolt = <1250000 1250000 1350000>;
opp-hz = /bits/ 64 <204000000>;
opp-supported-hw = <0x0008>;
+ opp-suspend;
};
opp@333500000,1000 {
@@ -312,6 +314,7 @@
opp-hz = /bits/ 64 <204000000>;
opp-supported-hw = <0x000F>;
opp-peak-kBps = <1632000>;
+ opp-suspend;
};
opp@333500000 {
diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
index b0b15c97306b..e81e5937a60a 100644
--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
+++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
@@ -583,7 +583,7 @@
clocks = <&sys_clk 6>;
reset-names = "ether";
resets = <&sys_rst 6>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
local-mac-address = [00 00 00 00 00 00];
socionext,syscon-phy-mode = <&soc_glue 0>;
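The "rgmii" to "rgmii-id" switch above asks the PHY to insert the RGMII internal delays on both the RX and TX clock lines, instead of relying on MAC-side or board-trace delay; boards whose MAC adds no delay typically need this for a stable link. The consumer side is just the one property (the &eth label here is illustrative):

        &eth {
                phy-mode = "rgmii-id";  /* PHY provides RX and TX clock delays */
        };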
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index 6403b064e8dc..06c888a45eb3 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -46,6 +46,7 @@ CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_MTD=y
+CONFIG_MTD_TESTS=m
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_DATAFLASH=y
@@ -53,7 +54,7 @@ CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_ATMEL=y
CONFIG_MTD_SPI_NOR=y
CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_GLUEBI=y
+CONFIG_MTD_UBI_FASTMAP=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=4
@@ -131,6 +132,8 @@ CONFIG_MEDIA_SUPPORT_FILTER=y
# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_PLATFORM_SUPPORT=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=m
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_VIDEO_ATMEL_ISI=y
CONFIG_VIDEO_OV2640=m
diff --git a/arch/arm/configs/dove_defconfig b/arch/arm/configs/dove_defconfig
index e70c997d5f4c..b935162a8bba 100644
--- a/arch/arm/configs/dove_defconfig
+++ b/arch/arm/configs/dove_defconfig
@@ -63,7 +63,6 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_MOUSE_PS2 is not set
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=16
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index 81665b7abf83..a49e699e52de 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -213,7 +213,6 @@ CONFIG_POWER_SUPPLY=y
CONFIG_EZX_PCAP=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_DEBUG=y
-CONFIG_REGULATOR_USERSPACE_CONSUMER=y
CONFIG_REGULATOR_PCAP=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_VIDEO_DEV=y
diff --git a/arch/arm/configs/footbridge_defconfig b/arch/arm/configs/footbridge_defconfig
index 3a7938f244e5..7a32de51f0fa 100644
--- a/arch/arm/configs/footbridge_defconfig
+++ b/arch/arm/configs/footbridge_defconfig
@@ -7,7 +7,6 @@ CONFIG_EXPERT=y
CONFIG_MODULES=y
CONFIG_ARCH_FOOTBRIDGE=y
CONFIG_ARCH_CATS=y
-CONFIG_ARCH_PERSONAL_SERVER=y
CONFIG_ARCH_EBSA285_HOST=y
CONFIG_ARCH_NETWINDER=y
CONFIG_LEDS=y
@@ -65,7 +64,6 @@ CONFIG_PARIDE_ON26=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_NET_VENDOR_3COM=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index ae15a2a33802..118c4c927f26 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -194,7 +194,6 @@ CONFIG_POWER_SUPPLY=y
CONFIG_PMIC_DA903X=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_DEBUG=y
-CONFIG_REGULATOR_USERSPACE_CONSUMER=y
CONFIG_REGULATOR_DA903X=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_VIDEO_DEV=y
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index b4670d42f378..abde1fb23b20 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -72,7 +72,6 @@ CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_UINPUT=m
# CONFIG_SERIO is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_PXA=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig
index 6834e97af348..eacc089d86c5 100644
--- a/arch/arm/configs/moxart_defconfig
+++ b/arch/arm/configs/moxart_defconfig
@@ -79,7 +79,6 @@ CONFIG_INPUT_EVBUG=y
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=1
diff --git a/arch/arm/configs/mps2_defconfig b/arch/arm/configs/mps2_defconfig
index 1d923dbb9928..89f4a6ff30bd 100644
--- a/arch/arm/configs/mps2_defconfig
+++ b/arch/arm/configs/mps2_defconfig
@@ -69,7 +69,6 @@ CONFIG_SMSC911X=y
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_NONSTANDARD=y
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_MPS2_UART_CONSOLE=y
CONFIG_SERIAL_MPS2_UART=y
# CONFIG_HW_RANDOM is not set
diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig
index 9f862b21b40a..80a3ae02d759 100644
--- a/arch/arm/configs/multi_v5_defconfig
+++ b/arch/arm/configs/multi_v5_defconfig
@@ -33,6 +33,8 @@ CONFIG_SOC_IMX25=y
CONFIG_SOC_IMX27=y
CONFIG_ARCH_MVEBU=y
CONFIG_MACH_KIRKWOOD=y
+CONFIG_ARCH_NPCM=y
+CONFIG_ARCH_WPCM450=y
CONFIG_ARCH_ORION5X=y
CONFIG_MACH_DB88F5281=y
CONFIG_MACH_RD88F5182=y
@@ -178,6 +180,7 @@ CONFIG_THERMAL=y
CONFIG_KIRKWOOD_THERMAL=y
CONFIG_AT91SAM9X_WATCHDOG=y
CONFIG_ORION_WATCHDOG=y
+CONFIG_NPCM7XX_WATCHDOG=y
CONFIG_IMX2_WDT=y
CONFIG_MFD_ATMEL_HLCDC=y
# CONFIG_ABX500_CORE is not set
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 3823da605430..52a0400fdd92 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -79,7 +79,7 @@ CONFIG_ARCH_MSM8960=y
CONFIG_ARCH_MSM8974=y
CONFIG_ARCH_ROCKCHIP=y
CONFIG_ARCH_RENESAS=y
-CONFIG_ARCH_SOCFPGA=y
+CONFIG_ARCH_INTEL_SOCFPGA=y
CONFIG_PLAT_SPEAR=y
CONFIG_ARCH_SPEAR13XX=y
CONFIG_MACH_SPEAR1310=y
@@ -791,7 +791,6 @@ CONFIG_USB_XHCI_MVEBU=y
CONFIG_USB_XHCI_TEGRA=m
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_STI=y
-CONFIG_USB_EHCI_TEGRA=y
CONFIG_USB_EHCI_EXYNOS=m
CONFIG_USB_EHCI_MV=m
CONFIG_USB_OHCI_HCD=y
@@ -817,6 +816,7 @@ CONFIG_USB_DWC2=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_TEGRA=y
CONFIG_USB_ISP1760=y
CONFIG_USB_HSIC_USB3503=y
CONFIG_AB8500_USB=y
diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig
index 4f16716bfc32..d57ff30dabff 100644
--- a/arch/arm/configs/mvebu_v5_defconfig
+++ b/arch/arm/configs/mvebu_v5_defconfig
@@ -100,7 +100,6 @@ CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_LEGACY_PTY_COUNT=16
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_RUNTIME_UARTS=2
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index f250bf1cc022..77b4bea12feb 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -87,6 +87,155 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_NETDEV=m
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
+CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_NUMGEN=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_COUNTER=m
+CONFIG_NFT_CONNLIMIT=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_OBJREF=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
+CONFIG_NFT_REJECT=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+CONFIG_NFT_SYNPROXY=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
+CONFIG_NFT_REJECT_NETDEV=m
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPCOMP=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_L2TP=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
+CONFIG_NF_FLOW_TABLE_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
+CONFIG_NF_FLOW_TABLE_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_MATCH_SRH=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+CONFIG_NF_TABLES_BRIDGE=m
+CONFIG_NFT_BRIDGE_META=m
+CONFIG_NFT_BRIDGE_REJECT=m
+CONFIG_NF_LOG_BRIDGE=m
CONFIG_BRIDGE=m
CONFIG_BRIDGE_VLAN_FILTERING=y
CONFIG_VLAN_8021Q=m
@@ -137,7 +286,6 @@ CONFIG_PCI_EPF_TEST=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_OMAP_OCP2SCP=y
-CONFIG_SIMPLE_PM_BUS=y
CONFIG_CONNECTOR=m
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -165,6 +313,7 @@ CONFIG_SENSORS_TSL2550=m
CONFIG_SRAM=y
CONFIG_PCI_ENDPOINT_TEST=m
CONFIG_EEPROM_AT24=m
+CONFIG_EEPROM_AT25=m
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_ATA=y
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index bd7dd81c9c54..363f1b1b08e3 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
@@ -215,8 +215,6 @@ CONFIG_IIO=m
CONFIG_AD5446=m
CONFIG_EEPROM_AT24=m
CONFIG_SENSORS_LIS3_SPI=m
-CONFIG_IDE=m
-CONFIG_BLK_DEV_IDECS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=m
CONFIG_CHR_DEV_ST=m
@@ -415,7 +413,6 @@ CONFIG_MFD_TC6393XB=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_DEBUG=y
CONFIG_REGULATOR_FIXED_VOLTAGE=m
-CONFIG_REGULATOR_USERSPACE_CONSUMER=m
CONFIG_REGULATOR_ACT8865=m
CONFIG_REGULATOR_AS3711=m
CONFIG_REGULATOR_AXP20X=m
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index 3f36887e8333..26353cbfa968 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -215,6 +215,8 @@ CONFIG_DMADEVICES=y
CONFIG_QCOM_BAM_DMA=y
CONFIG_STAGING=y
CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_A7PLL=y
+CONFIG_QCOM_CLK_APCS_SDX55=y
CONFIG_QCOM_CLK_RPM=y
CONFIG_QCOM_CLK_RPMH=y
CONFIG_QCOM_CLK_SMD_RPM=y
@@ -232,11 +234,14 @@ CONFIG_ARM_SMMU=y
CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
CONFIG_REMOTEPROC=y
CONFIG_QCOM_ADSP_PIL=y
+CONFIG_QCOM_Q6V5_PAS=y
CONFIG_QCOM_Q6V5_PIL=y
CONFIG_QCOM_WCNSS_PIL=y
CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
CONFIG_RPMSG_QCOM_SMD=y
CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_GSBI=y
@@ -273,6 +278,7 @@ CONFIG_QCOM_QFPROM=y
CONFIG_INTERCONNECT=y
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_MSM8974=m
+CONFIG_INTERCONNECT_QCOM_SDX55=m
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
@@ -290,7 +296,7 @@ CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_DMA_CMA=y
-CONFIG_CMA_SIZE_MBYTES=256
+CONFIG_CMA_SIZE_MBYTES=64
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
@@ -299,3 +305,5 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_WATCHDOG=y
CONFIG_QCOM_WDT=y
CONFIG_ARM_PSCI=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPUFREQ_DT=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index f4c3c0652432..17db3b3e2dd3 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -8,11 +8,6 @@ CONFIG_CGROUPS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_FORCE_LOAD=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
CONFIG_ARCH_AT91=y
CONFIG_SOC_SAMA5D2=y
CONFIG_SOC_SAMA5D3=y
@@ -25,9 +20,14 @@ CONFIG_KEXEC=y
CONFIG_VFP=y
CONFIG_NEON=y
CONFIG_KERNEL_MODE_NEON=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_PM_DEBUG=y
CONFIG_PM_ADVANCED_DEBUG=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -55,6 +55,7 @@ CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_MTD=y
+CONFIG_MTD_TESTS=m
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
@@ -62,7 +63,7 @@ CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_ATMEL=y
CONFIG_MTD_SPI_NOR=y
CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_GLUEBI=y
+CONFIG_MTD_UBI_FASTMAP=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=4
@@ -75,8 +76,8 @@ CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
CONFIG_NET_DSA_MICROCHIP_KSZ9477=m
CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI=m
-CONFIG_MACB=y
# CONFIG_NET_VENDOR_BROADCOM is not set
+CONFIG_MACB=y
# CONFIG_NET_VENDOR_CIRRUS is not set
# CONFIG_NET_VENDOR_FARADAY is not set
# CONFIG_NET_VENDOR_INTEL is not set
@@ -89,9 +90,12 @@ CONFIG_MACB=y
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_MICREL_PHY=y
+CONFIG_USB_LAN78XX=m
CONFIG_LIBERTAS_THINFIRM=m
CONFIG_LIBERTAS_THINFIRM_USB=m
-CONFIG_RTL8187=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
+CONFIG_MWIFIEX_USB=m
CONFIG_RT2X00=m
CONFIG_RT2500USB=m
CONFIG_RT73USB=m
@@ -99,10 +103,7 @@ CONFIG_RT2800USB=m
CONFIG_RT2800USB_RT53XX=y
CONFIG_RT2800USB_RT55XX=y
CONFIG_RT2800USB_UNKNOWN=y
-CONFIG_MWIFIEX=m
-CONFIG_MWIFIEX_SDIO=m
-CONFIG_MWIFIEX_USB=m
-# CONFIG_INPUT_MOUSEDEV is not set
+CONFIG_RTL8187=m
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_QT1070=y
@@ -124,10 +125,10 @@ CONFIG_SPI_ATMEL=y
CONFIG_SPI_ATMEL_QUADSPI=y
CONFIG_SPI_GPIO=y
CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_SAMA5D2_PIOBU=m
+CONFIG_GPIO_SAMA5D2_PIOBU=y
+CONFIG_POWER_RESET=y
CONFIG_POWER_SUPPLY=y
CONFIG_BATTERY_ACT8945A=y
-CONFIG_POWER_RESET=y
CONFIG_SENSORS_JC42=m
CONFIG_WATCHDOG=y
CONFIG_AT91SAM9X_WATCHDOG=y
@@ -139,13 +140,15 @@ CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_ACT8865=y
CONFIG_REGULATOR_ACT8945A=y
-CONFIG_REGULATOR_MCP16502=m
+CONFIG_REGULATOR_MCP16502=y
CONFIG_REGULATOR_PWM=m
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_SUPPORT_FILTER=y
# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_PLATFORM_SUPPORT=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=m
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_VIDEO_ATMEL_ISC=y
CONFIG_VIDEO_ATMEL_ISI=y
@@ -166,11 +169,12 @@ CONFIG_SND=y
CONFIG_SND_SOC=y
CONFIG_SND_ATMEL_SOC=y
CONFIG_SND_ATMEL_SOC_WM8904=y
-# CONFIG_HID_GENERIC is not set
CONFIG_SND_ATMEL_SOC_CLASSD=y
CONFIG_SND_ATMEL_SOC_PDMIC=y
CONFIG_SND_ATMEL_SOC_TSE850_PCM5142=m
CONFIG_SND_ATMEL_SOC_I2S=y
+CONFIG_SND_SOC_MIKROE_PROTO=m
+# CONFIG_HID_GENERIC is not set
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_EHCI_HCD=y
@@ -201,6 +205,9 @@ CONFIG_RTC_DRV_AT91RM9200=y
CONFIG_DMADEVICES=y
CONFIG_AT_HDMAC=y
CONFIG_AT_XDMAC=y
+CONFIG_STAGING=y
+CONFIG_STAGING_MEDIA=y
+CONFIG_VIDEO_HANTRO=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_IIO=y
CONFIG_AT91_ADC=y
@@ -224,14 +231,14 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_CODEPAGE_850=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_DEV_ATMEL_AES=y
+CONFIG_CRYPTO_DEV_ATMEL_TDES=y
+CONFIG_CRYPTO_DEV_ATMEL_SHA=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_MEMORY_INIT=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_FTRACE is not set
CONFIG_DEBUG_USER=y
-CONFIG_CRYPTO_USER_API_HASH=m
-CONFIG_CRYPTO_USER_API_SKCIPHER=m
-CONFIG_CRYPTO_DEV_ATMEL_AES=y
-CONFIG_CRYPTO_DEV_ATMEL_TDES=y
-CONFIG_CRYPTO_DEV_ATMEL_SHA=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 01a6ebdf033a..f25b09efb816 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -220,6 +220,5 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=64
CONFIG_PRINTK_TIME=y
-# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index 0c60eb382c80..2d9404ea52c6 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -9,7 +9,7 @@ CONFIG_NAMESPACES=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
CONFIG_PROFILING=y
-CONFIG_ARCH_SOCFPGA=y
+CONFIG_ARCH_INTEL_SOCFPGA=y
CONFIG_ARM_THUMBEE=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index bcedfe1422aa..dbb1ef601762 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -55,6 +55,7 @@ CONFIG_KEYBOARD_GPIO=y
CONFIG_KEYBOARD_NOMADIK=y
CONFIG_KEYBOARD_STMPE=y
CONFIG_KEYBOARD_TC3589X=y
+CONFIG_KEYBOARD_TM2_TOUCHKEY=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
@@ -74,10 +75,12 @@ CONFIG_SERIAL_DEV_BUS=y
CONFIG_HW_RANDOM=y
CONFIG_I2C_GPIO=y
CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_STMPE=y
CONFIG_GPIO_TC3589X=y
CONFIG_SENSORS_IIO_HWMON=y
+CONFIG_SENSORS_NTC_THERMISTOR=y
CONFIG_THERMAL=y
CONFIG_CPU_THERMAL=y
CONFIG_WATCHDOG=y
@@ -85,6 +88,10 @@ CONFIG_MFD_STMPE=y
CONFIG_MFD_TC3589X=y
CONFIG_REGULATOR_AB8500=y
CONFIG_REGULATOR_GPIO=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L2_FLASH_LED_CLASS=y
CONFIG_DRM=y
CONFIG_DRM_PANEL_NOVATEK_NT35510=y
CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=y
@@ -112,10 +119,12 @@ CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_CLASS_FLASH=y
CONFIG_LEDS_LM3530=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_LP55XX_COMMON=y
CONFIG_LEDS_LP5521=y
+CONFIG_LEDS_RT8515=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_AB8500=y
@@ -124,16 +133,21 @@ CONFIG_DMADEVICES=y
CONFIG_STE_DMA40=y
CONFIG_HWSPINLOCK=y
CONFIG_HSEM_U8500=y
+CONFIG_EXTCON_FSA9480=y
CONFIG_IIO=y
CONFIG_IIO_SW_TRIGGER=y
CONFIG_BMA180=y
+CONFIG_BMC150_ACCEL=y
CONFIG_IIO_ST_ACCEL_3AXIS=y
+CONFIG_IIO_RESCALE=y
+CONFIG_MPU3050_I2C=y
CONFIG_IIO_ST_GYRO_3AXIS=y
CONFIG_INV_MPU6050_I2C=y
CONFIG_BH1780=y
CONFIG_GP2AP002=y
CONFIG_AK8974=y
CONFIG_IIO_ST_MAGN_3AXIS=y
+CONFIG_YAMAHA_YAS530=y
CONFIG_IIO_HRTIMER_TRIGGER=y
CONFIG_IIO_ST_PRESS=y
CONFIG_EXT2_FS=y
diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig
index f1fbdfc5c8c6..4d8e7f2eaef7 100644
--- a/arch/arm/configs/xcep_defconfig
+++ b/arch/arm/configs/xcep_defconfig
@@ -53,7 +53,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_PXA=y
CONFIG_SERIAL_PXA_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 8f26c454ea12..eafa898ba6a7 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -45,20 +45,12 @@ poly1305-arm-y := poly1305-core.o poly1305-glue.o
nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o
curve25519-neon-y := curve25519-core.o curve25519-glue.o
-ifdef REGENERATE_ARM_CRYPTO
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) > $(@)
-$(src)/poly1305-core.S_shipped: $(src)/poly1305-armv4.pl
+$(obj)/%-core.S: $(src)/%-armv4.pl
$(call cmd,perl)
-$(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl
- $(call cmd,perl)
-
-$(src)/sha512-core.S_shipped: $(src)/sha512-armv4.pl
- $(call cmd,perl)
-endif
-
clean-files += poly1305-core.S sha256-core.S sha512-core.S
# massage the perlasm code a bit so we only get the NEON routine if we need it
diff --git a/arch/arm/crypto/aes-cipher-core.S b/arch/arm/crypto/aes-cipher-core.S
index 472e56d09eea..1da3f41359aa 100644
--- a/arch/arm/crypto/aes-cipher-core.S
+++ b/arch/arm/crypto/aes-cipher-core.S
@@ -99,28 +99,6 @@
__hround \out2, \out3, \in2, \in1, \in0, \in3, \in1, \in0, 0, \sz, \op, \oldcpsr
.endm
- .macro __rev, out, in
- .if __LINUX_ARM_ARCH__ < 6
- lsl t0, \in, #24
- and t1, \in, #0xff00
- and t2, \in, #0xff0000
- orr \out, t0, \in, lsr #24
- orr \out, \out, t1, lsl #8
- orr \out, \out, t2, lsr #8
- .else
- rev \out, \in
- .endif
- .endm
-
- .macro __adrl, out, sym, c
- .if __LINUX_ARM_ARCH__ < 7
- ldr\c \out, =\sym
- .else
- movw\c \out, #:lower16:\sym
- movt\c \out, #:upper16:\sym
- .endif
- .endm
-
.macro do_crypt, round, ttab, ltab, bsz
push {r3-r11, lr}
@@ -133,10 +111,10 @@
ldr r7, [in, #12]
#ifdef CONFIG_CPU_BIG_ENDIAN
- __rev r4, r4
- __rev r5, r5
- __rev r6, r6
- __rev r7, r7
+ rev_l r4, t0
+ rev_l r5, t0
+ rev_l r6, t0
+ rev_l r7, t0
#endif
eor r4, r4, r8
@@ -144,7 +122,7 @@
eor r6, r6, r10
eor r7, r7, r11
- __adrl ttab, \ttab
+ mov_l ttab, \ttab
/*
* Disable interrupts and prefetch the 1024-byte 'ft' or 'it' table into
* L1 cache, assuming cacheline size >= 32. This is a hardening measure
@@ -180,7 +158,7 @@
2: .ifb \ltab
add ttab, ttab, #1
.else
- __adrl ttab, \ltab
+ mov_l ttab, \ltab
// Prefetch inverse S-box for final round; see explanation above
.set i, 0
.rept 256 / 64
@@ -194,10 +172,10 @@
\round r4, r5, r6, r7, r8, r9, r10, r11, \bsz, b, rounds
#ifdef CONFIG_CPU_BIG_ENDIAN
- __rev r4, r4
- __rev r5, r5
- __rev r6, r6
- __rev r7, r7
+ rev_l r4, t0
+ rev_l r5, t0
+ rev_l r6, t0
+ rev_l r7, t0
#endif
ldr out, [sp]
diff --git a/arch/arm/crypto/blake2b-neon-glue.c b/arch/arm/crypto/blake2b-neon-glue.c
index 34d73200e7fa..4b59d027ba4a 100644
--- a/arch/arm/crypto/blake2b-neon-glue.c
+++ b/arch/arm/crypto/blake2b-neon-glue.c
@@ -85,8 +85,8 @@ static int __init blake2b_neon_mod_init(void)
static void __exit blake2b_neon_mod_exit(void)
{
- return crypto_unregister_shashes(blake2b_neon_algs,
- ARRAY_SIZE(blake2b_neon_algs));
+ crypto_unregister_shashes(blake2b_neon_algs,
+ ARRAY_SIZE(blake2b_neon_algs));
}
module_init(blake2b_neon_mod_init);
diff --git a/arch/arm/crypto/blake2s-core.S b/arch/arm/crypto/blake2s-core.S
index bed897e9a181..86345751bbf3 100644
--- a/arch/arm/crypto/blake2s-core.S
+++ b/arch/arm/crypto/blake2s-core.S
@@ -8,6 +8,7 @@
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
// Registers used to hold message words temporarily. There aren't
// enough ARM registers to hold the whole message block, so we have to
@@ -38,6 +39,23 @@
#endif
.endm
+.macro _le32_bswap a, tmp
+#ifdef __ARMEB__
+ rev_l \a, \tmp
+#endif
+.endm
+
+.macro _le32_bswap_8x a, b, c, d, e, f, g, h, tmp
+ _le32_bswap \a, \tmp
+ _le32_bswap \b, \tmp
+ _le32_bswap \c, \tmp
+ _le32_bswap \d, \tmp
+ _le32_bswap \e, \tmp
+ _le32_bswap \f, \tmp
+ _le32_bswap \g, \tmp
+ _le32_bswap \h, \tmp
+.endm
+
// Execute a quarter-round of BLAKE2s by mixing two columns or two diagonals.
// (a0, b0, c0, d0) and (a1, b1, c1, d1) give the registers containing the two
// columns/diagonals. s0-s1 are the word offsets to the message words the first
@@ -180,8 +198,10 @@ ENTRY(blake2s_compress_arch)
tst r1, #3
bne .Lcopy_block_misaligned
ldmia r1!, {r2-r9}
+ _le32_bswap_8x r2, r3, r4, r5, r6, r7, r8, r9, r14
stmia r12!, {r2-r9}
ldmia r1!, {r2-r9}
+ _le32_bswap_8x r2, r3, r4, r5, r6, r7, r8, r9, r14
stmia r12, {r2-r9}
.Lcopy_block_done:
str r1, [sp, #68] // Update message pointer
@@ -268,6 +288,7 @@ ENTRY(blake2s_compress_arch)
1:
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
ldr r3, [r1], #4
+ _le32_bswap r3, r4
#else
ldrb r3, [r1, #0]
ldrb r4, [r1, #1]
diff --git a/arch/arm/crypto/chacha-scalar-core.S b/arch/arm/crypto/chacha-scalar-core.S
index 2985b80a45b5..083fe1ab96d0 100644
--- a/arch/arm/crypto/chacha-scalar-core.S
+++ b/arch/arm/crypto/chacha-scalar-core.S
@@ -41,32 +41,15 @@
X14 .req r12
X15 .req r14
-.macro __rev out, in, t0, t1, t2
-.if __LINUX_ARM_ARCH__ >= 6
- rev \out, \in
-.else
- lsl \t0, \in, #24
- and \t1, \in, #0xff00
- and \t2, \in, #0xff0000
- orr \out, \t0, \in, lsr #24
- orr \out, \out, \t1, lsl #8
- orr \out, \out, \t2, lsr #8
-.endif
-.endm
-
-.macro _le32_bswap x, t0, t1, t2
+.macro _le32_bswap_4x a, b, c, d, tmp
#ifdef __ARMEB__
- __rev \x, \x, \t0, \t1, \t2
+ rev_l \a, \tmp
+ rev_l \b, \tmp
+ rev_l \c, \tmp
+ rev_l \d, \tmp
#endif
.endm
-.macro _le32_bswap_4x a, b, c, d, t0, t1, t2
- _le32_bswap \a, \t0, \t1, \t2
- _le32_bswap \b, \t0, \t1, \t2
- _le32_bswap \c, \t0, \t1, \t2
- _le32_bswap \d, \t0, \t1, \t2
-.endm
-
.macro __ldrd a, b, src, offset
#if __LINUX_ARM_ARCH__ >= 6
ldrd \a, \b, [\src, #\offset]
@@ -200,7 +183,7 @@
add X1, X1, r9
add X2, X2, r10
add X3, X3, r11
- _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10
+ _le32_bswap_4x X0, X1, X2, X3, r8
ldmia r12!, {r8-r11}
eor X0, X0, r8
eor X1, X1, r9
@@ -216,7 +199,7 @@
ldmia r12!, {X0-X3}
add X6, r10, X6, ror #brot
add X7, r11, X7, ror #brot
- _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10
+ _le32_bswap_4x X4, X5, X6, X7, r8
eor X4, X4, X0
eor X5, X5, X1
eor X6, X6, X2
@@ -231,7 +214,7 @@
add r1, r1, r9 // x9
add r6, r6, r10 // x10
add r7, r7, r11 // x11
- _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10
+ _le32_bswap_4x r0, r1, r6, r7, r8
ldmia r12!, {r8-r11}
eor r0, r0, r8 // x8
eor r1, r1, r9 // x9
@@ -245,7 +228,7 @@
add r3, r9, r3, ror #drot // x13
add r4, r10, r4, ror #drot // x14
add r5, r11, r5, ror #drot // x15
- _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11
+ _le32_bswap_4x r2, r3, r4, r5, r9
ldr r9, [sp, #72] // load LEN
eor r2, r2, r0 // x12
eor r3, r3, r1 // x13
@@ -301,7 +284,7 @@
add X1, X1, r9
add X2, X2, r10
add X3, X3, r11
- _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10
+ _le32_bswap_4x X0, X1, X2, X3, r8
stmia r14!, {X0-X3}
// Save keystream for x4-x7
@@ -311,7 +294,7 @@
add X5, r9, X5, ror #brot
add X6, r10, X6, ror #brot
add X7, r11, X7, ror #brot
- _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10
+ _le32_bswap_4x X4, X5, X6, X7, r8
add r8, sp, #64
stmia r14!, {X4-X7}
@@ -323,7 +306,7 @@
add r1, r1, r9 // x9
add r6, r6, r10 // x10
add r7, r7, r11 // x11
- _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10
+ _le32_bswap_4x r0, r1, r6, r7, r8
stmia r14!, {r0,r1,r6,r7}
__ldrd r8, r9, sp, 144
__ldrd r10, r11, sp, 152
@@ -331,7 +314,7 @@
add r3, r9, r3, ror #drot // x13
add r4, r10, r4, ror #drot // x14
add r5, r11, r5, ror #drot // x15
- _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11
+ _le32_bswap_4x r2, r3, r4, r5, r9
stmia r14, {r2-r5}
// Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN
diff --git a/arch/arm/crypto/curve25519-core.S b/arch/arm/crypto/curve25519-core.S
index be18af52e7dc..b697fa5d059a 100644
--- a/arch/arm/crypto/curve25519-core.S
+++ b/arch/arm/crypto/curve25519-core.S
@@ -10,8 +10,8 @@
#include <linux/linkage.h>
.text
-.fpu neon
.arch armv7-a
+.fpu neon
.align 4
ENTRY(curve25519_neon)
diff --git a/arch/arm/crypto/poly1305-core.S_shipped b/arch/arm/crypto/poly1305-core.S_shipped
deleted file mode 100644
index 37b71d990293..000000000000
--- a/arch/arm/crypto/poly1305-core.S_shipped
+++ /dev/null
@@ -1,1158 +0,0 @@
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#else
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
-# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__
-# define poly1305_init poly1305_init_arm
-# define poly1305_blocks poly1305_blocks_arm
-# define poly1305_emit poly1305_emit_arm
-.globl poly1305_blocks_neon
-#endif
-
-#if defined(__thumb2__)
-.syntax unified
-.thumb
-#else
-.code 32
-#endif
-
-.text
-
-.globl poly1305_emit
-.globl poly1305_blocks
-.globl poly1305_init
-.type poly1305_init,%function
-.align 5
-poly1305_init:
-.Lpoly1305_init:
- stmdb sp!,{r4-r11}
-
- eor r3,r3,r3
- cmp r1,#0
- str r3,[r0,#0] @ zero hash value
- str r3,[r0,#4]
- str r3,[r0,#8]
- str r3,[r0,#12]
- str r3,[r0,#16]
- str r3,[r0,#36] @ clear is_base2_26
- add r0,r0,#20
-
-#ifdef __thumb2__
- it eq
-#endif
- moveq r0,#0
- beq .Lno_key
-
-#if __ARM_MAX_ARCH__>=7
- mov r3,#-1
- str r3,[r0,#28] @ impossible key power value
-# ifndef __KERNEL__
- adr r11,.Lpoly1305_init
- ldr r12,.LOPENSSL_armcap
-# endif
-#endif
- ldrb r4,[r1,#0]
- mov r10,#0x0fffffff
- ldrb r5,[r1,#1]
- and r3,r10,#-4 @ 0x0ffffffc
- ldrb r6,[r1,#2]
- ldrb r7,[r1,#3]
- orr r4,r4,r5,lsl#8
- ldrb r5,[r1,#4]
- orr r4,r4,r6,lsl#16
- ldrb r6,[r1,#5]
- orr r4,r4,r7,lsl#24
- ldrb r7,[r1,#6]
- and r4,r4,r10
-
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-# if !defined(_WIN32)
- ldr r12,[r11,r12] @ OPENSSL_armcap_P
-# endif
-# if defined(__APPLE__) || defined(_WIN32)
- ldr r12,[r12]
-# endif
-#endif
- ldrb r8,[r1,#7]
- orr r5,r5,r6,lsl#8
- ldrb r6,[r1,#8]
- orr r5,r5,r7,lsl#16
- ldrb r7,[r1,#9]
- orr r5,r5,r8,lsl#24
- ldrb r8,[r1,#10]
- and r5,r5,r3
-
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
- tst r12,#ARMV7_NEON @ check for NEON
-# ifdef __thumb2__
- adr r9,.Lpoly1305_blocks_neon
- adr r11,.Lpoly1305_blocks
- it ne
- movne r11,r9
- adr r12,.Lpoly1305_emit
- orr r11,r11,#1 @ thumb-ify addresses
- orr r12,r12,#1
-# else
- add r12,r11,#(.Lpoly1305_emit-.Lpoly1305_init)
- ite eq
- addeq r11,r11,#(.Lpoly1305_blocks-.Lpoly1305_init)
- addne r11,r11,#(.Lpoly1305_blocks_neon-.Lpoly1305_init)
-# endif
-#endif
- ldrb r9,[r1,#11]
- orr r6,r6,r7,lsl#8
- ldrb r7,[r1,#12]
- orr r6,r6,r8,lsl#16
- ldrb r8,[r1,#13]
- orr r6,r6,r9,lsl#24
- ldrb r9,[r1,#14]
- and r6,r6,r3
-
- ldrb r10,[r1,#15]
- orr r7,r7,r8,lsl#8
- str r4,[r0,#0]
- orr r7,r7,r9,lsl#16
- str r5,[r0,#4]
- orr r7,r7,r10,lsl#24
- str r6,[r0,#8]
- and r7,r7,r3
- str r7,[r0,#12]
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
- stmia r2,{r11,r12} @ fill functions table
- mov r0,#1
-#else
- mov r0,#0
-#endif
-.Lno_key:
- ldmia sp!,{r4-r11}
-#if __ARM_ARCH__>=5
- bx lr @ bx lr
-#else
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size poly1305_init,.-poly1305_init
-.type poly1305_blocks,%function
-.align 5
-poly1305_blocks:
-.Lpoly1305_blocks:
- stmdb sp!,{r3-r11,lr}
-
- ands r2,r2,#-16
- beq .Lno_data
-
- add r2,r2,r1 @ end pointer
- sub sp,sp,#32
-
-#if __ARM_ARCH__<7
- ldmia r0,{r4-r12} @ load context
- add r0,r0,#20
- str r2,[sp,#16] @ offload stuff
- str r0,[sp,#12]
-#else
- ldr lr,[r0,#36] @ is_base2_26
- ldmia r0!,{r4-r8} @ load hash value
- str r2,[sp,#16] @ offload stuff
- str r0,[sp,#12]
-
- adds r9,r4,r5,lsl#26 @ base 2^26 -> base 2^32
- mov r10,r5,lsr#6
- adcs r10,r10,r6,lsl#20
- mov r11,r6,lsr#12
- adcs r11,r11,r7,lsl#14
- mov r12,r7,lsr#18
- adcs r12,r12,r8,lsl#8
- mov r2,#0
- teq lr,#0
- str r2,[r0,#16] @ clear is_base2_26
- adc r2,r2,r8,lsr#24
-
- itttt ne
- movne r4,r9 @ choose between radixes
- movne r5,r10
- movne r6,r11
- movne r7,r12
- ldmia r0,{r9-r12} @ load key
- it ne
- movne r8,r2
-#endif
-
- mov lr,r1
- cmp r3,#0
- str r10,[sp,#20]
- str r11,[sp,#24]
- str r12,[sp,#28]
- b .Loop
-
-.align 4
-.Loop:
-#if __ARM_ARCH__<7
- ldrb r0,[lr],#16 @ load input
-# ifdef __thumb2__
- it hi
-# endif
- addhi r8,r8,#1 @ 1<<128
- ldrb r1,[lr,#-15]
- ldrb r2,[lr,#-14]
- ldrb r3,[lr,#-13]
- orr r1,r0,r1,lsl#8
- ldrb r0,[lr,#-12]
- orr r2,r1,r2,lsl#16
- ldrb r1,[lr,#-11]
- orr r3,r2,r3,lsl#24
- ldrb r2,[lr,#-10]
- adds r4,r4,r3 @ accumulate input
-
- ldrb r3,[lr,#-9]
- orr r1,r0,r1,lsl#8
- ldrb r0,[lr,#-8]
- orr r2,r1,r2,lsl#16
- ldrb r1,[lr,#-7]
- orr r3,r2,r3,lsl#24
- ldrb r2,[lr,#-6]
- adcs r5,r5,r3
-
- ldrb r3,[lr,#-5]
- orr r1,r0,r1,lsl#8
- ldrb r0,[lr,#-4]
- orr r2,r1,r2,lsl#16
- ldrb r1,[lr,#-3]
- orr r3,r2,r3,lsl#24
- ldrb r2,[lr,#-2]
- adcs r6,r6,r3
-
- ldrb r3,[lr,#-1]
- orr r1,r0,r1,lsl#8
- str lr,[sp,#8] @ offload input pointer
- orr r2,r1,r2,lsl#16
- add r10,r10,r10,lsr#2
- orr r3,r2,r3,lsl#24
-#else
- ldr r0,[lr],#16 @ load input
- it hi
- addhi r8,r8,#1 @ padbit
- ldr r1,[lr,#-12]
- ldr r2,[lr,#-8]
- ldr r3,[lr,#-4]
-# ifdef __ARMEB__
- rev r0,r0
- rev r1,r1
- rev r2,r2
- rev r3,r3
-# endif
- adds r4,r4,r0 @ accumulate input
- str lr,[sp,#8] @ offload input pointer
- adcs r5,r5,r1
- add r10,r10,r10,lsr#2
- adcs r6,r6,r2
-#endif
- add r11,r11,r11,lsr#2
- adcs r7,r7,r3
- add r12,r12,r12,lsr#2
-
- umull r2,r3,r5,r9
- adc r8,r8,#0
- umull r0,r1,r4,r9
- umlal r2,r3,r8,r10
- umlal r0,r1,r7,r10
- ldr r10,[sp,#20] @ reload r10
- umlal r2,r3,r6,r12
- umlal r0,r1,r5,r12
- umlal r2,r3,r7,r11
- umlal r0,r1,r6,r11
- umlal r2,r3,r4,r10
- str r0,[sp,#0] @ future r4
- mul r0,r11,r8
- ldr r11,[sp,#24] @ reload r11
- adds r2,r2,r1 @ d1+=d0>>32
- eor r1,r1,r1
- adc lr,r3,#0 @ future r6
- str r2,[sp,#4] @ future r5
-
- mul r2,r12,r8
- eor r3,r3,r3
- umlal r0,r1,r7,r12
- ldr r12,[sp,#28] @ reload r12
- umlal r2,r3,r7,r9
- umlal r0,r1,r6,r9
- umlal r2,r3,r6,r10
- umlal r0,r1,r5,r10
- umlal r2,r3,r5,r11
- umlal r0,r1,r4,r11
- umlal r2,r3,r4,r12
- ldr r4,[sp,#0]
- mul r8,r9,r8
- ldr r5,[sp,#4]
-
- adds r6,lr,r0 @ d2+=d1>>32
- ldr lr,[sp,#8] @ reload input pointer
- adc r1,r1,#0
- adds r7,r2,r1 @ d3+=d2>>32
- ldr r0,[sp,#16] @ reload end pointer
- adc r3,r3,#0
- add r8,r8,r3 @ h4+=d3>>32
-
- and r1,r8,#-4
- and r8,r8,#3
- add r1,r1,r1,lsr#2 @ *=5
- adds r4,r4,r1
- adcs r5,r5,#0
- adcs r6,r6,#0
- adcs r7,r7,#0
- adc r8,r8,#0
-
- cmp r0,lr @ done yet?
- bhi .Loop
-
- ldr r0,[sp,#12]
- add sp,sp,#32
- stmdb r0,{r4-r8} @ store the result
-
-.Lno_data:
-#if __ARM_ARCH__>=5
- ldmia sp!,{r3-r11,pc}
-#else
- ldmia sp!,{r3-r11,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size poly1305_blocks,.-poly1305_blocks
-.type poly1305_emit,%function
-.align 5
-poly1305_emit:
-.Lpoly1305_emit:
- stmdb sp!,{r4-r11}
-
- ldmia r0,{r3-r7}
-
-#if __ARM_ARCH__>=7
- ldr ip,[r0,#36] @ is_base2_26
-
- adds r8,r3,r4,lsl#26 @ base 2^26 -> base 2^32
- mov r9,r4,lsr#6
- adcs r9,r9,r5,lsl#20
- mov r10,r5,lsr#12
- adcs r10,r10,r6,lsl#14
- mov r11,r6,lsr#18
- adcs r11,r11,r7,lsl#8
- mov r0,#0
- adc r0,r0,r7,lsr#24
-
- tst ip,ip
- itttt ne
- movne r3,r8
- movne r4,r9
- movne r5,r10
- movne r6,r11
- it ne
- movne r7,r0
-#endif
-
- adds r8,r3,#5 @ compare to modulus
- adcs r9,r4,#0
- adcs r10,r5,#0
- adcs r11,r6,#0
- adc r0,r7,#0
- tst r0,#4 @ did it carry/borrow?
-
-#ifdef __thumb2__
- it ne
-#endif
- movne r3,r8
- ldr r8,[r2,#0]
-#ifdef __thumb2__
- it ne
-#endif
- movne r4,r9
- ldr r9,[r2,#4]
-#ifdef __thumb2__
- it ne
-#endif
- movne r5,r10
- ldr r10,[r2,#8]
-#ifdef __thumb2__
- it ne
-#endif
- movne r6,r11
- ldr r11,[r2,#12]
-
- adds r3,r3,r8
- adcs r4,r4,r9
- adcs r5,r5,r10
- adc r6,r6,r11
-
-#if __ARM_ARCH__>=7
-# ifdef __ARMEB__
- rev r3,r3
- rev r4,r4
- rev r5,r5
- rev r6,r6
-# endif
- str r3,[r1,#0]
- str r4,[r1,#4]
- str r5,[r1,#8]
- str r6,[r1,#12]
-#else
- strb r3,[r1,#0]
- mov r3,r3,lsr#8
- strb r4,[r1,#4]
- mov r4,r4,lsr#8
- strb r5,[r1,#8]
- mov r5,r5,lsr#8
- strb r6,[r1,#12]
- mov r6,r6,lsr#8
-
- strb r3,[r1,#1]
- mov r3,r3,lsr#8
- strb r4,[r1,#5]
- mov r4,r4,lsr#8
- strb r5,[r1,#9]
- mov r5,r5,lsr#8
- strb r6,[r1,#13]
- mov r6,r6,lsr#8
-
- strb r3,[r1,#2]
- mov r3,r3,lsr#8
- strb r4,[r1,#6]
- mov r4,r4,lsr#8
- strb r5,[r1,#10]
- mov r5,r5,lsr#8
- strb r6,[r1,#14]
- mov r6,r6,lsr#8
-
- strb r3,[r1,#3]
- strb r4,[r1,#7]
- strb r5,[r1,#11]
- strb r6,[r1,#15]
-#endif
- ldmia sp!,{r4-r11}
-#if __ARM_ARCH__>=5
- bx lr @ bx lr
-#else
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size poly1305_emit,.-poly1305_emit
-#if __ARM_MAX_ARCH__>=7
-.fpu neon
-
-.type poly1305_init_neon,%function
-.align 5
-poly1305_init_neon:
-.Lpoly1305_init_neon:
- ldr r3,[r0,#48] @ first table element
- cmp r3,#-1 @ is value impossible?
- bne .Lno_init_neon
-
- ldr r4,[r0,#20] @ load key base 2^32
- ldr r5,[r0,#24]
- ldr r6,[r0,#28]
- ldr r7,[r0,#32]
-
- and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
- mov r3,r4,lsr#26
- mov r4,r5,lsr#20
- orr r3,r3,r5,lsl#6
- mov r5,r6,lsr#14
- orr r4,r4,r6,lsl#12
- mov r6,r7,lsr#8
- orr r5,r5,r7,lsl#18
- and r3,r3,#0x03ffffff
- and r4,r4,#0x03ffffff
- and r5,r5,#0x03ffffff
-
- vdup.32 d0,r2 @ r^1 in both lanes
- add r2,r3,r3,lsl#2 @ *5
- vdup.32 d1,r3
- add r3,r4,r4,lsl#2
- vdup.32 d2,r2
- vdup.32 d3,r4
- add r4,r5,r5,lsl#2
- vdup.32 d4,r3
- vdup.32 d5,r5
- add r5,r6,r6,lsl#2
- vdup.32 d6,r4
- vdup.32 d7,r6
- vdup.32 d8,r5
-
- mov r5,#2 @ counter
-
-.Lsquare_neon:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
- @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
- @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
- @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
- @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
-
- vmull.u32 q5,d0,d0[1]
- vmull.u32 q6,d1,d0[1]
- vmull.u32 q7,d3,d0[1]
- vmull.u32 q8,d5,d0[1]
- vmull.u32 q9,d7,d0[1]
-
- vmlal.u32 q5,d7,d2[1]
- vmlal.u32 q6,d0,d1[1]
- vmlal.u32 q7,d1,d1[1]
- vmlal.u32 q8,d3,d1[1]
- vmlal.u32 q9,d5,d1[1]
-
- vmlal.u32 q5,d5,d4[1]
- vmlal.u32 q6,d7,d4[1]
- vmlal.u32 q8,d1,d3[1]
- vmlal.u32 q7,d0,d3[1]
- vmlal.u32 q9,d3,d3[1]
-
- vmlal.u32 q5,d3,d6[1]
- vmlal.u32 q8,d0,d5[1]
- vmlal.u32 q6,d5,d6[1]
- vmlal.u32 q7,d7,d6[1]
- vmlal.u32 q9,d1,d5[1]
-
- vmlal.u32 q8,d7,d8[1]
- vmlal.u32 q5,d1,d8[1]
- vmlal.u32 q6,d3,d8[1]
- vmlal.u32 q7,d5,d8[1]
- vmlal.u32 q9,d0,d7[1]
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
- @ and P. Schwabe
- @
- @ H0>>+H1>>+H2>>+H3>>+H4
- @ H3>>+H4>>*5+H0>>+H1
- @
- @ Trivia.
- @
- @ Result of multiplication of n-bit number by m-bit number is
-	@ n+m bits wide. However! Even though 2^n is an n+1-bit number,
- @ m-bit number multiplied by 2^n is still n+m bits wide.
- @
- @ Sum of two n-bit numbers is n+1 bits wide, sum of three - n+2,
- @ and so is sum of four. Sum of 2^m n-m-bit numbers and n-bit
- @ one is n+1 bits wide.
- @
- @ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that
- @ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4
- @ can be 27. However! In cases when their width exceeds 26 bits
- @ they are limited by 2^26+2^6. This in turn means that *sum*
- @ of the products with these values can still be viewed as sum
- @ of 52-bit numbers as long as the amount of addends is not a
- @ power of 2. For example,
- @
- @ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4,
- @
- @ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or
- @ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than
-	@ 8 * (2^52) or 2^55. However, the value is then multiplied
-	@ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12),
- @ which is less than 32 * (2^52) or 2^57. And when processing
- @ data we are looking at triple as many addends...
- @
- @ In key setup procedure pre-reduced H0 is limited by 5*4+1 and
- @ 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing the
- @ input H0 is limited by (5*4+1)*3 addends, or 58 bits, while
- @ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? vmlal.u32
- @ instruction accepts 2x32-bit input and writes 2x64-bit result.
-	@ This means that the result of reduction has to be compressed upon
- @ loop wrap-around. This can be done in the process of reduction
- @ to minimize amount of instructions [as well as amount of
- @ 128-bit instructions, which benefits low-end processors], but
- @ one has to watch for H2 (which is narrower than H0) and 5*H4
- @ not being wider than 58 bits, so that result of right shift
- @ by 26 bits fits in 32 bits. This is also useful on x86,
-	@ because it allows using paddd in place of paddq, which
- @ benefits Atom, where paddq is ridiculously slow.
-
- vshr.u64 q15,q8,#26
- vmovn.i64 d16,q8
- vshr.u64 q4,q5,#26
- vmovn.i64 d10,q5
- vadd.i64 q9,q9,q15 @ h3 -> h4
- vbic.i32 d16,#0xfc000000 @ &=0x03ffffff
- vadd.i64 q6,q6,q4 @ h0 -> h1
- vbic.i32 d10,#0xfc000000
-
- vshrn.u64 d30,q9,#26
- vmovn.i64 d18,q9
- vshr.u64 q4,q6,#26
- vmovn.i64 d12,q6
- vadd.i64 q7,q7,q4 @ h1 -> h2
- vbic.i32 d18,#0xfc000000
- vbic.i32 d12,#0xfc000000
-
- vadd.i32 d10,d10,d30
- vshl.u32 d30,d30,#2
- vshrn.u64 d8,q7,#26
- vmovn.i64 d14,q7
- vadd.i32 d10,d10,d30 @ h4 -> h0
- vadd.i32 d16,d16,d8 @ h2 -> h3
- vbic.i32 d14,#0xfc000000
-
- vshr.u32 d30,d10,#26
- vbic.i32 d10,#0xfc000000
- vshr.u32 d8,d16,#26
- vbic.i32 d16,#0xfc000000
- vadd.i32 d12,d12,d30 @ h0 -> h1
- vadd.i32 d18,d18,d8 @ h3 -> h4
-
- subs r5,r5,#1
- beq .Lsquare_break_neon
-
- add r6,r0,#(48+0*9*4)
- add r7,r0,#(48+1*9*4)
-
- vtrn.32 d0,d10 @ r^2:r^1
- vtrn.32 d3,d14
- vtrn.32 d5,d16
- vtrn.32 d1,d12
- vtrn.32 d7,d18
-
- vshl.u32 d4,d3,#2 @ *5
- vshl.u32 d6,d5,#2
- vshl.u32 d2,d1,#2
- vshl.u32 d8,d7,#2
- vadd.i32 d4,d4,d3
- vadd.i32 d2,d2,d1
- vadd.i32 d6,d6,d5
- vadd.i32 d8,d8,d7
-
- vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]!
- vst4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]!
- vst4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- vst4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vst1.32 {d8[0]},[r6,:32]
- vst1.32 {d8[1]},[r7,:32]
-
- b .Lsquare_neon
-
-.align 4
-.Lsquare_break_neon:
- add r6,r0,#(48+2*4*9)
- add r7,r0,#(48+3*4*9)
-
- vmov d0,d10 @ r^4:r^3
- vshl.u32 d2,d12,#2 @ *5
- vmov d1,d12
- vshl.u32 d4,d14,#2
- vmov d3,d14
- vshl.u32 d6,d16,#2
- vmov d5,d16
- vshl.u32 d8,d18,#2
- vmov d7,d18
- vadd.i32 d2,d2,d12
- vadd.i32 d4,d4,d14
- vadd.i32 d6,d6,d16
- vadd.i32 d8,d8,d18
-
- vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]!
- vst4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]!
- vst4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- vst4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vst1.32 {d8[0]},[r6]
- vst1.32 {d8[1]},[r7]
-
-.Lno_init_neon:
- bx lr @ bx lr
-.size poly1305_init_neon,.-poly1305_init_neon
-
-.type poly1305_blocks_neon,%function
-.align 5
-poly1305_blocks_neon:
-.Lpoly1305_blocks_neon:
- ldr ip,[r0,#36] @ is_base2_26
-
- cmp r2,#64
- blo .Lpoly1305_blocks
-
- stmdb sp!,{r4-r7}
- vstmdb sp!,{d8-d15} @ ABI specification says so
-
- tst ip,ip @ is_base2_26?
- bne .Lbase2_26_neon
-
- stmdb sp!,{r1-r3,lr}
- bl .Lpoly1305_init_neon
-
- ldr r4,[r0,#0] @ load hash value base 2^32
- ldr r5,[r0,#4]
- ldr r6,[r0,#8]
- ldr r7,[r0,#12]
- ldr ip,[r0,#16]
-
- and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26
- mov r3,r4,lsr#26
- veor d10,d10,d10
- mov r4,r5,lsr#20
- orr r3,r3,r5,lsl#6
- veor d12,d12,d12
- mov r5,r6,lsr#14
- orr r4,r4,r6,lsl#12
- veor d14,d14,d14
- mov r6,r7,lsr#8
- orr r5,r5,r7,lsl#18
- veor d16,d16,d16
- and r3,r3,#0x03ffffff
- orr r6,r6,ip,lsl#24
- veor d18,d18,d18
- and r4,r4,#0x03ffffff
- mov r1,#1
- and r5,r5,#0x03ffffff
- str r1,[r0,#36] @ set is_base2_26
-
- vmov.32 d10[0],r2
- vmov.32 d12[0],r3
- vmov.32 d14[0],r4
- vmov.32 d16[0],r5
- vmov.32 d18[0],r6
- adr r5,.Lzeros
-
- ldmia sp!,{r1-r3,lr}
- b .Lhash_loaded
-
-.align 4
-.Lbase2_26_neon:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ load hash value
-
- veor d10,d10,d10
- veor d12,d12,d12
- veor d14,d14,d14
- veor d16,d16,d16
- veor d18,d18,d18
- vld4.32 {d10[0],d12[0],d14[0],d16[0]},[r0]!
- adr r5,.Lzeros
- vld1.32 {d18[0]},[r0]
- sub r0,r0,#16 @ rewind
-
-.Lhash_loaded:
- add r4,r1,#32
- mov r3,r3,lsl#24
- tst r2,#31
- beq .Leven
-
- vld4.32 {d20[0],d22[0],d24[0],d26[0]},[r1]!
- vmov.32 d28[0],r3
- sub r2,r2,#16
- add r4,r1,#32
-
-# ifdef __ARMEB__
- vrev32.8 q10,q10
- vrev32.8 q13,q13
- vrev32.8 q11,q11
- vrev32.8 q12,q12
-# endif
- vsri.u32 d28,d26,#8 @ base 2^32 -> base 2^26
- vshl.u32 d26,d26,#18
-
- vsri.u32 d26,d24,#14
- vshl.u32 d24,d24,#12
- vadd.i32 d29,d28,d18 @ add hash value and move to #hi
-
- vbic.i32 d26,#0xfc000000
- vsri.u32 d24,d22,#20
- vshl.u32 d22,d22,#6
-
- vbic.i32 d24,#0xfc000000
- vsri.u32 d22,d20,#26
- vadd.i32 d27,d26,d16
-
- vbic.i32 d20,#0xfc000000
- vbic.i32 d22,#0xfc000000
- vadd.i32 d25,d24,d14
-
- vadd.i32 d21,d20,d10
- vadd.i32 d23,d22,d12
-
- mov r7,r5
- add r6,r0,#48
-
- cmp r2,r2
- b .Long_tail
-
-.align 4
-.Leven:
- subs r2,r2,#64
- it lo
- movlo r4,r5
-
- vmov.i32 q14,#1<<24 @ padbit, yes, always
- vld4.32 {d20,d22,d24,d26},[r1] @ inp[0:1]
- add r1,r1,#64
- vld4.32 {d21,d23,d25,d27},[r4] @ inp[2:3] (or 0)
- add r4,r4,#64
- itt hi
- addhi r7,r0,#(48+1*9*4)
- addhi r6,r0,#(48+3*9*4)
-
-# ifdef __ARMEB__
- vrev32.8 q10,q10
- vrev32.8 q13,q13
- vrev32.8 q11,q11
- vrev32.8 q12,q12
-# endif
- vsri.u32 q14,q13,#8 @ base 2^32 -> base 2^26
- vshl.u32 q13,q13,#18
-
- vsri.u32 q13,q12,#14
- vshl.u32 q12,q12,#12
-
- vbic.i32 q13,#0xfc000000
- vsri.u32 q12,q11,#20
- vshl.u32 q11,q11,#6
-
- vbic.i32 q12,#0xfc000000
- vsri.u32 q11,q10,#26
-
- vbic.i32 q10,#0xfc000000
- vbic.i32 q11,#0xfc000000
-
- bls .Lskip_loop
-
- vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^2
- vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^4
- vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- b .Loop_neon
-
-.align 5
-.Loop_neon:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
- @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
- @ ___________________/
- @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
- @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
- @ ___________________/ ____________________/
- @
- @ Note that we start with inp[2:3]*r^2. This is because it
- @ doesn't depend on reduction in previous iteration.
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
- @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
- @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
- @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
- @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ inp[2:3]*r^2
-
- vadd.i32 d24,d24,d14 @ accumulate inp[0:1]
- vmull.u32 q7,d25,d0[1]
- vadd.i32 d20,d20,d10
- vmull.u32 q5,d21,d0[1]
- vadd.i32 d26,d26,d16
- vmull.u32 q8,d27,d0[1]
- vmlal.u32 q7,d23,d1[1]
- vadd.i32 d22,d22,d12
- vmull.u32 q6,d23,d0[1]
-
- vadd.i32 d28,d28,d18
- vmull.u32 q9,d29,d0[1]
- subs r2,r2,#64
- vmlal.u32 q5,d29,d2[1]
- it lo
- movlo r4,r5
- vmlal.u32 q8,d25,d1[1]
- vld1.32 d8[1],[r7,:32]
- vmlal.u32 q6,d21,d1[1]
- vmlal.u32 q9,d27,d1[1]
-
- vmlal.u32 q5,d27,d4[1]
- vmlal.u32 q8,d23,d3[1]
- vmlal.u32 q9,d25,d3[1]
- vmlal.u32 q6,d29,d4[1]
- vmlal.u32 q7,d21,d3[1]
-
- vmlal.u32 q8,d21,d5[1]
- vmlal.u32 q5,d25,d6[1]
- vmlal.u32 q9,d23,d5[1]
- vmlal.u32 q6,d27,d6[1]
- vmlal.u32 q7,d29,d6[1]
-
- vmlal.u32 q8,d29,d8[1]
- vmlal.u32 q5,d23,d8[1]
- vmlal.u32 q9,d21,d7[1]
- vmlal.u32 q6,d25,d8[1]
- vmlal.u32 q7,d27,d8[1]
-
- vld4.32 {d21,d23,d25,d27},[r4] @ inp[2:3] (or 0)
- add r4,r4,#64
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ (hash+inp[0:1])*r^4 and accumulate
-
- vmlal.u32 q8,d26,d0[0]
- vmlal.u32 q5,d20,d0[0]
- vmlal.u32 q9,d28,d0[0]
- vmlal.u32 q6,d22,d0[0]
- vmlal.u32 q7,d24,d0[0]
- vld1.32 d8[0],[r6,:32]
-
- vmlal.u32 q8,d24,d1[0]
- vmlal.u32 q5,d28,d2[0]
- vmlal.u32 q9,d26,d1[0]
- vmlal.u32 q6,d20,d1[0]
- vmlal.u32 q7,d22,d1[0]
-
- vmlal.u32 q8,d22,d3[0]
- vmlal.u32 q5,d26,d4[0]
- vmlal.u32 q9,d24,d3[0]
- vmlal.u32 q6,d28,d4[0]
- vmlal.u32 q7,d20,d3[0]
-
- vmlal.u32 q8,d20,d5[0]
- vmlal.u32 q5,d24,d6[0]
- vmlal.u32 q9,d22,d5[0]
- vmlal.u32 q6,d26,d6[0]
- vmlal.u32 q8,d28,d8[0]
-
- vmlal.u32 q7,d28,d6[0]
- vmlal.u32 q5,d22,d8[0]
- vmlal.u32 q9,d20,d7[0]
- vmov.i32 q14,#1<<24 @ padbit, yes, always
- vmlal.u32 q6,d24,d8[0]
- vmlal.u32 q7,d26,d8[0]
-
- vld4.32 {d20,d22,d24,d26},[r1] @ inp[0:1]
- add r1,r1,#64
-# ifdef __ARMEB__
- vrev32.8 q10,q10
- vrev32.8 q11,q11
- vrev32.8 q12,q12
- vrev32.8 q13,q13
-# endif
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ lazy reduction interleaved with base 2^32 -> base 2^26 of
- @ inp[0:3] previously loaded to q10-q13 and smashed to q10-q14.
-
- vshr.u64 q15,q8,#26
- vmovn.i64 d16,q8
- vshr.u64 q4,q5,#26
- vmovn.i64 d10,q5
- vadd.i64 q9,q9,q15 @ h3 -> h4
- vbic.i32 d16,#0xfc000000
- vsri.u32 q14,q13,#8 @ base 2^32 -> base 2^26
- vadd.i64 q6,q6,q4 @ h0 -> h1
- vshl.u32 q13,q13,#18
- vbic.i32 d10,#0xfc000000
-
- vshrn.u64 d30,q9,#26
- vmovn.i64 d18,q9
- vshr.u64 q4,q6,#26
- vmovn.i64 d12,q6
- vadd.i64 q7,q7,q4 @ h1 -> h2
- vsri.u32 q13,q12,#14
- vbic.i32 d18,#0xfc000000
- vshl.u32 q12,q12,#12
- vbic.i32 d12,#0xfc000000
-
- vadd.i32 d10,d10,d30
- vshl.u32 d30,d30,#2
- vbic.i32 q13,#0xfc000000
- vshrn.u64 d8,q7,#26
- vmovn.i64 d14,q7
- vaddl.u32 q5,d10,d30 @ h4 -> h0 [widen for a sec]
- vsri.u32 q12,q11,#20
- vadd.i32 d16,d16,d8 @ h2 -> h3
- vshl.u32 q11,q11,#6
- vbic.i32 d14,#0xfc000000
- vbic.i32 q12,#0xfc000000
-
- vshrn.u64 d30,q5,#26 @ re-narrow
- vmovn.i64 d10,q5
- vsri.u32 q11,q10,#26
- vbic.i32 q10,#0xfc000000
- vshr.u32 d8,d16,#26
- vbic.i32 d16,#0xfc000000
- vbic.i32 d10,#0xfc000000
- vadd.i32 d12,d12,d30 @ h0 -> h1
- vadd.i32 d18,d18,d8 @ h3 -> h4
- vbic.i32 q11,#0xfc000000
-
- bhi .Loop_neon
-
-.Lskip_loop:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
-
- add r7,r0,#(48+0*9*4)
- add r6,r0,#(48+1*9*4)
- adds r2,r2,#32
- it ne
- movne r2,#0
- bne .Long_tail
-
- vadd.i32 d25,d24,d14 @ add hash value and move to #hi
- vadd.i32 d21,d20,d10
- vadd.i32 d27,d26,d16
- vadd.i32 d23,d22,d12
- vadd.i32 d29,d28,d18
-
-.Long_tail:
- vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^1
- vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^2
-
- vadd.i32 d24,d24,d14 @ can be redundant
- vmull.u32 q7,d25,d0
- vadd.i32 d20,d20,d10
- vmull.u32 q5,d21,d0
- vadd.i32 d26,d26,d16
- vmull.u32 q8,d27,d0
- vadd.i32 d22,d22,d12
- vmull.u32 q6,d23,d0
- vadd.i32 d28,d28,d18
- vmull.u32 q9,d29,d0
-
- vmlal.u32 q5,d29,d2
- vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vmlal.u32 q8,d25,d1
- vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- vmlal.u32 q6,d21,d1
- vmlal.u32 q9,d27,d1
- vmlal.u32 q7,d23,d1
-
- vmlal.u32 q8,d23,d3
- vld1.32 d8[1],[r7,:32]
- vmlal.u32 q5,d27,d4
- vld1.32 d8[0],[r6,:32]
- vmlal.u32 q9,d25,d3
- vmlal.u32 q6,d29,d4
- vmlal.u32 q7,d21,d3
-
- vmlal.u32 q8,d21,d5
- it ne
- addne r7,r0,#(48+2*9*4)
- vmlal.u32 q5,d25,d6
- it ne
- addne r6,r0,#(48+3*9*4)
- vmlal.u32 q9,d23,d5
- vmlal.u32 q6,d27,d6
- vmlal.u32 q7,d29,d6
-
- vmlal.u32 q8,d29,d8
- vorn q0,q0,q0 @ all-ones, can be redundant
- vmlal.u32 q5,d23,d8
- vshr.u64 q0,q0,#38
- vmlal.u32 q9,d21,d7
- vmlal.u32 q6,d25,d8
- vmlal.u32 q7,d27,d8
-
- beq .Lshort_tail
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ (hash+inp[0:1])*r^4:r^3 and accumulate
-
- vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^3
- vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^4
-
- vmlal.u32 q7,d24,d0
- vmlal.u32 q5,d20,d0
- vmlal.u32 q8,d26,d0
- vmlal.u32 q6,d22,d0
- vmlal.u32 q9,d28,d0
-
- vmlal.u32 q5,d28,d2
- vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]!
- vmlal.u32 q8,d24,d1
- vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]!
- vmlal.u32 q6,d20,d1
- vmlal.u32 q9,d26,d1
- vmlal.u32 q7,d22,d1
-
- vmlal.u32 q8,d22,d3
- vld1.32 d8[1],[r7,:32]
- vmlal.u32 q5,d26,d4
- vld1.32 d8[0],[r6,:32]
- vmlal.u32 q9,d24,d3
- vmlal.u32 q6,d28,d4
- vmlal.u32 q7,d20,d3
-
- vmlal.u32 q8,d20,d5
- vmlal.u32 q5,d24,d6
- vmlal.u32 q9,d22,d5
- vmlal.u32 q6,d26,d6
- vmlal.u32 q7,d28,d6
-
- vmlal.u32 q8,d28,d8
- vorn q0,q0,q0 @ all-ones
- vmlal.u32 q5,d22,d8
- vshr.u64 q0,q0,#38
- vmlal.u32 q9,d20,d7
- vmlal.u32 q6,d24,d8
- vmlal.u32 q7,d26,d8
-
-.Lshort_tail:
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ horizontal addition
-
- vadd.i64 d16,d16,d17
- vadd.i64 d10,d10,d11
- vadd.i64 d18,d18,d19
- vadd.i64 d12,d12,d13
- vadd.i64 d14,d14,d15
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ lazy reduction, but without narrowing
-
- vshr.u64 q15,q8,#26
- vand.i64 q8,q8,q0
- vshr.u64 q4,q5,#26
- vand.i64 q5,q5,q0
- vadd.i64 q9,q9,q15 @ h3 -> h4
- vadd.i64 q6,q6,q4 @ h0 -> h1
-
- vshr.u64 q15,q9,#26
- vand.i64 q9,q9,q0
- vshr.u64 q4,q6,#26
- vand.i64 q6,q6,q0
- vadd.i64 q7,q7,q4 @ h1 -> h2
-
- vadd.i64 q5,q5,q15
- vshl.u64 q15,q15,#2
- vshr.u64 q4,q7,#26
- vand.i64 q7,q7,q0
- vadd.i64 q5,q5,q15 @ h4 -> h0
- vadd.i64 q8,q8,q4 @ h2 -> h3
-
- vshr.u64 q15,q5,#26
- vand.i64 q5,q5,q0
- vshr.u64 q4,q8,#26
- vand.i64 q8,q8,q0
- vadd.i64 q6,q6,q15 @ h0 -> h1
- vadd.i64 q9,q9,q4 @ h3 -> h4
-
- cmp r2,#0
- bne .Leven
-
- @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
- @ store hash value
-
- vst4.32 {d10[0],d12[0],d14[0],d16[0]},[r0]!
- vst1.32 {d18[0]},[r0]
-
- vldmia sp!,{d8-d15} @ epilogue
- ldmia sp!,{r4-r7}
- bx lr @ bx lr
-.size poly1305_blocks_neon,.-poly1305_blocks_neon
-
-.align 5
-.Lzeros:
-.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
-#ifndef __KERNEL__
-.LOPENSSL_armcap:
-# ifdef _WIN32
-.word OPENSSL_armcap_P
-# else
-.word OPENSSL_armcap_P-.Lpoly1305_init
-# endif
-.comm OPENSSL_armcap_P,4,4
-.hidden OPENSSL_armcap_P
-#endif
-#endif
-.asciz "Poly1305 for ARMv4/NEON, CRYPTOGAMS by @dot-asm"
-.align 2
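
[Annotation] The deletion above removes the pre-generated poly1305-core.S_shipped. The long in-file comment it carried (the "lazy reduction" notes in the NEON section) describes a base 2^32 -> base 2^26 radix split and the carry chain between limbs. A hedged C model of those two steps, with illustrative names (w[], h[], top) that are not the kernel's:

    #include <stdint.h>

    /* base 2^32 -> base 2^26: five 26-bit limbs from four 32-bit words
     * plus the top carry word, mirroring the shift/or chain in the
     * deleted poly1305_init_neon/poly1305_blocks_neon code. */
    static void to_base26(uint32_t h[5], const uint32_t w[4], uint32_t top)
    {
            h[0] =   w[0]                        & 0x03ffffff;
            h[1] = ((w[0] >> 26) | (w[1] <<  6)) & 0x03ffffff;
            h[2] = ((w[1] >> 20) | (w[2] << 12)) & 0x03ffffff;
            h[3] = ((w[2] >> 14) | (w[3] << 18)) & 0x03ffffff;
            h[4] =  (w[3] >>  8) | (top << 24);
    }

    /* One pass of the lazy reduction: carry each limb into the next and
     * fold the carry out of h4 back into h0 times 5, because
     * 2^130 == 5 (mod 2^130 - 5). The assembly interleaves these steps
     * and tolerates limbs slightly wider than 26 bits between passes. */
    static void lazy_reduce(uint32_t h[5])
    {
            uint32_t c;

            c = h[0] >> 26; h[0] &= 0x03ffffff; h[1] += c;
            c = h[1] >> 26; h[1] &= 0x03ffffff; h[2] += c;
            c = h[2] >> 26; h[2] &= 0x03ffffff; h[3] += c;
            c = h[3] >> 26; h[3] &= 0x03ffffff; h[4] += c;
            c = h[4] >> 26; h[4] &= 0x03ffffff; h[0] += c * 5;
    }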
diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
index 3023c1acfa19..c31bd8f7c092 100644
--- a/arch/arm/crypto/poly1305-glue.c
+++ b/arch/arm/crypto/poly1305-glue.c
@@ -29,7 +29,7 @@ void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
{
poly1305_init_arm(&dctx->h, key);
dctx->s[0] = get_unaligned_le32(key + 16);
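
[Annotation] The hunk above only changes the prototype: the key parameter gains an explicit POLY1305_KEY_SIZE bound instead of a bare pointer. Plausibly this keeps the definition in step with the shared declaration for compilers that compare array-parameter bounds across declarations (gcc 11's -Warray-parameter flags such mismatches). A minimal sketch of that warning class, with hypothetical names, not kernel code:

    /* gcc-11 -Warray-parameter: the definition below is flagged because
     * the prototype promises an array of 32 bytes while the definition
     * takes a bare pointer. */
    void set_key(const unsigned char key[32]);

    void set_key(const unsigned char *key)
    {
            (void)key;
    }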
diff --git a/arch/arm/crypto/sha256-core.S_shipped b/arch/arm/crypto/sha256-core.S_shipped
deleted file mode 100644
index 6363014a50d7..000000000000
--- a/arch/arm/crypto/sha256-core.S_shipped
+++ /dev/null
@@ -1,2816 +0,0 @@
-@ SPDX-License-Identifier: GPL-2.0
-
-@ This code is taken from the OpenSSL project but the author (Andy Polyakov)
-@ has relicensed it under the GPLv2. Therefore this program is free software;
-@ you can redistribute it and/or modify it under the terms of the GNU General
-@ Public License version 2 as published by the Free Software Foundation.
-@
-@ The original headers, including the original license headers, are
-@ included below for completeness.
-
-@ ====================================================================
-@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-@ project. The module is, however, dual licensed under OpenSSL and
-@ CRYPTOGAMS licenses depending on where you obtain it. For further
-@ details see https://www.openssl.org/~appro/cryptogams/.
-@ ====================================================================
-
-@ SHA256 block procedure for ARMv4. May 2007.
-
-@ Performance is ~2x better than gcc 3.4 generated code and in "abso-
-@ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
-@ byte [on single-issue Xscale PXA250 core].
-
-@ July 2010.
-@
-@ Rescheduling for dual-issue pipeline resulted in 22% improvement on
-@ Cortex A8 core and ~20 cycles per processed byte.
-
-@ February 2011.
-@
-@ Profiler-assisted and platform-specific optimization resulted in 16%
-@ improvement on Cortex A8 core and ~15.4 cycles per processed byte.
-
-@ September 2013.
-@
-@ Add NEON implementation. On Cortex A8 it was measured to process one
-@ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
-@ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
-@ code (meaning that latter performs sub-optimally, nothing was done
-@ about it).
-
-@ May 2014.
-@
-@ Add ARMv8 code path performing at 2.0 cpb on Apple A7.
-
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#else
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
-# define __ARM_MAX_ARCH__ 7
-#endif
-
-.text
-#if __ARM_ARCH__<7
-.code 32
-#else
-.syntax unified
-# ifdef __thumb2__
-.thumb
-# else
-.code 32
-# endif
-#endif
-
-.type K256,%object
-.align 5
-K256:
-.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
-.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
-.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
-.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
-.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
-.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
-.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
-.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
-.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
-.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
-.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
-.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
-.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
-.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
-.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
-.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
-.size K256,.-K256
-.word 0 @ terminator
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-.LOPENSSL_armcap:
-.word OPENSSL_armcap_P-sha256_block_data_order
-#endif
-.align 5
-
-.global sha256_block_data_order
-.type sha256_block_data_order,%function
-sha256_block_data_order:
-.Lsha256_block_data_order:
-#if __ARM_ARCH__<7
- sub r3,pc,#8 @ sha256_block_data_order
-#else
- adr r3,.Lsha256_block_data_order
-#endif
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
- ldr r12,.LOPENSSL_armcap
- ldr r12,[r3,r12] @ OPENSSL_armcap_P
- tst r12,#ARMV8_SHA256
- bne .LARMv8
- tst r12,#ARMV7_NEON
- bne .LNEON
-#endif
- add r2,r1,r2,lsl#6 @ len to point at the end of inp
- stmdb sp!,{r0,r1,r2,r4-r11,lr}
- ldmia r0,{r4,r5,r6,r7,r8,r9,r10,r11}
- sub r14,r3,#256+32 @ K256
- sub sp,sp,#16*4 @ alloca(X[16])
-.Loop:
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ magic
- eor r12,r12,r12
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 0
-# if 0==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r8,r8,ror#5
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r8,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 0
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 0==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r8,r8,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r8,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r11,r11,r2 @ h+=X[i]
- str r2,[sp,#0*4]
- eor r2,r9,r10
- add r11,r11,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r8
- add r11,r11,r12 @ h+=K256[i]
- eor r2,r2,r10 @ Ch(e,f,g)
- eor r0,r4,r4,ror#11
- add r11,r11,r2 @ h+=Ch(e,f,g)
-#if 0==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 0<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r4,r5 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#2*4] @ from future BODY_16_xx
- eor r12,r4,r5 @ a^b, b^c in next round
- ldr r1,[sp,#15*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r4,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r7,r7,r11 @ d+=h
- eor r3,r3,r5 @ Maj(a,b,c)
- add r11,r11,r0,ror#2 @ h+=Sigma0(a)
- @ add r11,r11,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 1
-# if 1==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r7,r7,ror#5
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r7,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 1
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 1==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r7,r7,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r7,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r10,r10,r2 @ h+=X[i]
- str r2,[sp,#1*4]
- eor r2,r8,r9
- add r10,r10,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r7
- add r10,r10,r3 @ h+=K256[i]
- eor r2,r2,r9 @ Ch(e,f,g)
- eor r0,r11,r11,ror#11
- add r10,r10,r2 @ h+=Ch(e,f,g)
-#if 1==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 1<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r11,r4 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#3*4] @ from future BODY_16_xx
- eor r3,r11,r4 @ a^b, b^c in next round
- ldr r1,[sp,#0*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r11,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r6,r6,r10 @ d+=h
- eor r12,r12,r4 @ Maj(a,b,c)
- add r10,r10,r0,ror#2 @ h+=Sigma0(a)
- @ add r10,r10,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 2
-# if 2==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r6,r6,ror#5
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r6,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 2
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 2==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r6,r6,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r6,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r9,r9,r2 @ h+=X[i]
- str r2,[sp,#2*4]
- eor r2,r7,r8
- add r9,r9,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r6
- add r9,r9,r12 @ h+=K256[i]
- eor r2,r2,r8 @ Ch(e,f,g)
- eor r0,r10,r10,ror#11
- add r9,r9,r2 @ h+=Ch(e,f,g)
-#if 2==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 2<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r10,r11 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#4*4] @ from future BODY_16_xx
- eor r12,r10,r11 @ a^b, b^c in next round
- ldr r1,[sp,#1*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r10,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r5,r5,r9 @ d+=h
- eor r3,r3,r11 @ Maj(a,b,c)
- add r9,r9,r0,ror#2 @ h+=Sigma0(a)
- @ add r9,r9,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 3
-# if 3==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r5,r5,ror#5
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r5,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 3
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 3==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r5,r5,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r5,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r8,r8,r2 @ h+=X[i]
- str r2,[sp,#3*4]
- eor r2,r6,r7
- add r8,r8,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r5
- add r8,r8,r3 @ h+=K256[i]
- eor r2,r2,r7 @ Ch(e,f,g)
- eor r0,r9,r9,ror#11
- add r8,r8,r2 @ h+=Ch(e,f,g)
-#if 3==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 3<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r9,r10 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#5*4] @ from future BODY_16_xx
- eor r3,r9,r10 @ a^b, b^c in next round
- ldr r1,[sp,#2*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r9,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r4,r4,r8 @ d+=h
- eor r12,r12,r10 @ Maj(a,b,c)
- add r8,r8,r0,ror#2 @ h+=Sigma0(a)
- @ add r8,r8,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 4
-# if 4==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r4,r4,ror#5
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r4,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 4
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 4==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r4,r4,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r4,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r7,r7,r2 @ h+=X[i]
- str r2,[sp,#4*4]
- eor r2,r5,r6
- add r7,r7,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r4
- add r7,r7,r12 @ h+=K256[i]
- eor r2,r2,r6 @ Ch(e,f,g)
- eor r0,r8,r8,ror#11
- add r7,r7,r2 @ h+=Ch(e,f,g)
-#if 4==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 4<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r8,r9 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#6*4] @ from future BODY_16_xx
- eor r12,r8,r9 @ a^b, b^c in next round
- ldr r1,[sp,#3*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r8,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r11,r11,r7 @ d+=h
- eor r3,r3,r9 @ Maj(a,b,c)
- add r7,r7,r0,ror#2 @ h+=Sigma0(a)
- @ add r7,r7,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 5
-# if 5==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r11,r11,ror#5
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r11,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 5
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 5==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r11,r11,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r11,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r6,r6,r2 @ h+=X[i]
- str r2,[sp,#5*4]
- eor r2,r4,r5
- add r6,r6,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r11
- add r6,r6,r3 @ h+=K256[i]
- eor r2,r2,r5 @ Ch(e,f,g)
- eor r0,r7,r7,ror#11
- add r6,r6,r2 @ h+=Ch(e,f,g)
-#if 5==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 5<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r7,r8 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#7*4] @ from future BODY_16_xx
- eor r3,r7,r8 @ a^b, b^c in next round
- ldr r1,[sp,#4*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r7,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r10,r10,r6 @ d+=h
- eor r12,r12,r8 @ Maj(a,b,c)
- add r6,r6,r0,ror#2 @ h+=Sigma0(a)
- @ add r6,r6,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 6
-# if 6==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r10,r10,ror#5
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r10,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 6
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 6==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r10,r10,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r10,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r5,r5,r2 @ h+=X[i]
- str r2,[sp,#6*4]
- eor r2,r11,r4
- add r5,r5,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r10
- add r5,r5,r12 @ h+=K256[i]
- eor r2,r2,r4 @ Ch(e,f,g)
- eor r0,r6,r6,ror#11
- add r5,r5,r2 @ h+=Ch(e,f,g)
-#if 6==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 6<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r6,r7 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#8*4] @ from future BODY_16_xx
- eor r12,r6,r7 @ a^b, b^c in next round
- ldr r1,[sp,#5*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r6,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r9,r9,r5 @ d+=h
- eor r3,r3,r7 @ Maj(a,b,c)
- add r5,r5,r0,ror#2 @ h+=Sigma0(a)
- @ add r5,r5,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 7
-# if 7==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r9,r9,ror#5
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r9,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 7
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 7==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r9,r9,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r9,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r4,r4,r2 @ h+=X[i]
- str r2,[sp,#7*4]
- eor r2,r10,r11
- add r4,r4,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r9
- add r4,r4,r3 @ h+=K256[i]
- eor r2,r2,r11 @ Ch(e,f,g)
- eor r0,r5,r5,ror#11
- add r4,r4,r2 @ h+=Ch(e,f,g)
-#if 7==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 7<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#9*4] @ from future BODY_16_xx
- eor r3,r5,r6 @ a^b, b^c in next round
- ldr r1,[sp,#6*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r5,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r8,r8,r4 @ d+=h
- eor r12,r12,r6 @ Maj(a,b,c)
- add r4,r4,r0,ror#2 @ h+=Sigma0(a)
- @ add r4,r4,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 8
-# if 8==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r8,r8,ror#5
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r8,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 8
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 8==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r8,r8,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r8,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r11,r11,r2 @ h+=X[i]
- str r2,[sp,#8*4]
- eor r2,r9,r10
- add r11,r11,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r8
- add r11,r11,r12 @ h+=K256[i]
- eor r2,r2,r10 @ Ch(e,f,g)
- eor r0,r4,r4,ror#11
- add r11,r11,r2 @ h+=Ch(e,f,g)
-#if 8==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 8<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r4,r5 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#10*4] @ from future BODY_16_xx
- eor r12,r4,r5 @ a^b, b^c in next round
- ldr r1,[sp,#7*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r4,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r7,r7,r11 @ d+=h
- eor r3,r3,r5 @ Maj(a,b,c)
- add r11,r11,r0,ror#2 @ h+=Sigma0(a)
- @ add r11,r11,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 9
-# if 9==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r7,r7,ror#5
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r7,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 9
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 9==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r7,r7,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r7,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r10,r10,r2 @ h+=X[i]
- str r2,[sp,#9*4]
- eor r2,r8,r9
- add r10,r10,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r7
- add r10,r10,r3 @ h+=K256[i]
- eor r2,r2,r9 @ Ch(e,f,g)
- eor r0,r11,r11,ror#11
- add r10,r10,r2 @ h+=Ch(e,f,g)
-#if 9==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 9<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r11,r4 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#11*4] @ from future BODY_16_xx
- eor r3,r11,r4 @ a^b, b^c in next round
- ldr r1,[sp,#8*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r11,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r6,r6,r10 @ d+=h
- eor r12,r12,r4 @ Maj(a,b,c)
- add r10,r10,r0,ror#2 @ h+=Sigma0(a)
- @ add r10,r10,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 10
-# if 10==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r6,r6,ror#5
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r6,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 10
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 10==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r6,r6,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r6,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r9,r9,r2 @ h+=X[i]
- str r2,[sp,#10*4]
- eor r2,r7,r8
- add r9,r9,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r6
- add r9,r9,r12 @ h+=K256[i]
- eor r2,r2,r8 @ Ch(e,f,g)
- eor r0,r10,r10,ror#11
- add r9,r9,r2 @ h+=Ch(e,f,g)
-#if 10==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 10<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r10,r11 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#12*4] @ from future BODY_16_xx
- eor r12,r10,r11 @ a^b, b^c in next round
- ldr r1,[sp,#9*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r10,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r5,r5,r9 @ d+=h
- eor r3,r3,r11 @ Maj(a,b,c)
- add r9,r9,r0,ror#2 @ h+=Sigma0(a)
- @ add r9,r9,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 11
-# if 11==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r5,r5,ror#5
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r5,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 11
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 11==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r5,r5,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r5,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r8,r8,r2 @ h+=X[i]
- str r2,[sp,#11*4]
- eor r2,r6,r7
- add r8,r8,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r5
- add r8,r8,r3 @ h+=K256[i]
- eor r2,r2,r7 @ Ch(e,f,g)
- eor r0,r9,r9,ror#11
- add r8,r8,r2 @ h+=Ch(e,f,g)
-#if 11==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 11<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r9,r10 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#13*4] @ from future BODY_16_xx
- eor r3,r9,r10 @ a^b, b^c in next round
- ldr r1,[sp,#10*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r9,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r4,r4,r8 @ d+=h
- eor r12,r12,r10 @ Maj(a,b,c)
- add r8,r8,r0,ror#2 @ h+=Sigma0(a)
- @ add r8,r8,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 12
-# if 12==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r4,r4,ror#5
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r4,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 12
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 12==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r4,r4,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r4,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r7,r7,r2 @ h+=X[i]
- str r2,[sp,#12*4]
- eor r2,r5,r6
- add r7,r7,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r4
- add r7,r7,r12 @ h+=K256[i]
- eor r2,r2,r6 @ Ch(e,f,g)
- eor r0,r8,r8,ror#11
- add r7,r7,r2 @ h+=Ch(e,f,g)
-#if 12==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 12<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r8,r9 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#14*4] @ from future BODY_16_xx
- eor r12,r8,r9 @ a^b, b^c in next round
- ldr r1,[sp,#11*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r8,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r11,r11,r7 @ d+=h
- eor r3,r3,r9 @ Maj(a,b,c)
- add r7,r7,r0,ror#2 @ h+=Sigma0(a)
- @ add r7,r7,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 13
-# if 13==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r11,r11,ror#5
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r11,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 13
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 13==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r11,r11,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r11,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r6,r6,r2 @ h+=X[i]
- str r2,[sp,#13*4]
- eor r2,r4,r5
- add r6,r6,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r11
- add r6,r6,r3 @ h+=K256[i]
- eor r2,r2,r5 @ Ch(e,f,g)
- eor r0,r7,r7,ror#11
- add r6,r6,r2 @ h+=Ch(e,f,g)
-#if 13==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 13<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r7,r8 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#15*4] @ from future BODY_16_xx
- eor r3,r7,r8 @ a^b, b^c in next round
- ldr r1,[sp,#12*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r7,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r10,r10,r6 @ d+=h
- eor r12,r12,r8 @ Maj(a,b,c)
- add r6,r6,r0,ror#2 @ h+=Sigma0(a)
- @ add r6,r6,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 14
-# if 14==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r10,r10,ror#5
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r10,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 14
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- ldrb r12,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r12,lsl#8
- ldrb r12,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 14==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r10,r10,ror#5
- orr r2,r2,r12,lsl#24
- eor r0,r0,r10,ror#19 @ Sigma1(e)
-#endif
- ldr r12,[r14],#4 @ *K256++
- add r5,r5,r2 @ h+=X[i]
- str r2,[sp,#14*4]
- eor r2,r11,r4
- add r5,r5,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r10
- add r5,r5,r12 @ h+=K256[i]
- eor r2,r2,r4 @ Ch(e,f,g)
- eor r0,r6,r6,ror#11
- add r5,r5,r2 @ h+=Ch(e,f,g)
-#if 14==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 14<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r6,r7 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#0*4] @ from future BODY_16_xx
- eor r12,r6,r7 @ a^b, b^c in next round
- ldr r1,[sp,#13*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r6,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r9,r9,r5 @ d+=h
- eor r3,r3,r7 @ Maj(a,b,c)
- add r5,r5,r0,ror#2 @ h+=Sigma0(a)
- @ add r5,r5,r3 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- @ ldr r2,[r1],#4 @ 15
-# if 15==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r9,r9,ror#5
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- eor r0,r0,r9,ror#19 @ Sigma1(e)
-# ifndef __ARMEB__
- rev r2,r2
-# endif
-#else
- @ ldrb r2,[r1,#3] @ 15
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- ldrb r3,[r1,#2]
- ldrb r0,[r1,#1]
- orr r2,r2,r3,lsl#8
- ldrb r3,[r1],#4
- orr r2,r2,r0,lsl#16
-# if 15==15
- str r1,[sp,#17*4] @ make room for r1
-# endif
- eor r0,r9,r9,ror#5
- orr r2,r2,r3,lsl#24
- eor r0,r0,r9,ror#19 @ Sigma1(e)
-#endif
- ldr r3,[r14],#4 @ *K256++
- add r4,r4,r2 @ h+=X[i]
- str r2,[sp,#15*4]
- eor r2,r10,r11
- add r4,r4,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r9
- add r4,r4,r3 @ h+=K256[i]
- eor r2,r2,r11 @ Ch(e,f,g)
- eor r0,r5,r5,ror#11
- add r4,r4,r2 @ h+=Ch(e,f,g)
-#if 15==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 15<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#1*4] @ from future BODY_16_xx
- eor r3,r5,r6 @ a^b, b^c in next round
- ldr r1,[sp,#14*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r5,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r8,r8,r4 @ d+=h
- eor r12,r12,r6 @ Maj(a,b,c)
- add r4,r4,r0,ror#2 @ h+=Sigma0(a)
- @ add r4,r4,r12 @ h+=Maj(a,b,c)
-.Lrounds_16_xx:
- @ ldr r2,[sp,#1*4] @ 16
- @ ldr r1,[sp,#14*4]
- mov r0,r2,ror#7
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#0*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#9*4]
-
- add r12,r12,r0
- eor r0,r8,r8,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r8,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r11,r11,r2 @ h+=X[i]
- str r2,[sp,#0*4]
- eor r2,r9,r10
- add r11,r11,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r8
- add r11,r11,r12 @ h+=K256[i]
- eor r2,r2,r10 @ Ch(e,f,g)
- eor r0,r4,r4,ror#11
- add r11,r11,r2 @ h+=Ch(e,f,g)
-#if 16==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 16<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r4,r5 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#2*4] @ from future BODY_16_xx
- eor r12,r4,r5 @ a^b, b^c in next round
- ldr r1,[sp,#15*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r4,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r7,r7,r11 @ d+=h
- eor r3,r3,r5 @ Maj(a,b,c)
- add r11,r11,r0,ror#2 @ h+=Sigma0(a)
- @ add r11,r11,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#2*4] @ 17
- @ ldr r1,[sp,#15*4]
- mov r0,r2,ror#7
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#1*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#10*4]
-
- add r3,r3,r0
- eor r0,r7,r7,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r7,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r10,r10,r2 @ h+=X[i]
- str r2,[sp,#1*4]
- eor r2,r8,r9
- add r10,r10,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r7
- add r10,r10,r3 @ h+=K256[i]
- eor r2,r2,r9 @ Ch(e,f,g)
- eor r0,r11,r11,ror#11
- add r10,r10,r2 @ h+=Ch(e,f,g)
-#if 17==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 17<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r11,r4 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#3*4] @ from future BODY_16_xx
- eor r3,r11,r4 @ a^b, b^c in next round
- ldr r1,[sp,#0*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r11,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r6,r6,r10 @ d+=h
- eor r12,r12,r4 @ Maj(a,b,c)
- add r10,r10,r0,ror#2 @ h+=Sigma0(a)
- @ add r10,r10,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#3*4] @ 18
- @ ldr r1,[sp,#0*4]
- mov r0,r2,ror#7
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#2*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#11*4]
-
- add r12,r12,r0
- eor r0,r6,r6,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r6,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r9,r9,r2 @ h+=X[i]
- str r2,[sp,#2*4]
- eor r2,r7,r8
- add r9,r9,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r6
- add r9,r9,r12 @ h+=K256[i]
- eor r2,r2,r8 @ Ch(e,f,g)
- eor r0,r10,r10,ror#11
- add r9,r9,r2 @ h+=Ch(e,f,g)
-#if 18==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 18<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r10,r11 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#4*4] @ from future BODY_16_xx
- eor r12,r10,r11 @ a^b, b^c in next round
- ldr r1,[sp,#1*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r10,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r5,r5,r9 @ d+=h
- eor r3,r3,r11 @ Maj(a,b,c)
- add r9,r9,r0,ror#2 @ h+=Sigma0(a)
- @ add r9,r9,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#4*4] @ 19
- @ ldr r1,[sp,#1*4]
- mov r0,r2,ror#7
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#3*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#12*4]
-
- add r3,r3,r0
- eor r0,r5,r5,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r5,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r8,r8,r2 @ h+=X[i]
- str r2,[sp,#3*4]
- eor r2,r6,r7
- add r8,r8,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r5
- add r8,r8,r3 @ h+=K256[i]
- eor r2,r2,r7 @ Ch(e,f,g)
- eor r0,r9,r9,ror#11
- add r8,r8,r2 @ h+=Ch(e,f,g)
-#if 19==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 19<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r9,r10 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#5*4] @ from future BODY_16_xx
- eor r3,r9,r10 @ a^b, b^c in next round
- ldr r1,[sp,#2*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r9,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r4,r4,r8 @ d+=h
- eor r12,r12,r10 @ Maj(a,b,c)
- add r8,r8,r0,ror#2 @ h+=Sigma0(a)
- @ add r8,r8,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#5*4] @ 20
- @ ldr r1,[sp,#2*4]
- mov r0,r2,ror#7
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#4*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#13*4]
-
- add r12,r12,r0
- eor r0,r4,r4,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r4,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r7,r7,r2 @ h+=X[i]
- str r2,[sp,#4*4]
- eor r2,r5,r6
- add r7,r7,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r4
- add r7,r7,r12 @ h+=K256[i]
- eor r2,r2,r6 @ Ch(e,f,g)
- eor r0,r8,r8,ror#11
- add r7,r7,r2 @ h+=Ch(e,f,g)
-#if 20==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 20<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r8,r9 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#6*4] @ from future BODY_16_xx
- eor r12,r8,r9 @ a^b, b^c in next round
- ldr r1,[sp,#3*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r8,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r11,r11,r7 @ d+=h
- eor r3,r3,r9 @ Maj(a,b,c)
- add r7,r7,r0,ror#2 @ h+=Sigma0(a)
- @ add r7,r7,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#6*4] @ 21
- @ ldr r1,[sp,#3*4]
- mov r0,r2,ror#7
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#5*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#14*4]
-
- add r3,r3,r0
- eor r0,r11,r11,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r11,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r6,r6,r2 @ h+=X[i]
- str r2,[sp,#5*4]
- eor r2,r4,r5
- add r6,r6,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r11
- add r6,r6,r3 @ h+=K256[i]
- eor r2,r2,r5 @ Ch(e,f,g)
- eor r0,r7,r7,ror#11
- add r6,r6,r2 @ h+=Ch(e,f,g)
-#if 21==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 21<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r7,r8 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#7*4] @ from future BODY_16_xx
- eor r3,r7,r8 @ a^b, b^c in next round
- ldr r1,[sp,#4*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r7,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r10,r10,r6 @ d+=h
- eor r12,r12,r8 @ Maj(a,b,c)
- add r6,r6,r0,ror#2 @ h+=Sigma0(a)
- @ add r6,r6,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#7*4] @ 22
- @ ldr r1,[sp,#4*4]
- mov r0,r2,ror#7
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#6*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#15*4]
-
- add r12,r12,r0
- eor r0,r10,r10,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r10,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r5,r5,r2 @ h+=X[i]
- str r2,[sp,#6*4]
- eor r2,r11,r4
- add r5,r5,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r10
- add r5,r5,r12 @ h+=K256[i]
- eor r2,r2,r4 @ Ch(e,f,g)
- eor r0,r6,r6,ror#11
- add r5,r5,r2 @ h+=Ch(e,f,g)
-#if 22==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 22<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r6,r7 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#8*4] @ from future BODY_16_xx
- eor r12,r6,r7 @ a^b, b^c in next round
- ldr r1,[sp,#5*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r6,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r9,r9,r5 @ d+=h
- eor r3,r3,r7 @ Maj(a,b,c)
- add r5,r5,r0,ror#2 @ h+=Sigma0(a)
- @ add r5,r5,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#8*4] @ 23
- @ ldr r1,[sp,#5*4]
- mov r0,r2,ror#7
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#7*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#0*4]
-
- add r3,r3,r0
- eor r0,r9,r9,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r9,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r4,r4,r2 @ h+=X[i]
- str r2,[sp,#7*4]
- eor r2,r10,r11
- add r4,r4,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r9
- add r4,r4,r3 @ h+=K256[i]
- eor r2,r2,r11 @ Ch(e,f,g)
- eor r0,r5,r5,ror#11
- add r4,r4,r2 @ h+=Ch(e,f,g)
-#if 23==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 23<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#9*4] @ from future BODY_16_xx
- eor r3,r5,r6 @ a^b, b^c in next round
- ldr r1,[sp,#6*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r5,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r8,r8,r4 @ d+=h
- eor r12,r12,r6 @ Maj(a,b,c)
- add r4,r4,r0,ror#2 @ h+=Sigma0(a)
- @ add r4,r4,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#9*4] @ 24
- @ ldr r1,[sp,#6*4]
- mov r0,r2,ror#7
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#8*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#1*4]
-
- add r12,r12,r0
- eor r0,r8,r8,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r8,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r11,r11,r2 @ h+=X[i]
- str r2,[sp,#8*4]
- eor r2,r9,r10
- add r11,r11,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r8
- add r11,r11,r12 @ h+=K256[i]
- eor r2,r2,r10 @ Ch(e,f,g)
- eor r0,r4,r4,ror#11
- add r11,r11,r2 @ h+=Ch(e,f,g)
-#if 24==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 24<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r4,r5 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#10*4] @ from future BODY_16_xx
- eor r12,r4,r5 @ a^b, b^c in next round
- ldr r1,[sp,#7*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r4,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r7,r7,r11 @ d+=h
- eor r3,r3,r5 @ Maj(a,b,c)
- add r11,r11,r0,ror#2 @ h+=Sigma0(a)
- @ add r11,r11,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#10*4] @ 25
- @ ldr r1,[sp,#7*4]
- mov r0,r2,ror#7
- add r11,r11,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#9*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#2*4]
-
- add r3,r3,r0
- eor r0,r7,r7,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r7,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r10,r10,r2 @ h+=X[i]
- str r2,[sp,#9*4]
- eor r2,r8,r9
- add r10,r10,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r7
- add r10,r10,r3 @ h+=K256[i]
- eor r2,r2,r9 @ Ch(e,f,g)
- eor r0,r11,r11,ror#11
- add r10,r10,r2 @ h+=Ch(e,f,g)
-#if 25==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 25<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r11,r4 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#11*4] @ from future BODY_16_xx
- eor r3,r11,r4 @ a^b, b^c in next round
- ldr r1,[sp,#8*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r11,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r6,r6,r10 @ d+=h
- eor r12,r12,r4 @ Maj(a,b,c)
- add r10,r10,r0,ror#2 @ h+=Sigma0(a)
- @ add r10,r10,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#11*4] @ 26
- @ ldr r1,[sp,#8*4]
- mov r0,r2,ror#7
- add r10,r10,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#10*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#3*4]
-
- add r12,r12,r0
- eor r0,r6,r6,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r6,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r9,r9,r2 @ h+=X[i]
- str r2,[sp,#10*4]
- eor r2,r7,r8
- add r9,r9,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r6
- add r9,r9,r12 @ h+=K256[i]
- eor r2,r2,r8 @ Ch(e,f,g)
- eor r0,r10,r10,ror#11
- add r9,r9,r2 @ h+=Ch(e,f,g)
-#if 26==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 26<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r10,r11 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#12*4] @ from future BODY_16_xx
- eor r12,r10,r11 @ a^b, b^c in next round
- ldr r1,[sp,#9*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r10,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r5,r5,r9 @ d+=h
- eor r3,r3,r11 @ Maj(a,b,c)
- add r9,r9,r0,ror#2 @ h+=Sigma0(a)
- @ add r9,r9,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#12*4] @ 27
- @ ldr r1,[sp,#9*4]
- mov r0,r2,ror#7
- add r9,r9,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#11*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#4*4]
-
- add r3,r3,r0
- eor r0,r5,r5,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r5,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r8,r8,r2 @ h+=X[i]
- str r2,[sp,#11*4]
- eor r2,r6,r7
- add r8,r8,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r5
- add r8,r8,r3 @ h+=K256[i]
- eor r2,r2,r7 @ Ch(e,f,g)
- eor r0,r9,r9,ror#11
- add r8,r8,r2 @ h+=Ch(e,f,g)
-#if 27==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 27<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r9,r10 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#13*4] @ from future BODY_16_xx
- eor r3,r9,r10 @ a^b, b^c in next round
- ldr r1,[sp,#10*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r9,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r4,r4,r8 @ d+=h
- eor r12,r12,r10 @ Maj(a,b,c)
- add r8,r8,r0,ror#2 @ h+=Sigma0(a)
- @ add r8,r8,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#13*4] @ 28
- @ ldr r1,[sp,#10*4]
- mov r0,r2,ror#7
- add r8,r8,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#12*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#5*4]
-
- add r12,r12,r0
- eor r0,r4,r4,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r4,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r7,r7,r2 @ h+=X[i]
- str r2,[sp,#12*4]
- eor r2,r5,r6
- add r7,r7,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r4
- add r7,r7,r12 @ h+=K256[i]
- eor r2,r2,r6 @ Ch(e,f,g)
- eor r0,r8,r8,ror#11
- add r7,r7,r2 @ h+=Ch(e,f,g)
-#if 28==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 28<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r8,r9 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#14*4] @ from future BODY_16_xx
- eor r12,r8,r9 @ a^b, b^c in next round
- ldr r1,[sp,#11*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r8,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r11,r11,r7 @ d+=h
- eor r3,r3,r9 @ Maj(a,b,c)
- add r7,r7,r0,ror#2 @ h+=Sigma0(a)
- @ add r7,r7,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#14*4] @ 29
- @ ldr r1,[sp,#11*4]
- mov r0,r2,ror#7
- add r7,r7,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#13*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#6*4]
-
- add r3,r3,r0
- eor r0,r11,r11,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r11,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r6,r6,r2 @ h+=X[i]
- str r2,[sp,#13*4]
- eor r2,r4,r5
- add r6,r6,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r11
- add r6,r6,r3 @ h+=K256[i]
- eor r2,r2,r5 @ Ch(e,f,g)
- eor r0,r7,r7,ror#11
- add r6,r6,r2 @ h+=Ch(e,f,g)
-#if 29==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 29<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r7,r8 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#15*4] @ from future BODY_16_xx
- eor r3,r7,r8 @ a^b, b^c in next round
- ldr r1,[sp,#12*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r7,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r10,r10,r6 @ d+=h
- eor r12,r12,r8 @ Maj(a,b,c)
- add r6,r6,r0,ror#2 @ h+=Sigma0(a)
- @ add r6,r6,r12 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#15*4] @ 30
- @ ldr r1,[sp,#12*4]
- mov r0,r2,ror#7
- add r6,r6,r12 @ h+=Maj(a,b,c) from the past
- mov r12,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r12,r12,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#14*4]
- eor r12,r12,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#7*4]
-
- add r12,r12,r0
- eor r0,r10,r10,ror#5 @ from BODY_00_15
- add r2,r2,r12
- eor r0,r0,r10,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r12,[r14],#4 @ *K256++
- add r5,r5,r2 @ h+=X[i]
- str r2,[sp,#14*4]
- eor r2,r11,r4
- add r5,r5,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r10
- add r5,r5,r12 @ h+=K256[i]
- eor r2,r2,r4 @ Ch(e,f,g)
- eor r0,r6,r6,ror#11
- add r5,r5,r2 @ h+=Ch(e,f,g)
-#if 30==31
- and r12,r12,#0xff
- cmp r12,#0xf2 @ done?
-#endif
-#if 30<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r12,r6,r7 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#0*4] @ from future BODY_16_xx
- eor r12,r6,r7 @ a^b, b^c in next round
- ldr r1,[sp,#13*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r6,ror#20 @ Sigma0(a)
- and r3,r3,r12 @ (b^c)&=(a^b)
- add r9,r9,r5 @ d+=h
- eor r3,r3,r7 @ Maj(a,b,c)
- add r5,r5,r0,ror#2 @ h+=Sigma0(a)
- @ add r5,r5,r3 @ h+=Maj(a,b,c)
- @ ldr r2,[sp,#0*4] @ 31
- @ ldr r1,[sp,#13*4]
- mov r0,r2,ror#7
- add r5,r5,r3 @ h+=Maj(a,b,c) from the past
- mov r3,r1,ror#17
- eor r0,r0,r2,ror#18
- eor r3,r3,r1,ror#19
- eor r0,r0,r2,lsr#3 @ sigma0(X[i+1])
- ldr r2,[sp,#15*4]
- eor r3,r3,r1,lsr#10 @ sigma1(X[i+14])
- ldr r1,[sp,#8*4]
-
- add r3,r3,r0
- eor r0,r9,r9,ror#5 @ from BODY_00_15
- add r2,r2,r3
- eor r0,r0,r9,ror#19 @ Sigma1(e)
- add r2,r2,r1 @ X[i]
- ldr r3,[r14],#4 @ *K256++
- add r4,r4,r2 @ h+=X[i]
- str r2,[sp,#15*4]
- eor r2,r10,r11
- add r4,r4,r0,ror#6 @ h+=Sigma1(e)
- and r2,r2,r9
- add r4,r4,r3 @ h+=K256[i]
- eor r2,r2,r11 @ Ch(e,f,g)
- eor r0,r5,r5,ror#11
- add r4,r4,r2 @ h+=Ch(e,f,g)
-#if 31==31
- and r3,r3,#0xff
- cmp r3,#0xf2 @ done?
-#endif
-#if 31<15
-# if __ARM_ARCH__>=7
- ldr r2,[r1],#4 @ prefetch
-# else
- ldrb r2,[r1,#3]
-# endif
- eor r3,r5,r6 @ a^b, b^c in next round
-#else
- ldr r2,[sp,#1*4] @ from future BODY_16_xx
- eor r3,r5,r6 @ a^b, b^c in next round
- ldr r1,[sp,#14*4] @ from future BODY_16_xx
-#endif
- eor r0,r0,r5,ror#20 @ Sigma0(a)
- and r12,r12,r3 @ (b^c)&=(a^b)
- add r8,r8,r4 @ d+=h
- eor r12,r12,r6 @ Maj(a,b,c)
- add r4,r4,r0,ror#2 @ h+=Sigma0(a)
- @ add r4,r4,r12 @ h+=Maj(a,b,c)
-#if __ARM_ARCH__>=7
- ite eq @ Thumb2 thing, sanity check in ARM
-#endif
- ldreq r3,[sp,#16*4] @ pull ctx
- bne .Lrounds_16_xx
-
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- ldr r0,[r3,#0]
- ldr r2,[r3,#4]
- ldr r12,[r3,#8]
- add r4,r4,r0
- ldr r0,[r3,#12]
- add r5,r5,r2
- ldr r2,[r3,#16]
- add r6,r6,r12
- ldr r12,[r3,#20]
- add r7,r7,r0
- ldr r0,[r3,#24]
- add r8,r8,r2
- ldr r2,[r3,#28]
- add r9,r9,r12
- ldr r1,[sp,#17*4] @ pull inp
- ldr r12,[sp,#18*4] @ pull inp+len
- add r10,r10,r0
- add r11,r11,r2
- stmia r3,{r4,r5,r6,r7,r8,r9,r10,r11}
- cmp r1,r12
- sub r14,r14,#256 @ rewind Ktbl
- bne .Loop
-
- add sp,sp,#19*4 @ destroy frame
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r11,pc}
-#else
- ldmia sp!,{r4-r11,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size sha256_block_data_order,.-sha256_block_data_order
-#if __ARM_MAX_ARCH__>=7
-.arch armv7-a
-.fpu neon
-
-.global sha256_block_data_order_neon
-.type sha256_block_data_order_neon,%function
-.align 4
-sha256_block_data_order_neon:
-.LNEON:
- stmdb sp!,{r4-r12,lr}
-
- sub r11,sp,#16*4+16
- adr r14,.Lsha256_block_data_order
- sub r14,r14,#.Lsha256_block_data_order-K256
- bic r11,r11,#15 @ align for 128-bit stores
- mov r12,sp
- mov sp,r11 @ alloca
- add r2,r1,r2,lsl#6 @ len to point at the end of inp
-
- vld1.8 {q0},[r1]!
- vld1.8 {q1},[r1]!
- vld1.8 {q2},[r1]!
- vld1.8 {q3},[r1]!
- vld1.32 {q8},[r14,:128]!
- vld1.32 {q9},[r14,:128]!
- vld1.32 {q10},[r14,:128]!
- vld1.32 {q11},[r14,:128]!
- vrev32.8 q0,q0 @ yes, even on
- str r0,[sp,#64]
- vrev32.8 q1,q1 @ big-endian
- str r1,[sp,#68]
- mov r1,sp
- vrev32.8 q2,q2
- str r2,[sp,#72]
- vrev32.8 q3,q3
- str r12,[sp,#76] @ save original sp
- vadd.i32 q8,q8,q0
- vadd.i32 q9,q9,q1
- vst1.32 {q8},[r1,:128]!
- vadd.i32 q10,q10,q2
- vst1.32 {q9},[r1,:128]!
- vadd.i32 q11,q11,q3
- vst1.32 {q10},[r1,:128]!
- vst1.32 {q11},[r1,:128]!
-
- ldmia r0,{r4-r11}
- sub r1,r1,#64
- ldr r2,[sp,#0]
- eor r12,r12,r12
- eor r3,r5,r6
- b .L_00_48
-
-.align 4
-.L_00_48:
- vext.8 q8,q0,q1,#4
- add r11,r11,r2
- eor r2,r9,r10
- eor r0,r8,r8,ror#5
- vext.8 q9,q2,q3,#4
- add r4,r4,r12
- and r2,r2,r8
- eor r12,r0,r8,ror#19
- vshr.u32 q10,q8,#7
- eor r0,r4,r4,ror#11
- eor r2,r2,r10
- vadd.i32 q0,q0,q9
- add r11,r11,r12,ror#6
- eor r12,r4,r5
- vshr.u32 q9,q8,#3
- eor r0,r0,r4,ror#20
- add r11,r11,r2
- vsli.32 q10,q8,#25
- ldr r2,[sp,#4]
- and r3,r3,r12
- vshr.u32 q11,q8,#18
- add r7,r7,r11
- add r11,r11,r0,ror#2
- eor r3,r3,r5
- veor q9,q9,q10
- add r10,r10,r2
- vsli.32 q11,q8,#14
- eor r2,r8,r9
- eor r0,r7,r7,ror#5
- vshr.u32 d24,d7,#17
- add r11,r11,r3
- and r2,r2,r7
- veor q9,q9,q11
- eor r3,r0,r7,ror#19
- eor r0,r11,r11,ror#11
- vsli.32 d24,d7,#15
- eor r2,r2,r9
- add r10,r10,r3,ror#6
- vshr.u32 d25,d7,#10
- eor r3,r11,r4
- eor r0,r0,r11,ror#20
- vadd.i32 q0,q0,q9
- add r10,r10,r2
- ldr r2,[sp,#8]
- veor d25,d25,d24
- and r12,r12,r3
- add r6,r6,r10
- vshr.u32 d24,d7,#19
- add r10,r10,r0,ror#2
- eor r12,r12,r4
- vsli.32 d24,d7,#13
- add r9,r9,r2
- eor r2,r7,r8
- veor d25,d25,d24
- eor r0,r6,r6,ror#5
- add r10,r10,r12
- vadd.i32 d0,d0,d25
- and r2,r2,r6
- eor r12,r0,r6,ror#19
- vshr.u32 d24,d0,#17
- eor r0,r10,r10,ror#11
- eor r2,r2,r8
- vsli.32 d24,d0,#15
- add r9,r9,r12,ror#6
- eor r12,r10,r11
- vshr.u32 d25,d0,#10
- eor r0,r0,r10,ror#20
- add r9,r9,r2
- veor d25,d25,d24
- ldr r2,[sp,#12]
- and r3,r3,r12
- vshr.u32 d24,d0,#19
- add r5,r5,r9
- add r9,r9,r0,ror#2
- eor r3,r3,r11
- vld1.32 {q8},[r14,:128]!
- add r8,r8,r2
- vsli.32 d24,d0,#13
- eor r2,r6,r7
- eor r0,r5,r5,ror#5
- veor d25,d25,d24
- add r9,r9,r3
- and r2,r2,r5
- vadd.i32 d1,d1,d25
- eor r3,r0,r5,ror#19
- eor r0,r9,r9,ror#11
- vadd.i32 q8,q8,q0
- eor r2,r2,r7
- add r8,r8,r3,ror#6
- eor r3,r9,r10
- eor r0,r0,r9,ror#20
- add r8,r8,r2
- ldr r2,[sp,#16]
- and r12,r12,r3
- add r4,r4,r8
- vst1.32 {q8},[r1,:128]!
- add r8,r8,r0,ror#2
- eor r12,r12,r10
- vext.8 q8,q1,q2,#4
- add r7,r7,r2
- eor r2,r5,r6
- eor r0,r4,r4,ror#5
- vext.8 q9,q3,q0,#4
- add r8,r8,r12
- and r2,r2,r4
- eor r12,r0,r4,ror#19
- vshr.u32 q10,q8,#7
- eor r0,r8,r8,ror#11
- eor r2,r2,r6
- vadd.i32 q1,q1,q9
- add r7,r7,r12,ror#6
- eor r12,r8,r9
- vshr.u32 q9,q8,#3
- eor r0,r0,r8,ror#20
- add r7,r7,r2
- vsli.32 q10,q8,#25
- ldr r2,[sp,#20]
- and r3,r3,r12
- vshr.u32 q11,q8,#18
- add r11,r11,r7
- add r7,r7,r0,ror#2
- eor r3,r3,r9
- veor q9,q9,q10
- add r6,r6,r2
- vsli.32 q11,q8,#14
- eor r2,r4,r5
- eor r0,r11,r11,ror#5
- vshr.u32 d24,d1,#17
- add r7,r7,r3
- and r2,r2,r11
- veor q9,q9,q11
- eor r3,r0,r11,ror#19
- eor r0,r7,r7,ror#11
- vsli.32 d24,d1,#15
- eor r2,r2,r5
- add r6,r6,r3,ror#6
- vshr.u32 d25,d1,#10
- eor r3,r7,r8
- eor r0,r0,r7,ror#20
- vadd.i32 q1,q1,q9
- add r6,r6,r2
- ldr r2,[sp,#24]
- veor d25,d25,d24
- and r12,r12,r3
- add r10,r10,r6
- vshr.u32 d24,d1,#19
- add r6,r6,r0,ror#2
- eor r12,r12,r8
- vsli.32 d24,d1,#13
- add r5,r5,r2
- eor r2,r11,r4
- veor d25,d25,d24
- eor r0,r10,r10,ror#5
- add r6,r6,r12
- vadd.i32 d2,d2,d25
- and r2,r2,r10
- eor r12,r0,r10,ror#19
- vshr.u32 d24,d2,#17
- eor r0,r6,r6,ror#11
- eor r2,r2,r4
- vsli.32 d24,d2,#15
- add r5,r5,r12,ror#6
- eor r12,r6,r7
- vshr.u32 d25,d2,#10
- eor r0,r0,r6,ror#20
- add r5,r5,r2
- veor d25,d25,d24
- ldr r2,[sp,#28]
- and r3,r3,r12
- vshr.u32 d24,d2,#19
- add r9,r9,r5
- add r5,r5,r0,ror#2
- eor r3,r3,r7
- vld1.32 {q8},[r14,:128]!
- add r4,r4,r2
- vsli.32 d24,d2,#13
- eor r2,r10,r11
- eor r0,r9,r9,ror#5
- veor d25,d25,d24
- add r5,r5,r3
- and r2,r2,r9
- vadd.i32 d3,d3,d25
- eor r3,r0,r9,ror#19
- eor r0,r5,r5,ror#11
- vadd.i32 q8,q8,q1
- eor r2,r2,r11
- add r4,r4,r3,ror#6
- eor r3,r5,r6
- eor r0,r0,r5,ror#20
- add r4,r4,r2
- ldr r2,[sp,#32]
- and r12,r12,r3
- add r8,r8,r4
- vst1.32 {q8},[r1,:128]!
- add r4,r4,r0,ror#2
- eor r12,r12,r6
- vext.8 q8,q2,q3,#4
- add r11,r11,r2
- eor r2,r9,r10
- eor r0,r8,r8,ror#5
- vext.8 q9,q0,q1,#4
- add r4,r4,r12
- and r2,r2,r8
- eor r12,r0,r8,ror#19
- vshr.u32 q10,q8,#7
- eor r0,r4,r4,ror#11
- eor r2,r2,r10
- vadd.i32 q2,q2,q9
- add r11,r11,r12,ror#6
- eor r12,r4,r5
- vshr.u32 q9,q8,#3
- eor r0,r0,r4,ror#20
- add r11,r11,r2
- vsli.32 q10,q8,#25
- ldr r2,[sp,#36]
- and r3,r3,r12
- vshr.u32 q11,q8,#18
- add r7,r7,r11
- add r11,r11,r0,ror#2
- eor r3,r3,r5
- veor q9,q9,q10
- add r10,r10,r2
- vsli.32 q11,q8,#14
- eor r2,r8,r9
- eor r0,r7,r7,ror#5
- vshr.u32 d24,d3,#17
- add r11,r11,r3
- and r2,r2,r7
- veor q9,q9,q11
- eor r3,r0,r7,ror#19
- eor r0,r11,r11,ror#11
- vsli.32 d24,d3,#15
- eor r2,r2,r9
- add r10,r10,r3,ror#6
- vshr.u32 d25,d3,#10
- eor r3,r11,r4
- eor r0,r0,r11,ror#20
- vadd.i32 q2,q2,q9
- add r10,r10,r2
- ldr r2,[sp,#40]
- veor d25,d25,d24
- and r12,r12,r3
- add r6,r6,r10
- vshr.u32 d24,d3,#19
- add r10,r10,r0,ror#2
- eor r12,r12,r4
- vsli.32 d24,d3,#13
- add r9,r9,r2
- eor r2,r7,r8
- veor d25,d25,d24
- eor r0,r6,r6,ror#5
- add r10,r10,r12
- vadd.i32 d4,d4,d25
- and r2,r2,r6
- eor r12,r0,r6,ror#19
- vshr.u32 d24,d4,#17
- eor r0,r10,r10,ror#11
- eor r2,r2,r8
- vsli.32 d24,d4,#15
- add r9,r9,r12,ror#6
- eor r12,r10,r11
- vshr.u32 d25,d4,#10
- eor r0,r0,r10,ror#20
- add r9,r9,r2
- veor d25,d25,d24
- ldr r2,[sp,#44]
- and r3,r3,r12
- vshr.u32 d24,d4,#19
- add r5,r5,r9
- add r9,r9,r0,ror#2
- eor r3,r3,r11
- vld1.32 {q8},[r14,:128]!
- add r8,r8,r2
- vsli.32 d24,d4,#13
- eor r2,r6,r7
- eor r0,r5,r5,ror#5
- veor d25,d25,d24
- add r9,r9,r3
- and r2,r2,r5
- vadd.i32 d5,d5,d25
- eor r3,r0,r5,ror#19
- eor r0,r9,r9,ror#11
- vadd.i32 q8,q8,q2
- eor r2,r2,r7
- add r8,r8,r3,ror#6
- eor r3,r9,r10
- eor r0,r0,r9,ror#20
- add r8,r8,r2
- ldr r2,[sp,#48]
- and r12,r12,r3
- add r4,r4,r8
- vst1.32 {q8},[r1,:128]!
- add r8,r8,r0,ror#2
- eor r12,r12,r10
- vext.8 q8,q3,q0,#4
- add r7,r7,r2
- eor r2,r5,r6
- eor r0,r4,r4,ror#5
- vext.8 q9,q1,q2,#4
- add r8,r8,r12
- and r2,r2,r4
- eor r12,r0,r4,ror#19
- vshr.u32 q10,q8,#7
- eor r0,r8,r8,ror#11
- eor r2,r2,r6
- vadd.i32 q3,q3,q9
- add r7,r7,r12,ror#6
- eor r12,r8,r9
- vshr.u32 q9,q8,#3
- eor r0,r0,r8,ror#20
- add r7,r7,r2
- vsli.32 q10,q8,#25
- ldr r2,[sp,#52]
- and r3,r3,r12
- vshr.u32 q11,q8,#18
- add r11,r11,r7
- add r7,r7,r0,ror#2
- eor r3,r3,r9
- veor q9,q9,q10
- add r6,r6,r2
- vsli.32 q11,q8,#14
- eor r2,r4,r5
- eor r0,r11,r11,ror#5
- vshr.u32 d24,d5,#17
- add r7,r7,r3
- and r2,r2,r11
- veor q9,q9,q11
- eor r3,r0,r11,ror#19
- eor r0,r7,r7,ror#11
- vsli.32 d24,d5,#15
- eor r2,r2,r5
- add r6,r6,r3,ror#6
- vshr.u32 d25,d5,#10
- eor r3,r7,r8
- eor r0,r0,r7,ror#20
- vadd.i32 q3,q3,q9
- add r6,r6,r2
- ldr r2,[sp,#56]
- veor d25,d25,d24
- and r12,r12,r3
- add r10,r10,r6
- vshr.u32 d24,d5,#19
- add r6,r6,r0,ror#2
- eor r12,r12,r8
- vsli.32 d24,d5,#13
- add r5,r5,r2
- eor r2,r11,r4
- veor d25,d25,d24
- eor r0,r10,r10,ror#5
- add r6,r6,r12
- vadd.i32 d6,d6,d25
- and r2,r2,r10
- eor r12,r0,r10,ror#19
- vshr.u32 d24,d6,#17
- eor r0,r6,r6,ror#11
- eor r2,r2,r4
- vsli.32 d24,d6,#15
- add r5,r5,r12,ror#6
- eor r12,r6,r7
- vshr.u32 d25,d6,#10
- eor r0,r0,r6,ror#20
- add r5,r5,r2
- veor d25,d25,d24
- ldr r2,[sp,#60]
- and r3,r3,r12
- vshr.u32 d24,d6,#19
- add r9,r9,r5
- add r5,r5,r0,ror#2
- eor r3,r3,r7
- vld1.32 {q8},[r14,:128]!
- add r4,r4,r2
- vsli.32 d24,d6,#13
- eor r2,r10,r11
- eor r0,r9,r9,ror#5
- veor d25,d25,d24
- add r5,r5,r3
- and r2,r2,r9
- vadd.i32 d7,d7,d25
- eor r3,r0,r9,ror#19
- eor r0,r5,r5,ror#11
- vadd.i32 q8,q8,q3
- eor r2,r2,r11
- add r4,r4,r3,ror#6
- eor r3,r5,r6
- eor r0,r0,r5,ror#20
- add r4,r4,r2
- ldr r2,[r14]
- and r12,r12,r3
- add r8,r8,r4
- vst1.32 {q8},[r1,:128]!
- add r4,r4,r0,ror#2
- eor r12,r12,r6
- teq r2,#0 @ check for K256 terminator
- ldr r2,[sp,#0]
- sub r1,r1,#64
- bne .L_00_48
-
- ldr r1,[sp,#68]
- ldr r0,[sp,#72]
- sub r14,r14,#256 @ rewind r14
- teq r1,r0
- it eq
- subeq r1,r1,#64 @ avoid SEGV
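-	@ when the last block has already been consumed, step back and
-	@ re-read it so the loads below never touch memory past the input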
- vld1.8 {q0},[r1]! @ load next input block
- vld1.8 {q1},[r1]!
- vld1.8 {q2},[r1]!
- vld1.8 {q3},[r1]!
- it ne
- strne r1,[sp,#68]
- mov r1,sp
- add r11,r11,r2
- eor r2,r9,r10
- eor r0,r8,r8,ror#5
- add r4,r4,r12
- vld1.32 {q8},[r14,:128]!
- and r2,r2,r8
- eor r12,r0,r8,ror#19
- eor r0,r4,r4,ror#11
- eor r2,r2,r10
- vrev32.8 q0,q0
- add r11,r11,r12,ror#6
- eor r12,r4,r5
- eor r0,r0,r4,ror#20
- add r11,r11,r2
- vadd.i32 q8,q8,q0
- ldr r2,[sp,#4]
- and r3,r3,r12
- add r7,r7,r11
- add r11,r11,r0,ror#2
- eor r3,r3,r5
- add r10,r10,r2
- eor r2,r8,r9
- eor r0,r7,r7,ror#5
- add r11,r11,r3
- and r2,r2,r7
- eor r3,r0,r7,ror#19
- eor r0,r11,r11,ror#11
- eor r2,r2,r9
- add r10,r10,r3,ror#6
- eor r3,r11,r4
- eor r0,r0,r11,ror#20
- add r10,r10,r2
- ldr r2,[sp,#8]
- and r12,r12,r3
- add r6,r6,r10
- add r10,r10,r0,ror#2
- eor r12,r12,r4
- add r9,r9,r2
- eor r2,r7,r8
- eor r0,r6,r6,ror#5
- add r10,r10,r12
- and r2,r2,r6
- eor r12,r0,r6,ror#19
- eor r0,r10,r10,ror#11
- eor r2,r2,r8
- add r9,r9,r12,ror#6
- eor r12,r10,r11
- eor r0,r0,r10,ror#20
- add r9,r9,r2
- ldr r2,[sp,#12]
- and r3,r3,r12
- add r5,r5,r9
- add r9,r9,r0,ror#2
- eor r3,r3,r11
- add r8,r8,r2
- eor r2,r6,r7
- eor r0,r5,r5,ror#5
- add r9,r9,r3
- and r2,r2,r5
- eor r3,r0,r5,ror#19
- eor r0,r9,r9,ror#11
- eor r2,r2,r7
- add r8,r8,r3,ror#6
- eor r3,r9,r10
- eor r0,r0,r9,ror#20
- add r8,r8,r2
- ldr r2,[sp,#16]
- and r12,r12,r3
- add r4,r4,r8
- add r8,r8,r0,ror#2
- eor r12,r12,r10
- vst1.32 {q8},[r1,:128]!
- add r7,r7,r2
- eor r2,r5,r6
- eor r0,r4,r4,ror#5
- add r8,r8,r12
- vld1.32 {q8},[r14,:128]!
- and r2,r2,r4
- eor r12,r0,r4,ror#19
- eor r0,r8,r8,ror#11
- eor r2,r2,r6
- vrev32.8 q1,q1
- add r7,r7,r12,ror#6
- eor r12,r8,r9
- eor r0,r0,r8,ror#20
- add r7,r7,r2
- vadd.i32 q8,q8,q1
- ldr r2,[sp,#20]
- and r3,r3,r12
- add r11,r11,r7
- add r7,r7,r0,ror#2
- eor r3,r3,r9
- add r6,r6,r2
- eor r2,r4,r5
- eor r0,r11,r11,ror#5
- add r7,r7,r3
- and r2,r2,r11
- eor r3,r0,r11,ror#19
- eor r0,r7,r7,ror#11
- eor r2,r2,r5
- add r6,r6,r3,ror#6
- eor r3,r7,r8
- eor r0,r0,r7,ror#20
- add r6,r6,r2
- ldr r2,[sp,#24]
- and r12,r12,r3
- add r10,r10,r6
- add r6,r6,r0,ror#2
- eor r12,r12,r8
- add r5,r5,r2
- eor r2,r11,r4
- eor r0,r10,r10,ror#5
- add r6,r6,r12
- and r2,r2,r10
- eor r12,r0,r10,ror#19
- eor r0,r6,r6,ror#11
- eor r2,r2,r4
- add r5,r5,r12,ror#6
- eor r12,r6,r7
- eor r0,r0,r6,ror#20
- add r5,r5,r2
- ldr r2,[sp,#28]
- and r3,r3,r12
- add r9,r9,r5
- add r5,r5,r0,ror#2
- eor r3,r3,r7
- add r4,r4,r2
- eor r2,r10,r11
- eor r0,r9,r9,ror#5
- add r5,r5,r3
- and r2,r2,r9
- eor r3,r0,r9,ror#19
- eor r0,r5,r5,ror#11
- eor r2,r2,r11
- add r4,r4,r3,ror#6
- eor r3,r5,r6
- eor r0,r0,r5,ror#20
- add r4,r4,r2
- ldr r2,[sp,#32]
- and r12,r12,r3
- add r8,r8,r4
- add r4,r4,r0,ror#2
- eor r12,r12,r6
- vst1.32 {q8},[r1,:128]!
- add r11,r11,r2
- eor r2,r9,r10
- eor r0,r8,r8,ror#5
- add r4,r4,r12
- vld1.32 {q8},[r14,:128]!
- and r2,r2,r8
- eor r12,r0,r8,ror#19
- eor r0,r4,r4,ror#11
- eor r2,r2,r10
- vrev32.8 q2,q2
- add r11,r11,r12,ror#6
- eor r12,r4,r5
- eor r0,r0,r4,ror#20
- add r11,r11,r2
- vadd.i32 q8,q8,q2
- ldr r2,[sp,#36]
- and r3,r3,r12
- add r7,r7,r11
- add r11,r11,r0,ror#2
- eor r3,r3,r5
- add r10,r10,r2
- eor r2,r8,r9
- eor r0,r7,r7,ror#5
- add r11,r11,r3
- and r2,r2,r7
- eor r3,r0,r7,ror#19
- eor r0,r11,r11,ror#11
- eor r2,r2,r9
- add r10,r10,r3,ror#6
- eor r3,r11,r4
- eor r0,r0,r11,ror#20
- add r10,r10,r2
- ldr r2,[sp,#40]
- and r12,r12,r3
- add r6,r6,r10
- add r10,r10,r0,ror#2
- eor r12,r12,r4
- add r9,r9,r2
- eor r2,r7,r8
- eor r0,r6,r6,ror#5
- add r10,r10,r12
- and r2,r2,r6
- eor r12,r0,r6,ror#19
- eor r0,r10,r10,ror#11
- eor r2,r2,r8
- add r9,r9,r12,ror#6
- eor r12,r10,r11
- eor r0,r0,r10,ror#20
- add r9,r9,r2
- ldr r2,[sp,#44]
- and r3,r3,r12
- add r5,r5,r9
- add r9,r9,r0,ror#2
- eor r3,r3,r11
- add r8,r8,r2
- eor r2,r6,r7
- eor r0,r5,r5,ror#5
- add r9,r9,r3
- and r2,r2,r5
- eor r3,r0,r5,ror#19
- eor r0,r9,r9,ror#11
- eor r2,r2,r7
- add r8,r8,r3,ror#6
- eor r3,r9,r10
- eor r0,r0,r9,ror#20
- add r8,r8,r2
- ldr r2,[sp,#48]
- and r12,r12,r3
- add r4,r4,r8
- add r8,r8,r0,ror#2
- eor r12,r12,r10
- vst1.32 {q8},[r1,:128]!
- add r7,r7,r2
- eor r2,r5,r6
- eor r0,r4,r4,ror#5
- add r8,r8,r12
- vld1.32 {q8},[r14,:128]!
- and r2,r2,r4
- eor r12,r0,r4,ror#19
- eor r0,r8,r8,ror#11
- eor r2,r2,r6
- vrev32.8 q3,q3
- add r7,r7,r12,ror#6
- eor r12,r8,r9
- eor r0,r0,r8,ror#20
- add r7,r7,r2
- vadd.i32 q8,q8,q3
- ldr r2,[sp,#52]
- and r3,r3,r12
- add r11,r11,r7
- add r7,r7,r0,ror#2
- eor r3,r3,r9
- add r6,r6,r2
- eor r2,r4,r5
- eor r0,r11,r11,ror#5
- add r7,r7,r3
- and r2,r2,r11
- eor r3,r0,r11,ror#19
- eor r0,r7,r7,ror#11
- eor r2,r2,r5
- add r6,r6,r3,ror#6
- eor r3,r7,r8
- eor r0,r0,r7,ror#20
- add r6,r6,r2
- ldr r2,[sp,#56]
- and r12,r12,r3
- add r10,r10,r6
- add r6,r6,r0,ror#2
- eor r12,r12,r8
- add r5,r5,r2
- eor r2,r11,r4
- eor r0,r10,r10,ror#5
- add r6,r6,r12
- and r2,r2,r10
- eor r12,r0,r10,ror#19
- eor r0,r6,r6,ror#11
- eor r2,r2,r4
- add r5,r5,r12,ror#6
- eor r12,r6,r7
- eor r0,r0,r6,ror#20
- add r5,r5,r2
- ldr r2,[sp,#60]
- and r3,r3,r12
- add r9,r9,r5
- add r5,r5,r0,ror#2
- eor r3,r3,r7
- add r4,r4,r2
- eor r2,r10,r11
- eor r0,r9,r9,ror#5
- add r5,r5,r3
- and r2,r2,r9
- eor r3,r0,r9,ror#19
- eor r0,r5,r5,ror#11
- eor r2,r2,r11
- add r4,r4,r3,ror#6
- eor r3,r5,r6
- eor r0,r0,r5,ror#20
- add r4,r4,r2
- ldr r2,[sp,#64]
- and r12,r12,r3
- add r8,r8,r4
- add r4,r4,r0,ror#2
- eor r12,r12,r6
- vst1.32 {q8},[r1,:128]!
- ldr r0,[r2,#0]
- add r4,r4,r12 @ h+=Maj(a,b,c) from the past
- ldr r12,[r2,#4]
- ldr r3,[r2,#8]
- ldr r1,[r2,#12]
- add r4,r4,r0 @ accumulate
- ldr r0,[r2,#16]
- add r5,r5,r12
- ldr r12,[r2,#20]
- add r6,r6,r3
- ldr r3,[r2,#24]
- add r7,r7,r1
- ldr r1,[r2,#28]
- add r8,r8,r0
- str r4,[r2],#4
- add r9,r9,r12
- str r5,[r2],#4
- add r10,r10,r3
- str r6,[r2],#4
- add r11,r11,r1
- str r7,[r2],#4
- stmia r2,{r8-r11}
-
- ittte ne
- movne r1,sp
- ldrne r2,[sp,#0]
- eorne r12,r12,r12
- ldreq sp,[sp,#76] @ restore original sp
- itt ne
- eorne r3,r5,r6
- bne .L_00_48
-
- ldmia sp!,{r4-r12,pc}
-.size sha256_block_data_order_neon,.-sha256_block_data_order_neon
-#endif
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-
-# ifdef __thumb2__
-# define INST(a,b,c,d) .byte c,d|0xc,a,b
-# else
-# define INST(a,b,c,d) .byte a,b,c,d
-# endif
-
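-@ INST() emits each SHA-256 crypto-extension opcode as raw bytes, so the
-@ file assembles even when the assembler has no ARMv8 crypto support;
-@ ARM and Thumb2 encode the same instruction with different byte order.
-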
-.type sha256_block_data_order_armv8,%function
-.align 5
-sha256_block_data_order_armv8:
-.LARMv8:
- vld1.32 {q0,q1},[r0]
-# ifdef __thumb2__
- adr r3,.LARMv8
- sub r3,r3,#.LARMv8-K256
-# else
- adrl r3,K256
-# endif
- add r2,r1,r2,lsl#6 @ len to point at the end of inp
-
-.Loop_v8:
- vld1.8 {q8-q9},[r1]!
- vld1.8 {q10-q11},[r1]!
- vld1.32 {q12},[r3]!
- vrev32.8 q8,q8
- vrev32.8 q9,q9
- vrev32.8 q10,q10
- vrev32.8 q11,q11
- vmov q14,q0 @ offload
- vmov q15,q1
- teq r1,r2
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q8
- INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q9
- INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q10
- INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q11
- INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q8
- INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q9
- INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q10
- INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q11
- INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q8
- INST(0xe2,0x03,0xfa,0xf3) @ sha256su0 q8,q9
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe6,0x0c,0x64,0xf3) @ sha256su1 q8,q10,q11
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q9
- INST(0xe4,0x23,0xfa,0xf3) @ sha256su0 q9,q10
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe0,0x2c,0x66,0xf3) @ sha256su1 q9,q11,q8
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q10
- INST(0xe6,0x43,0xfa,0xf3) @ sha256su0 q10,q11
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
- INST(0xe2,0x4c,0x60,0xf3) @ sha256su1 q10,q8,q9
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q11
- INST(0xe0,0x63,0xfa,0xf3) @ sha256su0 q11,q8
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
- INST(0xe4,0x6c,0x62,0xf3) @ sha256su1 q11,q9,q10
- vld1.32 {q13},[r3]!
- vadd.i32 q12,q12,q8
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
-
- vld1.32 {q12},[r3]!
- vadd.i32 q13,q13,q9
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
-
- vld1.32 {q13},[r3]
- vadd.i32 q12,q12,q10
- sub r3,r3,#256-16 @ rewind
- vmov q2,q0
- INST(0x68,0x0c,0x02,0xf3) @ sha256h q0,q1,q12
- INST(0x68,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q12
-
- vadd.i32 q13,q13,q11
- vmov q2,q0
- INST(0x6a,0x0c,0x02,0xf3) @ sha256h q0,q1,q13
- INST(0x6a,0x2c,0x14,0xf3) @ sha256h2 q1,q2,q13
-
- vadd.i32 q0,q0,q14
- vadd.i32 q1,q1,q15
- it ne
- bne .Loop_v8
-
- vst1.32 {q0,q1},[r0]
-
- bx lr @ bx lr
-.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
-#endif
-.asciz "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-.comm OPENSSL_armcap_P,4,4
-#endif
diff --git a/arch/arm/crypto/sha512-core.S_shipped b/arch/arm/crypto/sha512-core.S_shipped
deleted file mode 100644
index 03014624f2ab..000000000000
--- a/arch/arm/crypto/sha512-core.S_shipped
+++ /dev/null
@@ -1,1869 +0,0 @@
-@ SPDX-License-Identifier: GPL-2.0
-
-@ This code is taken from the OpenSSL project but the author (Andy Polyakov)
-@ has relicensed it under the GPLv2. Therefore this program is free software;
-@ you can redistribute it and/or modify it under the terms of the GNU General
-@ Public License version 2 as published by the Free Software Foundation.
-@
-@ The original headers, including the original license headers, are
-@ included below for completeness.
-
-@ ====================================================================
-@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-@ project. The module is, however, dual licensed under OpenSSL and
-@ CRYPTOGAMS licenses depending on where you obtain it. For further
-@ details see https://www.openssl.org/~appro/cryptogams/.
-@ ====================================================================
-
-@ SHA512 block procedure for ARMv4. September 2007.
-
-@ This code is ~4.5 (four and a half) times faster than the code generated
-@ by gcc 3.4, and it spends ~72 clock cycles per byte [on a single-issue
-@ Xscale PXA250 core].
-@
-@ July 2010.
-@
-@ Rescheduling for the dual-issue pipeline resulted in a 6% improvement
-@ on the Cortex A8 core and ~40 cycles per processed byte.
-
-@ February 2011.
-@
-@ Profiler-assisted and platform-specific optimization resulted in a 7%
-@ improvement on the Cortex A8 core and ~38 cycles per byte.
-
-@ March 2011.
-@
-@ Add NEON implementation. On Cortex A8 it was measured to process
-@ one byte in 23.3 cycles, or ~60% faster than the integer-only code.
-
-@ August 2012.
-@
-@ Improve NEON performance by 12% on Snapdragon S4. In absolute
-@ terms it's 22.6 cycles per byte, which is a disappointing result.
-@ Technical writers asserted that the 3-way S4 pipeline can sustain
-@ multiple NEON instructions per cycle, but dual NEON issue could
-@ not be observed; see https://www.openssl.org/~appro/Snapdragon-S4.html
-@ for further details. As a side note, the Cortex-A15 processes one byte
-@ in 16 cycles.
-
-@ Byte order [in]dependence. =========================================
-@
-@ Originally the caller was expected to maintain a specific *dword* order
-@ in h[0-7], namely with the most significant dword at the *lower* address,
-@ which was reflected in the two parameters below as 0 and 4. Now the
-@ caller is expected to maintain native byte order for whole 64-bit values.
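-@ (The LO/HI offsets defined below always address the low and the high
-@ 32-bit word of a 64-bit value, whichever endianness is in effect.)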
-#ifndef __KERNEL__
-# include "arm_arch.h"
-# define VFP_ABI_PUSH vstmdb sp!,{d8-d15}
-# define VFP_ABI_POP vldmia sp!,{d8-d15}
-#else
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
-# define __ARM_MAX_ARCH__ 7
-# define VFP_ABI_PUSH
-# define VFP_ABI_POP
-#endif
-
-#ifdef __ARMEL__
-# define LO 0
-# define HI 4
-# define WORD64(hi0,lo0,hi1,lo1) .word lo0,hi0, lo1,hi1
-#else
-# define HI 0
-# define LO 4
-# define WORD64(hi0,lo0,hi1,lo1) .word hi0,lo0, hi1,lo1
-#endif
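-
-@ WORD64() lays each 64-bit constant out so that K[i]+LO is its low
-@ word and K[i]+HI its high word on either endianness.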
-
-.text
-#if __ARM_ARCH__<7
-.code 32
-#else
-.syntax unified
-# ifdef __thumb2__
-.thumb
-# else
-.code 32
-# endif
-#endif
-
-.type K512,%object
-.align 5
-K512:
-WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
-WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
-WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
-WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
-WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
-WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
-WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
-WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
-WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
-WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
-WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
-WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
-WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
-WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
-WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
-WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
-WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
-WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
-WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
-WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
-WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
-WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
-WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
-WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
-WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
-WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
-WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
-WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
-WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
-WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
-WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
-WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
-WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
-WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
-WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
-WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
-WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
-WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
-WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
-WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
-.size K512,.-K512
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-.LOPENSSL_armcap:
-.word OPENSSL_armcap_P-sha512_block_data_order
-.skip 32-4
-#else
-.skip 32
-#endif
-
-.global sha512_block_data_order
-.type sha512_block_data_order,%function
-sha512_block_data_order:
-.Lsha512_block_data_order:
-#if __ARM_ARCH__<7
- sub r3,pc,#8 @ sha512_block_data_order
-#else
- adr r3,.Lsha512_block_data_order
-#endif
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
- ldr r12,.LOPENSSL_armcap
- ldr r12,[r3,r12] @ OPENSSL_armcap_P
- tst r12,#1
- bne .LNEON
-#endif
- add r2,r1,r2,lsl#7 @ len to point at the end of inp
- stmdb sp!,{r4-r12,lr}
- sub r14,r3,#672 @ K512
- sub sp,sp,#9*8
-
- ldr r7,[r0,#32+LO]
- ldr r8,[r0,#32+HI]
- ldr r9, [r0,#48+LO]
- ldr r10, [r0,#48+HI]
- ldr r11, [r0,#56+LO]
- ldr r12, [r0,#56+HI]
-.Loop:
- str r9, [sp,#48+0]
- str r10, [sp,#48+4]
- str r11, [sp,#56+0]
- str r12, [sp,#56+4]
- ldr r5,[r0,#0+LO]
- ldr r6,[r0,#0+HI]
- ldr r3,[r0,#8+LO]
- ldr r4,[r0,#8+HI]
- ldr r9, [r0,#16+LO]
- ldr r10, [r0,#16+HI]
- ldr r11, [r0,#24+LO]
- ldr r12, [r0,#24+HI]
- str r3,[sp,#8+0]
- str r4,[sp,#8+4]
- str r9, [sp,#16+0]
- str r10, [sp,#16+4]
- str r11, [sp,#24+0]
- str r12, [sp,#24+4]
- ldr r3,[r0,#40+LO]
- ldr r4,[r0,#40+HI]
- str r3,[sp,#40+0]
- str r4,[sp,#40+4]
-
-.L00_15:
-#if __ARM_ARCH__<7
- ldrb r3,[r1,#7]
- ldrb r9, [r1,#6]
- ldrb r10, [r1,#5]
- ldrb r11, [r1,#4]
- ldrb r4,[r1,#3]
- ldrb r12, [r1,#2]
- orr r3,r3,r9,lsl#8
- ldrb r9, [r1,#1]
- orr r3,r3,r10,lsl#16
- ldrb r10, [r1],#8
- orr r3,r3,r11,lsl#24
- orr r4,r4,r12,lsl#8
- orr r4,r4,r9,lsl#16
- orr r4,r4,r10,lsl#24
-#else
- ldr r3,[r1,#4]
- ldr r4,[r1],#8
-#ifdef __ARMEL__
- rev r3,r3
- rev r4,r4
-#endif
-#endif
- @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
- @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
- @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
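-	@ (ARMv4 has no 64-bit rotate, so each ROTR above is assembled from
-	@ the paired 32-bit shifts on the lo/hi register halves.)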
- mov r9,r7,lsr#14
- str r3,[sp,#64+0]
- mov r10,r8,lsr#14
- str r4,[sp,#64+4]
- eor r9,r9,r8,lsl#18
- ldr r11,[sp,#56+0] @ h.lo
- eor r10,r10,r7,lsl#18
- ldr r12,[sp,#56+4] @ h.hi
- eor r9,r9,r7,lsr#18
- eor r10,r10,r8,lsr#18
- eor r9,r9,r8,lsl#14
- eor r10,r10,r7,lsl#14
- eor r9,r9,r8,lsr#9
- eor r10,r10,r7,lsr#9
- eor r9,r9,r7,lsl#23
- eor r10,r10,r8,lsl#23 @ Sigma1(e)
- adds r3,r3,r9
- ldr r9,[sp,#40+0] @ f.lo
- adc r4,r4,r10 @ T += Sigma1(e)
- ldr r10,[sp,#40+4] @ f.hi
- adds r3,r3,r11
- ldr r11,[sp,#48+0] @ g.lo
- adc r4,r4,r12 @ T += h
- ldr r12,[sp,#48+4] @ g.hi
-
- eor r9,r9,r11
- str r7,[sp,#32+0]
- eor r10,r10,r12
- str r8,[sp,#32+4]
- and r9,r9,r7
- str r5,[sp,#0+0]
- and r10,r10,r8
- str r6,[sp,#0+4]
- eor r9,r9,r11
- ldr r11,[r14,#LO] @ K[i].lo
- eor r10,r10,r12 @ Ch(e,f,g)
- ldr r12,[r14,#HI] @ K[i].hi
-
- adds r3,r3,r9
- ldr r7,[sp,#24+0] @ d.lo
- adc r4,r4,r10 @ T += Ch(e,f,g)
- ldr r8,[sp,#24+4] @ d.hi
- adds r3,r3,r11
- and r9,r11,#0xff
- adc r4,r4,r12 @ T += K[i]
- adds r7,r7,r3
- ldr r11,[sp,#8+0] @ b.lo
- adc r8,r8,r4 @ d += T
- teq r9,#148
-
- ldr r12,[sp,#16+0] @ c.lo
-#if __ARM_ARCH__>=7
- it eq @ Thumb2 thing, sanity check in ARM
-#endif
- orreq r14,r14,#1
- @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
- @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
- @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
- mov r9,r5,lsr#28
- mov r10,r6,lsr#28
- eor r9,r9,r6,lsl#4
- eor r10,r10,r5,lsl#4
- eor r9,r9,r6,lsr#2
- eor r10,r10,r5,lsr#2
- eor r9,r9,r5,lsl#30
- eor r10,r10,r6,lsl#30
- eor r9,r9,r6,lsr#7
- eor r10,r10,r5,lsr#7
- eor r9,r9,r5,lsl#25
- eor r10,r10,r6,lsl#25 @ Sigma0(a)
- adds r3,r3,r9
- and r9,r5,r11
- adc r4,r4,r10 @ T += Sigma0(a)
-
- ldr r10,[sp,#8+4] @ b.hi
- orr r5,r5,r11
- ldr r11,[sp,#16+4] @ c.hi
- and r5,r5,r12
- and r12,r6,r10
- orr r6,r6,r10
- orr r5,r5,r9 @ Maj(a,b,c).lo
- and r6,r6,r11
- adds r5,r5,r3
- orr r6,r6,r12 @ Maj(a,b,c).hi
- sub sp,sp,#8
- adc r6,r6,r4 @ h += T
- tst r14,#1
- add r14,r14,#8
- tst r14,#1
- beq .L00_15
- ldr r9,[sp,#184+0]
- ldr r10,[sp,#184+4]
- bic r14,r14,#1
-.L16_79:
- @ sigma0(x) (ROTR((x),1) ^ ROTR((x),8) ^ ((x)>>7))
- @ LO lo>>1^hi<<31 ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
- @ HI hi>>1^lo<<31 ^ hi>>8^lo<<24 ^ hi>>7
- mov r3,r9,lsr#1
- ldr r11,[sp,#80+0]
- mov r4,r10,lsr#1
- ldr r12,[sp,#80+4]
- eor r3,r3,r10,lsl#31
- eor r4,r4,r9,lsl#31
- eor r3,r3,r9,lsr#8
- eor r4,r4,r10,lsr#8
- eor r3,r3,r10,lsl#24
- eor r4,r4,r9,lsl#24
- eor r3,r3,r9,lsr#7
- eor r4,r4,r10,lsr#7
- eor r3,r3,r10,lsl#25
-
- @ sigma1(x) (ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
- @ LO lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
- @ HI hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
- mov r9,r11,lsr#19
- mov r10,r12,lsr#19
- eor r9,r9,r12,lsl#13
- eor r10,r10,r11,lsl#13
- eor r9,r9,r12,lsr#29
- eor r10,r10,r11,lsr#29
- eor r9,r9,r11,lsl#3
- eor r10,r10,r12,lsl#3
- eor r9,r9,r11,lsr#6
- eor r10,r10,r12,lsr#6
- ldr r11,[sp,#120+0]
- eor r9,r9,r12,lsl#26
-
- ldr r12,[sp,#120+4]
- adds r3,r3,r9
- ldr r9,[sp,#192+0]
- adc r4,r4,r10
-
- ldr r10,[sp,#192+4]
- adds r3,r3,r11
- adc r4,r4,r12
- adds r3,r3,r9
- adc r4,r4,r10
- @ Sigma1(x) (ROTR((x),14) ^ ROTR((x),18) ^ ROTR((x),41))
- @ LO lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
- @ HI hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
- mov r9,r7,lsr#14
- str r3,[sp,#64+0]
- mov r10,r8,lsr#14
- str r4,[sp,#64+4]
- eor r9,r9,r8,lsl#18
- ldr r11,[sp,#56+0] @ h.lo
- eor r10,r10,r7,lsl#18
- ldr r12,[sp,#56+4] @ h.hi
- eor r9,r9,r7,lsr#18
- eor r10,r10,r8,lsr#18
- eor r9,r9,r8,lsl#14
- eor r10,r10,r7,lsl#14
- eor r9,r9,r8,lsr#9
- eor r10,r10,r7,lsr#9
- eor r9,r9,r7,lsl#23
- eor r10,r10,r8,lsl#23 @ Sigma1(e)
- adds r3,r3,r9
- ldr r9,[sp,#40+0] @ f.lo
- adc r4,r4,r10 @ T += Sigma1(e)
- ldr r10,[sp,#40+4] @ f.hi
- adds r3,r3,r11
- ldr r11,[sp,#48+0] @ g.lo
- adc r4,r4,r12 @ T += h
- ldr r12,[sp,#48+4] @ g.hi
-
- eor r9,r9,r11
- str r7,[sp,#32+0]
- eor r10,r10,r12
- str r8,[sp,#32+4]
- and r9,r9,r7
- str r5,[sp,#0+0]
- and r10,r10,r8
- str r6,[sp,#0+4]
- eor r9,r9,r11
- ldr r11,[r14,#LO] @ K[i].lo
- eor r10,r10,r12 @ Ch(e,f,g)
- ldr r12,[r14,#HI] @ K[i].hi
-
- adds r3,r3,r9
- ldr r7,[sp,#24+0] @ d.lo
- adc r4,r4,r10 @ T += Ch(e,f,g)
- ldr r8,[sp,#24+4] @ d.hi
- adds r3,r3,r11
- and r9,r11,#0xff
- adc r4,r4,r12 @ T += K[i]
- adds r7,r7,r3
- ldr r11,[sp,#8+0] @ b.lo
- adc r8,r8,r4 @ d += T
- teq r9,#23
-
- ldr r12,[sp,#16+0] @ c.lo
-#if __ARM_ARCH__>=7
- it eq @ Thumb2 thing, sanity check in ARM
-#endif
- orreq r14,r14,#1
- @ Sigma0(x) (ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
- @ LO lo>>28^hi<<4 ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
- @ HI hi>>28^lo<<4 ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
- mov r9,r5,lsr#28
- mov r10,r6,lsr#28
- eor r9,r9,r6,lsl#4
- eor r10,r10,r5,lsl#4
- eor r9,r9,r6,lsr#2
- eor r10,r10,r5,lsr#2
- eor r9,r9,r5,lsl#30
- eor r10,r10,r6,lsl#30
- eor r9,r9,r6,lsr#7
- eor r10,r10,r5,lsr#7
- eor r9,r9,r5,lsl#25
- eor r10,r10,r6,lsl#25 @ Sigma0(a)
- adds r3,r3,r9
- and r9,r5,r11
- adc r4,r4,r10 @ T += Sigma0(a)
-
- ldr r10,[sp,#8+4] @ b.hi
- orr r5,r5,r11
- ldr r11,[sp,#16+4] @ c.hi
- and r5,r5,r12
- and r12,r6,r10
- orr r6,r6,r10
- orr r5,r5,r9 @ Maj(a,b,c).lo
- and r6,r6,r11
- adds r5,r5,r3
- orr r6,r6,r12 @ Maj(a,b,c).hi
- sub sp,sp,#8
- adc r6,r6,r4 @ h += T
- tst r14,#1
- add r14,r14,#8
-#if __ARM_ARCH__>=7
- ittt eq @ Thumb2 thing, sanity check in ARM
-#endif
- ldreq r9,[sp,#184+0]
- ldreq r10,[sp,#184+4]
- beq .L16_79
- bic r14,r14,#1
-
- ldr r3,[sp,#8+0]
- ldr r4,[sp,#8+4]
- ldr r9, [r0,#0+LO]
- ldr r10, [r0,#0+HI]
- ldr r11, [r0,#8+LO]
- ldr r12, [r0,#8+HI]
- adds r9,r5,r9
- str r9, [r0,#0+LO]
- adc r10,r6,r10
- str r10, [r0,#0+HI]
- adds r11,r3,r11
- str r11, [r0,#8+LO]
- adc r12,r4,r12
- str r12, [r0,#8+HI]
-
- ldr r5,[sp,#16+0]
- ldr r6,[sp,#16+4]
- ldr r3,[sp,#24+0]
- ldr r4,[sp,#24+4]
- ldr r9, [r0,#16+LO]
- ldr r10, [r0,#16+HI]
- ldr r11, [r0,#24+LO]
- ldr r12, [r0,#24+HI]
- adds r9,r5,r9
- str r9, [r0,#16+LO]
- adc r10,r6,r10
- str r10, [r0,#16+HI]
- adds r11,r3,r11
- str r11, [r0,#24+LO]
- adc r12,r4,r12
- str r12, [r0,#24+HI]
-
- ldr r3,[sp,#40+0]
- ldr r4,[sp,#40+4]
- ldr r9, [r0,#32+LO]
- ldr r10, [r0,#32+HI]
- ldr r11, [r0,#40+LO]
- ldr r12, [r0,#40+HI]
- adds r7,r7,r9
- str r7,[r0,#32+LO]
- adc r8,r8,r10
- str r8,[r0,#32+HI]
- adds r11,r3,r11
- str r11, [r0,#40+LO]
- adc r12,r4,r12
- str r12, [r0,#40+HI]
-
- ldr r5,[sp,#48+0]
- ldr r6,[sp,#48+4]
- ldr r3,[sp,#56+0]
- ldr r4,[sp,#56+4]
- ldr r9, [r0,#48+LO]
- ldr r10, [r0,#48+HI]
- ldr r11, [r0,#56+LO]
- ldr r12, [r0,#56+HI]
- adds r9,r5,r9
- str r9, [r0,#48+LO]
- adc r10,r6,r10
- str r10, [r0,#48+HI]
- adds r11,r3,r11
- str r11, [r0,#56+LO]
- adc r12,r4,r12
- str r12, [r0,#56+HI]
-
- add sp,sp,#640
- sub r14,r14,#640
-
- teq r1,r2
- bne .Loop
-
- add sp,sp,#8*9 @ destroy frame
-#if __ARM_ARCH__>=5
- ldmia sp!,{r4-r12,pc}
-#else
- ldmia sp!,{r4-r12,lr}
- tst lr,#1
- moveq pc,lr @ be binary compatible with V4, yet
- .word 0xe12fff1e @ interoperable with Thumb ISA:-)
-#endif
-.size sha512_block_data_order,.-sha512_block_data_order
-#if __ARM_MAX_ARCH__>=7
-.arch armv7-a
-.fpu neon
-
-.global sha512_block_data_order_neon
-.type sha512_block_data_order_neon,%function
-.align 4
-sha512_block_data_order_neon:
-.LNEON:
- dmb @ errata #451034 on early Cortex A8
- add r2,r1,r2,lsl#7 @ len to point at the end of inp
- VFP_ABI_PUSH
- adr r3,.Lsha512_block_data_order
- sub r3,r3,.Lsha512_block_data_order-K512
- vldmia r0,{d16-d23} @ load context
-.Loop_neon:
- vshr.u64 d24,d20,#14 @ 0
-#if 0<16
- vld1.64 {d0},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d20,#18
-#if 0>0
- vadd.i64 d16,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d20,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
-#if 0<16 && defined(__ARMEL__)
- vrev64.8 d0,d0
-#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
- vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d23
- vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d16,#39
- vadd.i64 d28,d0
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
- vadd.i64 d19,d27
- vadd.i64 d30,d27
- @ vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 1
-#if 1<16
- vld1.64 {d1},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
-#if 1>0
- vadd.i64 d23,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
-#if 1<16 && defined(__ARMEL__)
- vrev64.8 d1,d1
-#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
- vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d22
- vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d23,#39
- vadd.i64 d28,d1
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
- vadd.i64 d18,d27
- vadd.i64 d30,d27
- @ vadd.i64 d22,d30
- vshr.u64 d24,d18,#14 @ 2
-#if 2<16
- vld1.64 {d2},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d18,#18
-#if 2>0
- vadd.i64 d22,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d18,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
-#if 2<16 && defined(__ARMEL__)
- vrev64.8 d2,d2
-#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
- vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d21
- vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d22,#39
- vadd.i64 d28,d2
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
- vadd.i64 d17,d27
- vadd.i64 d30,d27
- @ vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 3
-#if 3<16
- vld1.64 {d3},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
-#if 3>0
- vadd.i64 d21,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
-#if 3<16 && defined(__ARMEL__)
- vrev64.8 d3,d3
-#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
- vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d20
- vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d21,#39
- vadd.i64 d28,d3
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
- vadd.i64 d16,d27
- vadd.i64 d30,d27
- @ vadd.i64 d20,d30
- vshr.u64 d24,d16,#14 @ 4
-#if 4<16
- vld1.64 {d4},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d16,#18
-#if 4>0
- vadd.i64 d20,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d16,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
-#if 4<16 && defined(__ARMEL__)
- vrev64.8 d4,d4
-#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
- vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d19
- vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d20,#39
- vadd.i64 d28,d4
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
- vadd.i64 d23,d27
- vadd.i64 d30,d27
- @ vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 5
-#if 5<16
- vld1.64 {d5},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
-#if 5>0
- vadd.i64 d19,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
-#if 5<16 && defined(__ARMEL__)
- vrev64.8 d5,d5
-#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
- vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d18
- vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d19,#39
- vadd.i64 d28,d5
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
- vadd.i64 d22,d27
- vadd.i64 d30,d27
- @ vadd.i64 d18,d30
- vshr.u64 d24,d22,#14 @ 6
-#if 6<16
- vld1.64 {d6},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d22,#18
-#if 6>0
- vadd.i64 d18,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d22,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
-#if 6<16 && defined(__ARMEL__)
- vrev64.8 d6,d6
-#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
- vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d17
- vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d18,#39
- vadd.i64 d28,d6
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
- vadd.i64 d21,d27
- vadd.i64 d30,d27
- @ vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 7
-#if 7<16
- vld1.64 {d7},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
-#if 7>0
- vadd.i64 d17,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
-#if 7<16 && defined(__ARMEL__)
- vrev64.8 d7,d7
-#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
- vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d16
- vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d17,#39
- vadd.i64 d28,d7
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
- vadd.i64 d20,d27
- vadd.i64 d30,d27
- @ vadd.i64 d16,d30
- vshr.u64 d24,d20,#14 @ 8
-#if 8<16
- vld1.64 {d8},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d20,#18
-#if 8>0
- vadd.i64 d16,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d20,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
-#if 8<16 && defined(__ARMEL__)
- vrev64.8 d8,d8
-#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
- vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d23
- vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d16,#39
- vadd.i64 d28,d8
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
- vadd.i64 d19,d27
- vadd.i64 d30,d27
- @ vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 9
-#if 9<16
- vld1.64 {d9},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
-#if 9>0
- vadd.i64 d23,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
-#if 9<16 && defined(__ARMEL__)
- vrev64.8 d9,d9
-#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
- vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d22
- vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d23,#39
- vadd.i64 d28,d9
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
- vadd.i64 d18,d27
- vadd.i64 d30,d27
- @ vadd.i64 d22,d30
- vshr.u64 d24,d18,#14 @ 10
-#if 10<16
- vld1.64 {d10},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d18,#18
-#if 10>0
- vadd.i64 d22,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d18,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
-#if 10<16 && defined(__ARMEL__)
- vrev64.8 d10,d10
-#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
- vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d21
- vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d22,#39
- vadd.i64 d28,d10
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
- vadd.i64 d17,d27
- vadd.i64 d30,d27
- @ vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 11
-#if 11<16
- vld1.64 {d11},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
-#if 11>0
- vadd.i64 d21,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
-#if 11<16 && defined(__ARMEL__)
- vrev64.8 d11,d11
-#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
- vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d20
- vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d21,#39
- vadd.i64 d28,d11
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
- vadd.i64 d16,d27
- vadd.i64 d30,d27
- @ vadd.i64 d20,d30
- vshr.u64 d24,d16,#14 @ 12
-#if 12<16
- vld1.64 {d12},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d16,#18
-#if 12>0
- vadd.i64 d20,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d16,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
-#if 12<16 && defined(__ARMEL__)
- vrev64.8 d12,d12
-#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
- vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d19
- vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d20,#39
- vadd.i64 d28,d12
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
- vadd.i64 d23,d27
- vadd.i64 d30,d27
- @ vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 13
-#if 13<16
- vld1.64 {d13},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
-#if 13>0
- vadd.i64 d19,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
-#if 13<16 && defined(__ARMEL__)
- vrev64.8 d13,d13
-#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
- vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d18
- vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d19,#39
- vadd.i64 d28,d13
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
- vadd.i64 d22,d27
- vadd.i64 d30,d27
- @ vadd.i64 d18,d30
- vshr.u64 d24,d22,#14 @ 14
-#if 14<16
- vld1.64 {d14},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d22,#18
-#if 14>0
- vadd.i64 d18,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d22,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
-#if 14<16 && defined(__ARMEL__)
- vrev64.8 d14,d14
-#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
- vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d17
- vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d18,#39
- vadd.i64 d28,d14
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
- vadd.i64 d21,d27
- vadd.i64 d30,d27
- @ vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 15
-#if 15<16
- vld1.64 {d15},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
-#if 15>0
- vadd.i64 d17,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
-#if 15<16 && defined(__ARMEL__)
- vrev64.8 d15,d15
-#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
- vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d16
- vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d17,#39
- vadd.i64 d28,d15
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
- vadd.i64 d20,d27
- vadd.i64 d30,d27
- @ vadd.i64 d16,d30
- mov r12,#4
-.L16_79_neon:
- subs r12,#1
- vshr.u64 q12,q7,#19
- vshr.u64 q13,q7,#61
- vadd.i64 d16,d30 @ h+=Maj from the past
- vshr.u64 q15,q7,#6
- vsli.64 q12,q7,#45
- vext.8 q14,q0,q1,#8 @ X[i+1]
- vsli.64 q13,q7,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q0,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q4,q5,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d20,#14 @ from NEON_00_15
- vadd.i64 q0,q14
- vshr.u64 d25,d20,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d20,#41 @ from NEON_00_15
- vadd.i64 q0,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
-#if 16<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
- vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d23
- vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d16,#39
- vadd.i64 d28,d0
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
- vadd.i64 d19,d27
- vadd.i64 d30,d27
- @ vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 17
-#if 17<16
- vld1.64 {d1},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
-#if 17>0
- vadd.i64 d23,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
-#if 17<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
- vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d22
- vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d23,#39
- vadd.i64 d28,d1
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
- vadd.i64 d18,d27
- vadd.i64 d30,d27
- @ vadd.i64 d22,d30
- vshr.u64 q12,q0,#19
- vshr.u64 q13,q0,#61
- vadd.i64 d22,d30 @ h+=Maj from the past
- vshr.u64 q15,q0,#6
- vsli.64 q12,q0,#45
- vext.8 q14,q1,q2,#8 @ X[i+1]
- vsli.64 q13,q0,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q1,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q5,q6,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d18,#14 @ from NEON_00_15
- vadd.i64 q1,q14
- vshr.u64 d25,d18,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d18,#41 @ from NEON_00_15
- vadd.i64 q1,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
-#if 18<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
- vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d21
- vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d22,#39
- vadd.i64 d28,d2
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
- vadd.i64 d17,d27
- vadd.i64 d30,d27
- @ vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 19
-#if 19<16
- vld1.64 {d3},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
-#if 19>0
- vadd.i64 d21,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
-#if 19<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
- vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d20
- vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d21,#39
- vadd.i64 d28,d3
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
- vadd.i64 d16,d27
- vadd.i64 d30,d27
- @ vadd.i64 d20,d30
- vshr.u64 q12,q1,#19
- vshr.u64 q13,q1,#61
- vadd.i64 d20,d30 @ h+=Maj from the past
- vshr.u64 q15,q1,#6
- vsli.64 q12,q1,#45
- vext.8 q14,q2,q3,#8 @ X[i+1]
- vsli.64 q13,q1,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q2,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q6,q7,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d16,#14 @ from NEON_00_15
- vadd.i64 q2,q14
- vshr.u64 d25,d16,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d16,#41 @ from NEON_00_15
- vadd.i64 q2,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
-#if 20<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
- vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d19
- vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d20,#39
- vadd.i64 d28,d4
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
- vadd.i64 d23,d27
- vadd.i64 d30,d27
- @ vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 21
-#if 21<16
- vld1.64 {d5},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
-#if 21>0
- vadd.i64 d19,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
-#if 21<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
- vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d18
- vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d19,#39
- vadd.i64 d28,d5
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
- vadd.i64 d22,d27
- vadd.i64 d30,d27
- @ vadd.i64 d18,d30
- vshr.u64 q12,q2,#19
- vshr.u64 q13,q2,#61
- vadd.i64 d18,d30 @ h+=Maj from the past
- vshr.u64 q15,q2,#6
- vsli.64 q12,q2,#45
- vext.8 q14,q3,q4,#8 @ X[i+1]
- vsli.64 q13,q2,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q3,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q7,q0,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d22,#14 @ from NEON_00_15
- vadd.i64 q3,q14
- vshr.u64 d25,d22,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d22,#41 @ from NEON_00_15
- vadd.i64 q3,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
-#if 22<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
- vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d17
- vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d18,#39
- vadd.i64 d28,d6
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
- vadd.i64 d21,d27
- vadd.i64 d30,d27
- @ vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 23
-#if 23<16
- vld1.64 {d7},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
-#if 23>0
- vadd.i64 d17,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
-#if 23<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
- vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d16
- vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d17,#39
- vadd.i64 d28,d7
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
- vadd.i64 d20,d27
- vadd.i64 d30,d27
- @ vadd.i64 d16,d30
- vshr.u64 q12,q3,#19
- vshr.u64 q13,q3,#61
- vadd.i64 d16,d30 @ h+=Maj from the past
- vshr.u64 q15,q3,#6
- vsli.64 q12,q3,#45
- vext.8 q14,q4,q5,#8 @ X[i+1]
- vsli.64 q13,q3,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q4,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q0,q1,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d20,#14 @ from NEON_00_15
- vadd.i64 q4,q14
- vshr.u64 d25,d20,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d20,#41 @ from NEON_00_15
- vadd.i64 q4,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d20,#50
- vsli.64 d25,d20,#46
- vmov d29,d20
- vsli.64 d26,d20,#23
-#if 24<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d21,d22 @ Ch(e,f,g)
- vshr.u64 d24,d16,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d23
- vshr.u64 d25,d16,#34
- vsli.64 d24,d16,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d16,#39
- vadd.i64 d28,d8
- vsli.64 d25,d16,#30
- veor d30,d16,d17
- vsli.64 d26,d16,#25
- veor d23,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d18,d17 @ Maj(a,b,c)
- veor d23,d26 @ Sigma0(a)
- vadd.i64 d19,d27
- vadd.i64 d30,d27
- @ vadd.i64 d23,d30
- vshr.u64 d24,d19,#14 @ 25
-#if 25<16
- vld1.64 {d9},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d19,#18
-#if 25>0
- vadd.i64 d23,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d19,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d19,#50
- vsli.64 d25,d19,#46
- vmov d29,d19
- vsli.64 d26,d19,#23
-#if 25<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d20,d21 @ Ch(e,f,g)
- vshr.u64 d24,d23,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d22
- vshr.u64 d25,d23,#34
- vsli.64 d24,d23,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d23,#39
- vadd.i64 d28,d9
- vsli.64 d25,d23,#30
- veor d30,d23,d16
- vsli.64 d26,d23,#25
- veor d22,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d17,d16 @ Maj(a,b,c)
- veor d22,d26 @ Sigma0(a)
- vadd.i64 d18,d27
- vadd.i64 d30,d27
- @ vadd.i64 d22,d30
- vshr.u64 q12,q4,#19
- vshr.u64 q13,q4,#61
- vadd.i64 d22,d30 @ h+=Maj from the past
- vshr.u64 q15,q4,#6
- vsli.64 q12,q4,#45
- vext.8 q14,q5,q6,#8 @ X[i+1]
- vsli.64 q13,q4,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q5,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q1,q2,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d18,#14 @ from NEON_00_15
- vadd.i64 q5,q14
- vshr.u64 d25,d18,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d18,#41 @ from NEON_00_15
- vadd.i64 q5,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d18,#50
- vsli.64 d25,d18,#46
- vmov d29,d18
- vsli.64 d26,d18,#23
-#if 26<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d19,d20 @ Ch(e,f,g)
- vshr.u64 d24,d22,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d21
- vshr.u64 d25,d22,#34
- vsli.64 d24,d22,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d22,#39
- vadd.i64 d28,d10
- vsli.64 d25,d22,#30
- veor d30,d22,d23
- vsli.64 d26,d22,#25
- veor d21,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d16,d23 @ Maj(a,b,c)
- veor d21,d26 @ Sigma0(a)
- vadd.i64 d17,d27
- vadd.i64 d30,d27
- @ vadd.i64 d21,d30
- vshr.u64 d24,d17,#14 @ 27
-#if 27<16
- vld1.64 {d11},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d17,#18
-#if 27>0
- vadd.i64 d21,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d17,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d17,#50
- vsli.64 d25,d17,#46
- vmov d29,d17
- vsli.64 d26,d17,#23
-#if 27<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d18,d19 @ Ch(e,f,g)
- vshr.u64 d24,d21,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d20
- vshr.u64 d25,d21,#34
- vsli.64 d24,d21,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d21,#39
- vadd.i64 d28,d11
- vsli.64 d25,d21,#30
- veor d30,d21,d22
- vsli.64 d26,d21,#25
- veor d20,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d23,d22 @ Maj(a,b,c)
- veor d20,d26 @ Sigma0(a)
- vadd.i64 d16,d27
- vadd.i64 d30,d27
- @ vadd.i64 d20,d30
- vshr.u64 q12,q5,#19
- vshr.u64 q13,q5,#61
- vadd.i64 d20,d30 @ h+=Maj from the past
- vshr.u64 q15,q5,#6
- vsli.64 q12,q5,#45
- vext.8 q14,q6,q7,#8 @ X[i+1]
- vsli.64 q13,q5,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q6,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q2,q3,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d16,#14 @ from NEON_00_15
- vadd.i64 q6,q14
- vshr.u64 d25,d16,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d16,#41 @ from NEON_00_15
- vadd.i64 q6,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d16,#50
- vsli.64 d25,d16,#46
- vmov d29,d16
- vsli.64 d26,d16,#23
-#if 28<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d17,d18 @ Ch(e,f,g)
- vshr.u64 d24,d20,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d19
- vshr.u64 d25,d20,#34
- vsli.64 d24,d20,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d20,#39
- vadd.i64 d28,d12
- vsli.64 d25,d20,#30
- veor d30,d20,d21
- vsli.64 d26,d20,#25
- veor d19,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d22,d21 @ Maj(a,b,c)
- veor d19,d26 @ Sigma0(a)
- vadd.i64 d23,d27
- vadd.i64 d30,d27
- @ vadd.i64 d19,d30
- vshr.u64 d24,d23,#14 @ 29
-#if 29<16
- vld1.64 {d13},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d23,#18
-#if 29>0
- vadd.i64 d19,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d23,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d23,#50
- vsli.64 d25,d23,#46
- vmov d29,d23
- vsli.64 d26,d23,#23
-#if 29<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d16,d17 @ Ch(e,f,g)
- vshr.u64 d24,d19,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d18
- vshr.u64 d25,d19,#34
- vsli.64 d24,d19,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d19,#39
- vadd.i64 d28,d13
- vsli.64 d25,d19,#30
- veor d30,d19,d20
- vsli.64 d26,d19,#25
- veor d18,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d21,d20 @ Maj(a,b,c)
- veor d18,d26 @ Sigma0(a)
- vadd.i64 d22,d27
- vadd.i64 d30,d27
- @ vadd.i64 d18,d30
- vshr.u64 q12,q6,#19
- vshr.u64 q13,q6,#61
- vadd.i64 d18,d30 @ h+=Maj from the past
- vshr.u64 q15,q6,#6
- vsli.64 q12,q6,#45
- vext.8 q14,q7,q0,#8 @ X[i+1]
- vsli.64 q13,q6,#3
- veor q15,q12
- vshr.u64 q12,q14,#1
- veor q15,q13 @ sigma1(X[i+14])
- vshr.u64 q13,q14,#8
- vadd.i64 q7,q15
- vshr.u64 q15,q14,#7
- vsli.64 q12,q14,#63
- vsli.64 q13,q14,#56
- vext.8 q14,q3,q4,#8 @ X[i+9]
- veor q15,q12
- vshr.u64 d24,d22,#14 @ from NEON_00_15
- vadd.i64 q7,q14
- vshr.u64 d25,d22,#18 @ from NEON_00_15
- veor q15,q13 @ sigma0(X[i+1])
- vshr.u64 d26,d22,#41 @ from NEON_00_15
- vadd.i64 q7,q15
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d22,#50
- vsli.64 d25,d22,#46
- vmov d29,d22
- vsli.64 d26,d22,#23
-#if 30<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d23,d16 @ Ch(e,f,g)
- vshr.u64 d24,d18,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d17
- vshr.u64 d25,d18,#34
- vsli.64 d24,d18,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d18,#39
- vadd.i64 d28,d14
- vsli.64 d25,d18,#30
- veor d30,d18,d19
- vsli.64 d26,d18,#25
- veor d17,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d20,d19 @ Maj(a,b,c)
- veor d17,d26 @ Sigma0(a)
- vadd.i64 d21,d27
- vadd.i64 d30,d27
- @ vadd.i64 d17,d30
- vshr.u64 d24,d21,#14 @ 31
-#if 31<16
- vld1.64 {d15},[r1]! @ handles unaligned
-#endif
- vshr.u64 d25,d21,#18
-#if 31>0
- vadd.i64 d17,d30 @ h+=Maj from the past
-#endif
- vshr.u64 d26,d21,#41
- vld1.64 {d28},[r3,:64]! @ K[i++]
- vsli.64 d24,d21,#50
- vsli.64 d25,d21,#46
- vmov d29,d21
- vsli.64 d26,d21,#23
-#if 31<16 && defined(__ARMEL__)
- vrev64.8 ,
-#endif
- veor d25,d24
- vbsl d29,d22,d23 @ Ch(e,f,g)
- vshr.u64 d24,d17,#28
- veor d26,d25 @ Sigma1(e)
- vadd.i64 d27,d29,d16
- vshr.u64 d25,d17,#34
- vsli.64 d24,d17,#36
- vadd.i64 d27,d26
- vshr.u64 d26,d17,#39
- vadd.i64 d28,d15
- vsli.64 d25,d17,#30
- veor d30,d17,d18
- vsli.64 d26,d17,#25
- veor d16,d24,d25
- vadd.i64 d27,d28
- vbsl d30,d19,d18 @ Maj(a,b,c)
- veor d16,d26 @ Sigma0(a)
- vadd.i64 d20,d27
- vadd.i64 d30,d27
- @ vadd.i64 d16,d30
- bne .L16_79_neon
-
- vadd.i64 d16,d30 @ h+=Maj from the past
- vldmia r0,{d24-d31} @ load context to temp
- vadd.i64 q8,q12 @ vectorized accumulate
- vadd.i64 q9,q13
- vadd.i64 q10,q14
- vadd.i64 q11,q15
- vstmia r0,{d16-d23} @ save context
- teq r1,r2
- sub r3,#640 @ rewind K512
- bne .Loop_neon
-
- VFP_ABI_POP
- bx lr @ .word 0xe12fff1e
-.size sha512_block_data_order_neon,.-sha512_block_data_order_neon
-#endif
-.asciz "SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
-.comm OPENSSL_armcap_P,4,4
-#endif
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 455eb19a5ac1..db8512d9a918 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -22,8 +22,8 @@
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
#if __LINUX_ARM_ARCH__ >= 6
@@ -34,7 +34,7 @@
*/
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -52,7 +52,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -73,7 +73,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result, val; \
@@ -93,17 +93,17 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
return result; \
}
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
-static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
+static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
int oldval;
unsigned long res;
@@ -123,9 +123,9 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
return oldval;
}
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int oldval, newval;
unsigned long tmp;
@@ -151,7 +151,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return oldval;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#else /* ARM_ARCH_6 */
@@ -160,7 +160,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -170,7 +170,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int val; \
@@ -184,7 +184,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int val; \
@@ -197,7 +197,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
return val; \
}
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
@@ -211,7 +211,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret;
}
-#define atomic_fetch_andnot atomic_fetch_andnot
+#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#endif /* __LINUX_ARM_ARCH__ */
@@ -223,7 +223,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
-#define atomic_andnot atomic_andnot
+#define arch_atomic_andnot arch_atomic_andnot
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
@@ -240,7 +240,7 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
@@ -250,7 +250,7 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 result;
@@ -263,7 +263,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
__asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
@@ -272,7 +272,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i)
);
}
#else
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
s64 result;
@@ -285,7 +285,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return result;
}
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
s64 tmp;
@@ -302,7 +302,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i)
#endif
#define ATOMIC64_OP(op, op1, op2) \
-static inline void atomic64_##op(s64 i, atomic64_t *v) \
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
s64 result; \
unsigned long tmp; \
@@ -322,7 +322,7 @@ static inline void atomic64_##op(s64 i, atomic64_t *v) \
#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline s64 \
-atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
{ \
s64 result; \
unsigned long tmp; \
@@ -345,7 +345,7 @@ atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline s64 \
-atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
{ \
s64 result, val; \
unsigned long tmp; \
@@ -374,34 +374,34 @@ atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2)
-#define atomic64_andnot atomic64_andnot
+#define arch_atomic64_andnot arch_atomic64_andnot
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
+static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
s64 oldval;
unsigned long res;
@@ -422,9 +422,9 @@ static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
return oldval;
}
-#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+#define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed
-static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
+static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
s64 result;
unsigned long tmp;
@@ -442,9 +442,9 @@ static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
return result;
}
-#define atomic64_xchg_relaxed atomic64_xchg_relaxed
+#define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 result;
unsigned long tmp;
@@ -470,9 +470,9 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
return result;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 oldval, newval;
unsigned long tmp;
@@ -500,7 +500,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return oldval;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
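
The arch_ prefix introduced above is what lets ARM opt in to the generic instrumented atomics: common code (the generated instrumented-atomics header of this era, asm-generic/atomic-instrumented.h) supplies the unprefixed atomic_*() API as thin wrappers that add KASAN/KCSAN annotations before calling the architecture primitive. A minimal sketch of that wrapper shape, assuming the helpers from <linux/instrumented.h> — not the verbatim generated code:

static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
	/* tell KASAN/KCSAN this is an atomic read-modify-write */
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_add_return(i, v);
}
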
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 673c7dd75ab9..ba8d9d7d242b 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -88,5 +88,6 @@ extern asmlinkage void c_backtrace(unsigned long fp, int pmode,
struct mm_struct;
void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);
+extern void __show_regs_alloc_free(struct pt_regs *regs);
#endif
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 8b701f8e175c..4dfe538dfc68 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -114,7 +114,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
return ret;
}
-#define xchg_relaxed(ptr, x) ({ \
+#define arch_xchg_relaxed(ptr, x) ({ \
(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
sizeof(*(ptr))); \
})
@@ -128,20 +128,20 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#error "SMP is not supported on this platform"
#endif
-#define xchg xchg_relaxed
+#define arch_xchg arch_xchg_relaxed
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) ({ \
- (__typeof(*ptr))__cmpxchg_local_generic((ptr), \
+#define arch_cmpxchg_local(ptr, o, n) ({ \
+ (__typeof(*ptr))__generic_cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))); \
})
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#include <asm-generic/cmpxchg.h>
@@ -207,7 +207,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return oldval;
}
-#define cmpxchg_relaxed(ptr,o,n) ({ \
+#define arch_cmpxchg_relaxed(ptr,o,n) ({ \
(__typeof__(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
@@ -224,7 +224,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
case 1:
case 2:
- ret = __cmpxchg_local_generic(ptr, old, new, size);
+ ret = __generic_cmpxchg_local(ptr, old, new, size);
break;
#endif
default:
@@ -234,7 +234,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return ret;
}
-#define cmpxchg_local(ptr, o, n) ({ \
+#define arch_cmpxchg_local(ptr, o, n) ({ \
(__typeof(*ptr))__cmpxchg_local((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
@@ -266,13 +266,13 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
return oldval;
}
-#define cmpxchg64_relaxed(ptr, o, n) ({ \
+#define arch_cmpxchg64_relaxed(ptr, o, n) ({ \
(__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
})
-#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))
#endif /* __LINUX_ARM_ARCH__ >= 6 */
diff --git a/arch/arm/include/asm/cpuidle.h b/arch/arm/include/asm/cpuidle.h
index 0d67ed682e07..397be5ed30e7 100644
--- a/arch/arm/include/asm/cpuidle.h
+++ b/arch/arm/include/asm/cpuidle.h
@@ -7,9 +7,11 @@
#ifdef CONFIG_CPU_IDLE
extern int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
+#define __cpuidle_method_section __used __section("__cpuidle_method_of_table")
#else
static inline int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) { return -ENODEV; }
+#define __cpuidle_method_section __maybe_unused /* drop silently */
#endif
/* Common ARM WFI state */
@@ -42,11 +44,15 @@ struct of_cpuidle_method {
#define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \
static const struct of_cpuidle_method __cpuidle_method_of_table_##name \
- __used __section("__cpuidle_method_of_table") \
- = { .method = _method, .ops = _ops }
+ __cpuidle_method_section = { .method = _method, .ops = _ops }
extern int arm_cpuidle_suspend(int index);
extern int arm_cpuidle_init(int cpu);
+struct arm_cpuidle_irq_context { };
+
+#define arm_cpuidle_save_irq_context(c) (void)c
+#define arm_cpuidle_restore_irq_context(c) (void)c
+
#endif
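
With the hunk above, CPUIDLE_METHOD_OF_DECLARE() entries are silently discarded when CONFIG_CPU_IDLE is off, instead of accumulating in the __cpuidle_method_of_table section. A hedged sketch of how a platform would use the macro; the "foo" names are purely illustrative:

static int foo_cpuidle_suspend(unsigned long arg)
{
	cpu_do_idle();		/* enter the platform low-power state */
	return 0;
}

static int foo_cpuidle_init(struct device_node *np, int cpu)
{
	return 0;		/* claim per-cpu idle resources here */
}

static const struct cpuidle_ops foo_cpuidle_ops = {
	.suspend = foo_cpuidle_suspend,
	.init	 = foo_cpuidle_init,
};

CPUIDLE_METHOD_OF_DECLARE(foo_cpuidle, "foo,method", &foo_cpuidle_ops);
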
diff --git a/arch/arm/include/asm/hypervisor.h b/arch/arm/include/asm/hypervisor.h
index df8524365637..bd61502b9715 100644
--- a/arch/arm/include/asm/hypervisor.h
+++ b/arch/arm/include/asm/hypervisor.h
@@ -4,4 +4,7 @@
#include <asm/xen/hypervisor.h>
+void kvm_init_hyp_services(void);
+bool kvm_arm_hyp_service_available(u32 func_id);
+
#endif
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index fc748122f1e0..f74944c6fe8d 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -430,11 +430,6 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
*/
#define xlate_dev_mem_ptr(p) __va(p)
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
#include <asm-generic/io.h>
#ifdef CONFIG_MMU
diff --git a/arch/arm/include/asm/kexec.h b/arch/arm/include/asm/kexec.h
index 22751b5b5735..e62832dcba76 100644
--- a/arch/arm/include/asm/kexec.h
+++ b/arch/arm/include/asm/kexec.h
@@ -56,9 +56,6 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
}
}
-/* Function pointer to optional machine-specific reinitialization */
-extern void (*kexec_reinit)(void);
-
static inline unsigned long phys_to_boot_phys(phys_addr_t phys)
{
return phys_to_idmap(phys);
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 2f841cb65c30..a711322d9f40 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -150,21 +150,6 @@ extern unsigned long vectors_base;
*/
#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
-#ifdef CONFIG_XIP_KERNEL
-/*
- * When referencing data in RAM from the XIP region in a relative manner
- * with the MMU off, we need the relative offset between the two physical
- * addresses. The macro below achieves this, which is:
- * __pa(v_data) - __xip_pa(v_text)
- */
-#define PHYS_RELATIVE(v_data, v_text) \
- (((v_data) - PAGE_OFFSET + PLAT_PHYS_OFFSET) - \
- ((v_text) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + \
- CONFIG_XIP_PHYS_ADDR))
-#else
-#define PHYS_RELATIVE(v_data, v_text) ((v_data) - (v_text))
-#endif
-
#ifndef __ASSEMBLY__
/*
diff --git a/arch/arm/include/asm/paravirt.h b/arch/arm/include/asm/paravirt.h
index cdbf02d9c1d4..95d5b0d625cd 100644
--- a/arch/arm/include/asm/paravirt.h
+++ b/arch/arm/include/asm/paravirt.h
@@ -3,23 +3,19 @@
#define _ASM_ARM_PARAVIRT_H
#ifdef CONFIG_PARAVIRT
+#include <linux/static_call_types.h>
+
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
-struct pv_time_ops {
- unsigned long long (*steal_clock)(int cpu);
-};
-
-struct paravirt_patch_template {
- struct pv_time_ops time;
-};
+u64 dummy_steal_clock(int cpu);
-extern struct paravirt_patch_template pv_ops;
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
static inline u64 paravirt_steal_clock(int cpu)
{
- return pv_ops.time.steal_clock(cpu);
+ return static_call(pv_steal_clock)(cpu);
}
#endif
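
The pv_time_ops indirection is replaced by a static call, so paravirt_steal_clock() becomes a patchable direct branch instead of a load plus indirect call. A sketch of how a guest backend would hook it, modeled on the Xen conversion in the same series; the foo_* names are hypothetical:

#include <linux/static_call.h>
#include <asm/paravirt.h>

static u64 foo_steal_clock(int cpu)
{
	/* report nanoseconds of stolen time for @cpu */
	return 0;
}

static void __init foo_guest_setup(void)
{
	static_key_slow_inc(&paravirt_steal_enabled);
	static_call_update(pv_steal_clock, foo_steal_clock);
}
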
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index fdee1f04f4f3..a17f01235c29 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -143,7 +143,6 @@ pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
__pmd_populate(pmdp, page_to_phys(ptep), prot);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* CONFIG_MMU */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 2b85d175e999..d4edab51a77c 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -186,8 +186,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
#define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
#define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
-#define pud_page(pud) pmd_page(__pmd(pud_val(pud)))
-#define pud_write(pud) pmd_write(__pmd(pud_val(pud)))
#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index c02f24400369..d63a5bb6bd0c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -166,6 +166,9 @@ extern struct page *empty_zero_page;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+#define pud_page(pud) pmd_page(__pmd(pud_val(pud)))
+#define pud_write(pud) pmd_write(__pmd(pud_val(pud)))
+
#define pmd_none(pmd) (!pmd_val(pmd))
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
diff --git a/arch/arm/include/asm/set_memory.h b/arch/arm/include/asm/set_memory.h
index a1ceff4295d3..ec17fc0fda7a 100644
--- a/arch/arm/include/asm/set_memory.h
+++ b/arch/arm/include/asm/set_memory.h
@@ -18,12 +18,4 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void set_kernel_text_rw(void);
-void set_kernel_text_ro(void);
-#else
-static inline void set_kernel_text_rw(void) { }
-static inline void set_kernel_text_ro(void) { }
-#endif
-
#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 8f009e788ad4..f610a773f2be 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -22,7 +22,7 @@
* assembler to insert an extra (16-bit) IT instruction, depending on the
* presence or absence of neighbouring conditional instructions.
*
- * To avoid this unpredictableness, an approprite IT is inserted explicitly:
+ * To avoid this unpredictability, an appropriate IT is inserted explicitly:
* the assembler won't change IT instructions which are explicitly present
* in the input.
*/
diff --git a/arch/arm/include/asm/sync_bitops.h b/arch/arm/include/asm/sync_bitops.h
index 39ff217136d1..6f5d627c44a3 100644
--- a/arch/arm/include/asm/sync_bitops.h
+++ b/arch/arm/include/asm/sync_bitops.h
@@ -21,7 +21,7 @@
#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr)
-#define sync_cmpxchg cmpxchg
+#define arch_sync_cmpxchg arch_cmpxchg
#endif
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 24cbfc112dfa..0ccc985b90af 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -253,7 +253,7 @@ extern struct cpu_tlb_fns cpu_tlb;
* space.
* - mm - mm_struct describing address space
*
- * flush_tlb_range(mm,start,end)
+ * flush_tlb_range(vma,start,end)
*
* Invalidate a range of TLB entries in the specified
* address space.
@@ -261,18 +261,11 @@ extern struct cpu_tlb_fns cpu_tlb;
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*
- * flush_tlb_page(vaddr,vma)
+ * flush_tlb_page(vma, uaddr)
*
* Invalidate the specified page in the specified address range.
+ * - vma - vm_area_struct describing address range
* - vaddr - virtual address (may not be aligned)
- * - vma - vma_struct describing address range
- *
- * flush_kern_tlb_page(kaddr)
- *
- * Invalidate the TLB entry for the specified page. The address
- * will be in the kernels virtual memory space. Current uses
- * only require the D-TLB to be invalidated.
- * - kaddr - Kernel virtual memory address
*/
/*
diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h
deleted file mode 100644
index ab905ffcf193..000000000000
--- a/arch/arm/include/asm/unaligned.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef __ASM_ARM_UNALIGNED_H
-#define __ASM_ARM_UNALIGNED_H
-
-/*
- * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
- * but we don't want to use linux/unaligned/access_ok.h since that can lead
- * to traps on unaligned stm/ldm or strd/ldrd.
- */
-#include <asm/byteorder.h>
-
-#if defined(__LITTLE_ENDIAN)
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#elif defined(__BIG_ENDIAN)
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#else
-# error need to define endianess
-#endif
-
-#endif /* __ASM_ARM_UNALIGNED_H */
diff --git a/arch/arm/include/asm/xen/swiotlb-xen.h b/arch/arm/include/asm/xen/swiotlb-xen.h
new file mode 100644
index 000000000000..455ade5d5320
--- /dev/null
+++ b/arch/arm/include/asm/xen/swiotlb-xen.h
@@ -0,0 +1 @@
+#include <xen/arm/swiotlb-xen.h>
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index ce8573157774..63748af8bc9d 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-generated-y += unistd-common.h
generated-y += unistd-oabi.h
generated-y += unistd-eabi.h
generic-y += kvm_para.h
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 93ecf8aa4fe5..ae7749e15726 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -24,7 +24,6 @@
#include <asm/unistd-oabi.h>
#endif
-#include <asm/unistd-common.h>
#define __NR_sync_file_range2 __NR_arm_sync_file_range
/*
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index be8050b0c3df..70993af22d80 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -24,6 +24,7 @@
#include <asm/vdso_datapage.h>
#include <asm/hardware/cache-l2x0.h>
#include <linux/kbuild.h>
+#include <linux/arm-smccc.h>
#include "signal.h"
/*
@@ -148,6 +149,8 @@ int main(void)
DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
#endif
+ DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
+ DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
BLANK();
DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index e0d7833a1827..7f0b7aba1498 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -344,20 +344,19 @@ ENTRY(\sym)
.size \sym, . - \sym
.endm
-#define NATIVE(nr, func) syscall nr, func
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+#define __SYSCALL(nr, func) syscall nr, func
/*
* This is the syscall table declaration for native ABI syscalls.
* With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
*/
syscall_table_start sys_call_table
-#define COMPAT(nr, native, compat) syscall nr, native
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
-#undef COMPAT
syscall_table_end sys_call_table
/*============================================================================
@@ -455,7 +454,8 @@ ENDPROC(sys_oabi_readahead)
* using the compatibility syscall entries.
*/
syscall_table_start sys_oabi_call_table
-#define COMPAT(nr, native, compat) syscall nr, compat
+#undef __SYSCALL_WITH_COMPAT
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
#include <calls-oabi.S>
syscall_table_end sys_oabi_call_table
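
Both tables are now driven by the __SYSCALL()/__SYSCALL_WITH_COMPAT() hooks emitted by the generic scripts/syscalltbl.sh, with the OABI table simply redefining __SYSCALL_WITH_COMPAT to pick the compat entry. Roughly what a generated calls-*.S include contains (illustrative excerpt; numbers follow arch/arm/tools/syscall.tbl):

__SYSCALL(1, sys_exit)
__SYSCALL(2, sys_fork)
__SYSCALL_WITH_COMPAT(225, sys_readahead, sys_oabi_readahead)
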
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 08660ae9dcbc..b1423fb130ea 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -886,7 +886,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
info->trigger = addr;
pr_debug("breakpoint fired: address = 0x%x\n", addr);
perf_bp_event(bp, regs);
- if (!bp->overflow_handler)
+ if (is_default_overflow_handler(bp))
enable_single_step(bp, addr);
goto unlock;
}
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 698b6f636156..20ab1e607522 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -63,7 +63,27 @@ int arch_show_interrupts(struct seq_file *p, int prec)
*/
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
- __handle_domain_irq(NULL, irq, false, regs);
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ struct irq_desc *desc;
+
+ irq_enter();
+
+ /*
+ * Some hardware gives randomly wrong interrupts. Rather
+ * than crashing, do something sensible.
+ */
+ if (unlikely(!irq || irq >= nr_irqs))
+ desc = NULL;
+ else
+ desc = irq_to_desc(irq);
+
+ if (likely(desc))
+ handle_irq_desc(desc);
+ else
+ ack_bad_irq(irq);
+
+ irq_exit();
+ set_irq_regs(old_regs);
}
/*
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 2b09dad7935e..f567032a09c0 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -147,11 +147,6 @@ void machine_crash_shutdown(struct pt_regs *regs)
pr_info("Loading crashdump kernel...\n");
}
-/*
- * Function pointer to optional machine-specific reinitialization
- */
-void (*kexec_reinit)(void);
-
void machine_kexec(struct kimage *image)
{
unsigned long page_list, reboot_entry_phys;
@@ -187,9 +182,6 @@ void machine_kexec(struct kimage *image)
pr_info("Bye!\n");
- if (kexec_reinit)
- kexec_reinit();
-
soft_restart(reboot_entry_phys);
}
diff --git a/arch/arm/kernel/paravirt.c b/arch/arm/kernel/paravirt.c
index 4cfed91fe256..7dd9806369fb 100644
--- a/arch/arm/kernel/paravirt.c
+++ b/arch/arm/kernel/paravirt.c
@@ -9,10 +9,15 @@
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/types.h>
+#include <linux/static_call.h>
#include <asm/paravirt.h>
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
-struct paravirt_patch_template pv_ops;
-EXPORT_SYMBOL_GPL(pv_ops);
+static u64 native_steal_clock(int cpu)
+{
+ return 0;
+}
+
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 2924d7910b10..eb2190477da1 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -773,10 +773,10 @@ static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
} else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
- asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" ((u32)value));
} else {
armv7_pmnc_select_counter(idx);
- asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
+ asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" ((u32)value));
}
}
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 5199a2bb4111..fc9e8b37eaa8 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -92,6 +92,17 @@ void arch_cpu_idle_exit(void)
ledtrig_cpu(CPU_LED_IDLE_END);
}
+void __show_regs_alloc_free(struct pt_regs *regs)
+{
+ int i;
+
+ /* check for r0 - r12 only */
+ for (i = 0; i < 13; i++) {
+ pr_alert("Register r%d information:", i);
+ mem_dump_obj((void *)regs->uregs[i]);
+ }
+}
+
void __show_regs(struct pt_regs *regs)
{
unsigned long flags;
@@ -277,7 +288,7 @@ unsigned long get_wchan(struct task_struct *p)
struct stackframe frame;
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
frame.fp = thread_saved_fp(p);
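
get_wchan() now goes through the task_is_running() helper rather than comparing p->state by hand; at the time of this series the helper is approximately the following (modulo the concurrent ->state to ->__state rename):

/* shape of the helper from <linux/sched.h>; see note above */
#define task_is_running(task)	(READ_ONCE((task)->state) == TASK_RUNNING)
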
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1a5edf562e85..73ca7797b92f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -545,9 +545,11 @@ void notrace cpu_init(void)
* In Thumb-2, msr with an immediate value is not allowed.
*/
#ifdef CONFIG_THUMB2_KERNEL
-#define PLC "r"
+#define PLC_l "l"
+#define PLC_r "r"
#else
-#define PLC "I"
+#define PLC_l "I"
+#define PLC_r "I"
#endif
/*
@@ -569,15 +571,15 @@ void notrace cpu_init(void)
"msr cpsr_c, %9"
:
: "r" (stk),
- PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
"I" (offsetof(struct stack, irq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
"I" (offsetof(struct stack, abt[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
"I" (offsetof(struct stack, und[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
"I" (offsetof(struct stack, fiq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+ PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
: "r14");
#endif
}
diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
index 00664c78faca..931df62a7831 100644
--- a/arch/arm/kernel/smccc-call.S
+++ b/arch/arm/kernel/smccc-call.S
@@ -3,7 +3,9 @@
* Copyright (c) 2015, Linaro Limited
*/
#include <linux/linkage.h>
+#include <linux/arm-smccc.h>
+#include <asm/asm-offsets.h>
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/unwind.h>
@@ -27,7 +29,14 @@ UNWIND( .fnstart)
UNWIND( .save {r4-r7})
ldm r12, {r4-r7}
\instr
- pop {r4-r7}
+ ldr r4, [sp, #36]
+ cmp r4, #0
+ beq 1f // No quirk structure
+ ldr r5, [r4, #ARM_SMCCC_QUIRK_ID_OFFS]
+ cmp r5, #ARM_SMCCC_QUIRK_QCOM_A6
+ bne 1f // No quirk present
+ str r6, [r4, #ARM_SMCCC_QUIRK_STATE_OFFS]
+1: pop {r4-r7}
ldr r12, [sp, #(4 * 4)]
stm r12, {r0-r3}
bx lr
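
The new epilogue stores the value firmware left in r6 back into the caller's quirk structure when, and only when, the caller passed a quirk id of ARM_SMCCC_QUIRK_QCOM_A6. A hedged sketch of a C caller using the arm_smccc_smc_quirk() helper from <linux/arm-smccc.h>; the function id is a placeholder:

#include <linux/arm-smccc.h>

static void foo_smc_with_quirk(unsigned long fn_id)	/* fn_id: hypothetical */
{
	struct arm_smccc_res res;
	struct arm_smccc_quirk quirk = { .id = ARM_SMCCC_QUIRK_QCOM_A6 };

	arm_smccc_smc_quirk(fn_id, 0, 0, 0, 0, 0, 0, 0, &res, &quirk);
	/* quirk.state.a6 now holds whatever the firmware left in r6 */
}
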
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 74679240a9d8..c7bb168b0d97 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -432,7 +432,6 @@ asmlinkage void secondary_start_kernel(void)
#endif
pr_debug("CPU%u: Booted secondary processor\n", cpu);
- preempt_disable();
trace_hardirqs_off();
/*
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 24bd20564be7..43f0a3ebf390 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm_types.h>
@@ -26,12 +27,22 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
return -EINVAL;
/*
+ * Function graph tracer state gets inconsistent when the kernel
+ * calls functions that never return (aka suspend finishers), hence
+ * disable graph tracing during their execution.
+ */
+ pause_graph_tracing();
+
+ /*
* Provide a temporary page table with an identity mapping for
* the MMU-enable code, required for resuming. On successful
* resume (indicated by a zero return code), we need to switch
* back to the correct page tables.
*/
ret = __cpu_suspend(arg, fn, __mpidr);
+
+ unpause_graph_tracing();
+
if (ret == 0) {
cpu_switch_mm(mm->pgd, mm);
local_flush_bp_all();
@@ -45,7 +56,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
u32 __mpidr = cpu_logical_map(smp_processor_id());
- return __cpu_suspend(arg, fn, __mpidr);
+ int ret;
+
+ pause_graph_tracing();
+ ret = __cpu_suspend(arg, fn, __mpidr);
+ unpause_graph_tracing();
+
+ return ret;
}
#define idmap_pgd NULL
#endif
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 17d5a785df28..64308e3a5d0c 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -287,6 +287,7 @@ static int __die(const char *str, int err, struct pt_regs *regs)
print_modules();
__show_regs(regs);
+ __show_regs_alloc_free(regs);
pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 120f9aa6fff3..90dcdfe3b3d0 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -17,6 +17,8 @@
#include <linux/clk/at91_pmc.h>
#include <linux/platform_data/atmel.h>
+#include <soc/at91/pm.h>
+
#include <asm/cacheflush.h>
#include <asm/fncpy.h>
#include <asm/system_misc.h>
@@ -25,17 +27,6 @@
#include "generic.h"
#include "pm.h"
-/*
- * FIXME: this is needed to communicate between the pinctrl driver and
- * the PM implementation in the machine. Possibly part of the PM
- * implementation should be moved down into the pinctrl driver and get
- * called as part of the generic suspend/resume path.
- */
-#ifdef CONFIG_PINCTRL_AT91
-extern void at91_pinctrl_gpio_suspend(void);
-extern void at91_pinctrl_gpio_resume(void);
-#endif
-
struct at91_soc_pm {
int (*config_shdwc_ws)(void __iomem *shdwc, u32 *mode, u32 *polarity);
int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity);
@@ -326,6 +317,12 @@ static void at91_pm_suspend(suspend_state_t state)
static int at91_pm_enter(suspend_state_t state)
{
#ifdef CONFIG_PINCTRL_AT91
+ /*
+ * FIXME: this is needed to communicate between the pinctrl driver and
+ * the PM implementation in the machine. Possibly part of the PM
+ * implementation should be moved down into the pinctrl driver and get
+ * called as part of the generic suspend/resume path.
+ */
at91_pinctrl_gpio_suspend();
#endif
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index a20ba12d876c..823c9cc98f18 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -454,6 +454,10 @@ static const struct property_entry da830_evm_i2c_eeprom_properties[] = {
{ }
};
+static const struct software_node da830_evm_i2c_eeprom_node = {
+ .properties = da830_evm_i2c_eeprom_properties,
+};
+
static int __init da830_evm_ui_expander_setup(struct i2c_client *client,
int gpio, unsigned ngpio, void *context)
{
@@ -485,7 +489,7 @@ static struct pcf857x_platform_data __initdata da830_evm_ui_expander_info = {
static struct i2c_board_info __initdata da830_evm_i2c_devices[] = {
{
I2C_BOARD_INFO("24c256", 0x50),
- .properties = da830_evm_i2c_eeprom_properties,
+ .swnode = &da830_evm_i2c_eeprom_node,
},
{
I2C_BOARD_INFO("tlv320aic3x", 0x18),
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index bdf31eb77620..b3bef74c982a 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -232,10 +232,14 @@ static const struct property_entry eeprom_properties[] = {
{ }
};
+static const struct software_node eeprom_node = {
+ .properties = eeprom_properties,
+};
+
static struct i2c_board_info i2c_info[] = {
{
I2C_BOARD_INFO("24c256", 0x50),
- .properties = eeprom_properties,
+ .swnode = &eeprom_node,
},
{
I2C_BOARD_INFO("tlv320aic3x", 0x18),
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 7755cccec550..cce3a621eb20 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -541,6 +541,10 @@ static const struct property_entry eeprom_properties[] = {
{ }
};
+static const struct software_node eeprom_node = {
+ .properties = eeprom_properties,
+};
+
/*
* MSP430 supports RTC, card detection, input from IR remote, and
* a bit more. It triggers interrupts on GPIO(7) from pressing
@@ -647,7 +651,7 @@ static struct i2c_board_info __initdata i2c_info[] = {
},
{
I2C_BOARD_INFO("24c256", 0x50),
- .properties = eeprom_properties,
+ .swnode = &eeprom_node,
},
{
I2C_BOARD_INFO("tlv320aic33", 0x1b),
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index 952ddabc743e..ee91d81ebbfd 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -362,6 +362,10 @@ static const struct property_entry eeprom_properties[] = {
PROPERTY_ENTRY_U32("pagesize", 64),
{ }
};
+
+static const struct software_node eeprom_node = {
+ .properties = eeprom_properties,
+};
#endif
static u8 dm646x_iis_serializer_direction[] = {
@@ -430,7 +434,7 @@ static void evm_init_cpld(void)
static struct i2c_board_info __initdata i2c_info[] = {
{
I2C_BOARD_INFO("24c256", 0x50),
- .properties = eeprom_properties,
+ .swnode = &eeprom_node,
},
{
I2C_BOARD_INFO("pcf8574a", 0x38),
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 5205008c8061..2127969beb96 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -197,6 +197,10 @@ static const struct property_entry mityomapl138_fd_chip_properties[] = {
{ }
};
+static const struct software_node mityomapl138_fd_chip_node = {
+ .properties = mityomapl138_fd_chip_properties,
+};
+
static struct davinci_i2c_platform_data mityomap_i2c_0_pdata = {
.bus_freq = 100, /* kHz */
.bus_delay = 0, /* usec */
@@ -323,7 +327,7 @@ static struct i2c_board_info __initdata mityomap_tps65023_info[] = {
},
{
I2C_BOARD_INFO("24c02", 0x50),
- .properties = mityomapl138_fd_chip_properties,
+ .swnode = &mityomapl138_fd_chip_node,
},
};
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 79b47958e992..6930b2f485d1 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -84,10 +84,14 @@ static const struct property_entry eeprom_properties[] = {
{ }
};
+static const struct software_node eeprom_node = {
+ .properties = eeprom_properties,
+};
+
static struct i2c_board_info __initdata i2c_info[] = {
{
I2C_BOARD_INFO("24c64", 0x50),
- .properties = eeprom_properties,
+ .swnode = &eeprom_node,
},
/* Other I2C devices:
* MSP430, addr 0x23 (not used)
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index ea0be59f469a..fb4a394ece3a 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -78,12 +78,11 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
#endif /* CONFIG_HOTPLUG_CPU */
/**
- * exynos_core_power_down : power down the specified cpu
- * @cpu : the cpu to power down
+ * exynos_cpu_power_down() - power down the specified cpu
+ * @cpu: the cpu to power down
*
* Power down the specified cpu. The sequence must be finished by a
* call to cpu_do_idle()
- *
*/
void exynos_cpu_power_down(int cpu)
{
@@ -107,8 +106,8 @@ void exynos_cpu_power_down(int cpu)
}
/**
- * exynos_cpu_power_up : power up the specified cpu
- * @cpu : the cpu to power up
+ * exynos_cpu_power_up() - power up the specified cpu
+ * @cpu: the cpu to power up
*
* Power up the specified cpu
*/
@@ -124,9 +123,8 @@ void exynos_cpu_power_up(int cpu)
}
/**
- * exynos_cpu_power_state : returns the power state of the cpu
- * @cpu : the cpu to retrieve the power state from
- *
+ * exynos_cpu_power_state() - returns the power state of the cpu
+ * @cpu: the cpu to retrieve the power state from
*/
int exynos_cpu_power_state(int cpu)
{
@@ -135,8 +133,8 @@ int exynos_cpu_power_state(int cpu)
}
/**
- * exynos_cluster_power_down : power down the specified cluster
- * @cluster : the cluster to power down
+ * exynos_cluster_power_down() - power down the specified cluster
+ * @cluster: the cluster to power down
*/
void exynos_cluster_power_down(int cluster)
{
@@ -144,8 +142,8 @@ void exynos_cluster_power_down(int cluster)
}
/**
- * exynos_cluster_power_up : power up the specified cluster
- * @cluster : the cluster to power up
+ * exynos_cluster_power_up() - power up the specified cluster
+ * @cluster: the cluster to power up
*/
void exynos_cluster_power_up(int cluster)
{
@@ -154,8 +152,8 @@ void exynos_cluster_power_up(int cluster)
}
/**
- * exynos_cluster_power_state : returns the power state of the cluster
- * @cluster : the cluster to retrieve the power state from
+ * exynos_cluster_power_state() - returns the power state of the cluster
+ * @cluster: the cluster to retrieve the power state from
*
*/
int exynos_cluster_power_state(int cluster)
@@ -165,7 +163,7 @@ int exynos_cluster_power_state(int cluster)
}
/**
- * exynos_scu_enable : enables SCU for Cortex-A9 based system
+ * exynos_scu_enable() - enables SCU for Cortex-A9 based system
*/
void exynos_scu_enable(void)
{
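These hunks bring the comments into canonical kernel-doc form, which scripts/kernel-doc can parse: the first line is "function_name() - short description", each parameter line is "@name: description" with no space before the colon, and stray trailing blank comment lines are dropped. The expected shape, taken from the fixed version above:

    /**
     * exynos_cpu_power_down() - power down the specified cpu
     * @cpu: the cpu to power down
     *
     * Power down the specified cpu. The sequence must be finished by a
     * call to cpu_do_idle()
     */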
diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig
index 844aa585b966..728aff93fba9 100644
--- a/arch/arm/mach-footbridge/Kconfig
+++ b/arch/arm/mach-footbridge/Kconfig
@@ -16,27 +16,6 @@ config ARCH_CATS
Saying N will reduce the size of the Footbridge kernel.
-config ARCH_PERSONAL_SERVER
- bool "Compaq Personal Server"
- select FOOTBRIDGE_HOST
- select ISA
- select ISA_DMA
- select FORCE_PCI
- help
- Say Y here if you intend to run this kernel on the Compaq
- Personal Server.
-
- Saying N will reduce the size of the Footbridge kernel.
-
- The Compaq Personal Server is not available for purchase.
- There are no product plans beyond the current research
- prototypes at this time. Information is available at:
-
- <http://www.crl.hpl.hp.com/projects/personalserver/>
-
- If you have any questions or comments about the Compaq Personal
- Server, send e-mail to <skiff@crl.dec.com>.
-
config ARCH_EBSA285_ADDIN
bool "EBSA285 (addin mode)"
select ARCH_EBSA285
diff --git a/arch/arm/mach-footbridge/Makefile b/arch/arm/mach-footbridge/Makefile
index a09f1041f141..6262993c0555 100644
--- a/arch/arm/mach-footbridge/Makefile
+++ b/arch/arm/mach-footbridge/Makefile
@@ -11,12 +11,10 @@ pci-y += dc21285.o
pci-$(CONFIG_ARCH_CATS) += cats-pci.o
pci-$(CONFIG_ARCH_EBSA285_HOST) += ebsa285-pci.o
pci-$(CONFIG_ARCH_NETWINDER) += netwinder-pci.o
-pci-$(CONFIG_ARCH_PERSONAL_SERVER) += personal-pci.o
obj-$(CONFIG_ARCH_CATS) += cats-hw.o isa-timer.o
obj-$(CONFIG_ARCH_EBSA285) += ebsa285.o dc21285-timer.o
obj-$(CONFIG_ARCH_NETWINDER) += netwinder-hw.o isa-timer.o
-obj-$(CONFIG_ARCH_PERSONAL_SERVER) += personal.o dc21285-timer.o
obj-$(CONFIG_PCI) += $(pci-y)
diff --git a/arch/arm/mach-footbridge/cats-pci.c b/arch/arm/mach-footbridge/cats-pci.c
index 0b2fd7e2e9b4..90b1e9be430e 100644
--- a/arch/arm/mach-footbridge/cats-pci.c
+++ b/arch/arm/mach-footbridge/cats-pci.c
@@ -15,14 +15,14 @@
#include <asm/mach-types.h>
/* cats host-specific stuff */
-static int irqmap_cats[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
+static int irqmap_cats[] = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
static u8 cats_no_swizzle(struct pci_dev *dev, u8 *pin)
{
return 0;
}
-static int __init cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int cats_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (dev->irq >= 255)
return -1; /* not a valid interrupt. */
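Dropping __init/__initdata in these footbridge PCI files is a lifetime fix, not a cleanup: the map_irq() callback can run after boot (for instance on a PCI rescan), by which point init text and data have been freed. A hedged illustration of the failure mode being avoided:

    /* Unsafe: freed after boot, yet still reachable via hw_pci->map_irq. */
    static int irqmap[] __initdata = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
    static int __init bad_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);

    /* Safe: table and callback stay resident for the life of the kernel. */
    static int irqmap[] = { IRQ_PCI, IRQ_IN0, IRQ_IN1, IRQ_IN3 };
    static int good_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);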
diff --git a/arch/arm/mach-footbridge/ebsa285-pci.c b/arch/arm/mach-footbridge/ebsa285-pci.c
index 6f28aaa9ca79..c3f280d08fa7 100644
--- a/arch/arm/mach-footbridge/ebsa285-pci.c
+++ b/arch/arm/mach-footbridge/ebsa285-pci.c
@@ -14,9 +14,9 @@
#include <asm/mach/pci.h>
#include <asm/mach-types.h>
-static int irqmap_ebsa285[] __initdata = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
+static int irqmap_ebsa285[] = { IRQ_IN3, IRQ_IN1, IRQ_IN0, IRQ_PCI };
-static int __init ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int ebsa285_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
if (dev->vendor == PCI_VENDOR_ID_CONTAQ &&
dev->device == PCI_DEVICE_ID_CONTAQ_82C693)
diff --git a/arch/arm/mach-footbridge/netwinder-pci.c b/arch/arm/mach-footbridge/netwinder-pci.c
index 9473aa0305e5..e8304392074b 100644
--- a/arch/arm/mach-footbridge/netwinder-pci.c
+++ b/arch/arm/mach-footbridge/netwinder-pci.c
@@ -18,7 +18,7 @@
* We now use the slot ID instead of the device identifiers to select
* which interrupt is routed where.
*/
-static int __init netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int netwinder_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
switch (slot) {
case 0: /* host bridge */
diff --git a/arch/arm/mach-footbridge/personal-pci.c b/arch/arm/mach-footbridge/personal-pci.c
deleted file mode 100644
index 4391e433a4b2..000000000000
--- a/arch/arm/mach-footbridge/personal-pci.c
+++ /dev/null
@@ -1,58 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/arm/mach-footbridge/personal-pci.c
- *
- * PCI bios-type initialisation for PCI machines
- *
- * Bits taken from various places.
- */
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-
-#include <asm/irq.h>
-#include <asm/mach/pci.h>
-#include <asm/mach-types.h>
-
-static int irqmap_personal_server[] __initdata = {
- IRQ_IN0, IRQ_IN1, IRQ_IN2, IRQ_IN3, 0, 0, 0,
- IRQ_DOORBELLHOST, IRQ_DMA1, IRQ_DMA2, IRQ_PCI
-};
-
-static int __init personal_server_map_irq(const struct pci_dev *dev, u8 slot,
- u8 pin)
-{
- unsigned char line;
-
- pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line);
-
- if (line > 0x40 && line <= 0x5f) {
- /* line corresponds to the bit controlling this interrupt
- * in the footbridge. Ignore the first 8 interrupt bits,
- * look up the rest in the map. IN0 is bit number 8
- */
- return irqmap_personal_server[(line & 0x1f) - 8];
- } else if (line == 0) {
- /* no interrupt */
- return 0;
- } else
- return irqmap_personal_server[(line - 1) & 3];
-}
-
-static struct hw_pci personal_server_pci __initdata = {
- .map_irq = personal_server_map_irq,
- .nr_controllers = 1,
- .ops = &dc21285_ops,
- .setup = dc21285_setup,
- .preinit = dc21285_preinit,
- .postinit = dc21285_postinit,
-};
-
-static int __init personal_pci_init(void)
-{
- if (machine_is_personal_server())
- pci_common_init(&personal_server_pci);
- return 0;
-}
-
-subsys_initcall(personal_pci_init);
diff --git a/arch/arm/mach-footbridge/personal.c b/arch/arm/mach-footbridge/personal.c
deleted file mode 100644
index ca715754fc00..000000000000
--- a/arch/arm/mach-footbridge/personal.c
+++ /dev/null
@@ -1,25 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/arm/mach-footbridge/personal.c
- *
- * Personal server (Skiff) machine fixup
- */
-#include <linux/init.h>
-#include <linux/spinlock.h>
-
-#include <asm/hardware/dec21285.h>
-#include <asm/mach-types.h>
-
-#include <asm/mach/arch.h>
-
-#include "common.h"
-
-MACHINE_START(PERSONAL_SERVER, "Compaq-PersonalServer")
- /* Maintainer: Jamey Hicks / George France */
- .atag_offset = 0x100,
- .map_io = footbridge_map_io,
- .init_irq = footbridge_init_irq,
- .init_time = footbridge_timer_init,
- .restart = footbridge_restart,
-MACHINE_END
-
diff --git a/arch/arm/mach-hisi/hisilicon.c b/arch/arm/mach-hisi/hisilicon.c
index 07ea28b99cd0..b8d14b369cc9 100644
--- a/arch/arm/mach-hisi/hisilicon.c
+++ b/arch/arm/mach-hisi/hisilicon.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * (Hisilicon's SoC based) flattened device tree enabled machine
+ * (HiSilicon's SoC based) flattened device tree enabled machine
*
- * Copyright (c) 2012-2013 Hisilicon Ltd.
+ * Copyright (c) 2012-2013 HiSilicon Ltd.
* Copyright (c) 2012-2013 Linaro Ltd.
*
* Author: Haojian Zhuang <haojian.zhuang@linaro.org>
diff --git a/arch/arm/mach-hisi/hotplug.c b/arch/arm/mach-hisi/hotplug.c
index 5c5f255abc3a..c517941416f1 100644
--- a/arch/arm/mach-hisi/hotplug.c
+++ b/arch/arm/mach-hisi/hotplug.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013 Linaro Ltd.
- * Copyright (c) 2013 Hisilicon Limited.
+ * Copyright (c) 2013 HiSilicon Limited.
*/
#include <linux/cpu.h>
diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c
index f155e32f8420..96a484095194 100644
--- a/arch/arm/mach-hisi/platmcpm.c
+++ b/arch/arm/mach-hisi/platmcpm.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2014 Linaro Ltd.
- * Copyright (c) 2013-2014 Hisilicon Limited.
+ * Copyright (c) 2013-2014 HiSilicon Limited.
*/
#include <linux/init.h>
#include <linux/smp.h>
diff --git a/arch/arm/mach-hisi/platsmp.c b/arch/arm/mach-hisi/platsmp.c
index da7a09c1dae5..a56cc64deeb8 100644
--- a/arch/arm/mach-hisi/platsmp.c
+++ b/arch/arm/mach-hisi/platsmp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013 Linaro Ltd.
- * Copyright (c) 2013 Hisilicon Limited.
+ * Copyright (c) 2013 HiSilicon Limited.
* Based on arch/arm/mach-vexpress/platsmp.c, Copyright (C) 2002 ARM Ltd.
*/
#include <linux/smp.h>
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 52902782cc5f..b407b024dde3 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -63,7 +63,7 @@ config SOC_IMX35
select MXC_AVIC
select PINCTRL_IMX35
help
- This enables support for Freescale i.MX31 processor
+ This enables support for Freescale i.MX35 processor
endif
diff --git a/arch/arm/mach-imx/avic.c b/arch/arm/mach-imx/avic.c
index 322caa21bcb3..21bce4049cec 100644
--- a/arch/arm/mach-imx/avic.c
+++ b/arch/arm/mach-imx/avic.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -162,7 +163,7 @@ static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
* interrupts. It registers the interrupt enable and disable functions
* to the kernel for each interrupt source.
*/
-void __init mxc_init_irq(void __iomem *irqbase)
+static void __init mxc_init_irq(void __iomem *irqbase)
{
struct device_node *np;
int irq_base;
@@ -220,3 +221,16 @@ void __init mxc_init_irq(void __iomem *irqbase)
printk(KERN_INFO "MXC IRQ initialized\n");
}
+
+static int __init imx_avic_init(struct device_node *node,
+ struct device_node *parent)
+{
+ void __iomem *avic_base;
+
+ avic_base = of_iomap(node, 0);
+ BUG_ON(!avic_base);
+ mxc_init_irq(avic_base);
+ return 0;
+}
+
+IRQCHIP_DECLARE(imx_avic, "fsl,avic", imx_avic_init);
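The IRQCHIP_DECLARE() line is what lets the following hunks delete every per-SoC .init_irq hook: it records the "fsl,avic" compatible in the irqchip OF table, so the generic irqchip probing path finds and initializes the AVIC straight from the device tree. The general shape of such a conversion (the compatible string and names above are real; the skeleton below is a sketch):

    static int __init my_irqchip_init(struct device_node *node,
                                      struct device_node *parent)
    {
            void __iomem *base = of_iomap(node, 0);  /* map from DT "reg" */

            if (!base)
                    return -ENOMEM;
            /* ... set up the irq domain and handlers ... */
            return 0;
    }
    IRQCHIP_DECLARE(my_irqchip, "vendor,my-irqchip", my_irqchip_init);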
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 2b004cc4f95e..474dedb73bc7 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -22,7 +22,6 @@ void mx35_map_io(void);
void imx21_init_early(void);
void imx31_init_early(void);
void imx35_init_early(void);
-void mxc_init_irq(void __iomem *);
void mx31_init_irq(void);
void mx35_init_irq(void);
void mxc_set_cpu_type(unsigned int type);
diff --git a/arch/arm/mach-imx/mach-imx1.c b/arch/arm/mach-imx/mach-imx1.c
index 32df3b8012f9..8eca92d66a2e 100644
--- a/arch/arm/mach-imx/mach-imx1.c
+++ b/arch/arm/mach-imx/mach-imx1.c
@@ -17,16 +17,6 @@ static void __init imx1_init_early(void)
mxc_set_cpu_type(MXC_CPU_MX1);
}
-static void __init imx1_init_irq(void)
-{
- void __iomem *avic_addr;
-
- avic_addr = ioremap(MX1_AVIC_ADDR, SZ_4K);
- WARN_ON(!avic_addr);
-
- mxc_init_irq(avic_addr);
-}
-
static const char * const imx1_dt_board_compat[] __initconst = {
"fsl,imx1",
NULL
@@ -34,7 +24,6 @@ static const char * const imx1_dt_board_compat[] __initconst = {
DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)")
.init_early = imx1_init_early,
- .init_irq = imx1_init_irq,
.dt_compat = imx1_dt_board_compat,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx25.c b/arch/arm/mach-imx/mach-imx25.c
index 95de48a1aa7d..51927bd08aef 100644
--- a/arch/arm/mach-imx/mach-imx25.c
+++ b/arch/arm/mach-imx/mach-imx25.c
@@ -22,17 +22,6 @@ static void __init imx25_dt_init(void)
imx_aips_allow_unprivileged_access("fsl,imx25-aips");
}
-static void __init mx25_init_irq(void)
-{
- struct device_node *np;
- void __iomem *avic_base;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,avic");
- avic_base = of_iomap(np, 0);
- BUG_ON(!avic_base);
- mxc_init_irq(avic_base);
-}
-
static const char * const imx25_dt_board_compat[] __initconst = {
"fsl,imx25",
NULL
@@ -42,6 +31,5 @@ DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
.init_early = imx25_init_early,
.init_machine = imx25_dt_init,
.init_late = imx25_pm_init,
- .init_irq = mx25_init_irq,
.dt_compat = imx25_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx27.c b/arch/arm/mach-imx/mach-imx27.c
index 262422a9c196..e325c9468105 100644
--- a/arch/arm/mach-imx/mach-imx27.c
+++ b/arch/arm/mach-imx/mach-imx27.c
@@ -56,17 +56,6 @@ static void __init imx27_init_early(void)
mxc_set_cpu_type(MXC_CPU_MX27);
}
-static void __init mx27_init_irq(void)
-{
- void __iomem *avic_base;
- struct device_node *np;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,avic");
- avic_base = of_iomap(np, 0);
- BUG_ON(!avic_base);
- mxc_init_irq(avic_base);
-}
-
static const char * const imx27_dt_board_compat[] __initconst = {
"fsl,imx27",
NULL
@@ -75,7 +64,6 @@ static const char * const imx27_dt_board_compat[] __initconst = {
DT_MACHINE_START(IMX27_DT, "Freescale i.MX27 (Device Tree Support)")
.map_io = mx27_map_io,
.init_early = imx27_init_early,
- .init_irq = mx27_init_irq,
.init_late = imx27_pm_init,
.dt_compat = imx27_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx31.c b/arch/arm/mach-imx/mach-imx31.c
index dc69dfe600df..e9a1092b6093 100644
--- a/arch/arm/mach-imx/mach-imx31.c
+++ b/arch/arm/mach-imx/mach-imx31.c
@@ -14,6 +14,5 @@ static const char * const imx31_dt_board_compat[] __initconst = {
DT_MACHINE_START(IMX31_DT, "Freescale i.MX31 (Device Tree Support)")
.map_io = mx31_map_io,
.init_early = imx31_init_early,
- .init_irq = mx31_init_irq,
.dt_compat = imx31_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx35.c b/arch/arm/mach-imx/mach-imx35.c
index ec5c3068715c..0fc08218b77d 100644
--- a/arch/arm/mach-imx/mach-imx35.c
+++ b/arch/arm/mach-imx/mach-imx35.c
@@ -27,6 +27,5 @@ DT_MACHINE_START(IMX35_DT, "Freescale i.MX35 (Device Tree Support)")
.l2c_aux_mask = ~0,
.map_io = mx35_map_io,
.init_early = imx35_init_early,
- .init_irq = mx35_init_irq,
.dt_compat = imx35_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 5056438e5b42..28db97289ee8 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -109,18 +109,6 @@ void __init imx31_init_early(void)
mx3_ccm_base = of_iomap(np, 0);
BUG_ON(!mx3_ccm_base);
}
-
-void __init mx31_init_irq(void)
-{
- void __iomem *avic_base;
- struct device_node *np;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx31-avic");
- avic_base = of_iomap(np, 0);
- BUG_ON(!avic_base);
-
- mxc_init_irq(avic_base);
-}
#endif /* ifdef CONFIG_SOC_IMX31 */
#ifdef CONFIG_SOC_IMX35
@@ -158,16 +146,4 @@ void __init imx35_init_early(void)
mx3_ccm_base = of_iomap(np, 0);
BUG_ON(!mx3_ccm_base);
}
-
-void __init mx35_init_irq(void)
-{
- void __iomem *avic_base;
- struct device_node *np;
-
- np = of_find_compatible_node(NULL, NULL, "fsl,imx35-avic");
- avic_base = of_iomap(np, 0);
- BUG_ON(!avic_base);
-
- mxc_init_irq(avic_base);
-}
#endif /* ifdef CONFIG_SOC_IMX35 */
diff --git a/arch/arm/mach-imx/pm-imx27.c b/arch/arm/mach-imx/pm-imx27.c
index 020e6deb67c8..237e8aa9fe83 100644
--- a/arch/arm/mach-imx/pm-imx27.c
+++ b/arch/arm/mach-imx/pm-imx27.c
@@ -12,6 +12,7 @@
#include <linux/suspend.h>
#include <linux/io.h>
+#include "common.h"
#include "hardware.h"
static int mx27_suspend_enter(suspend_state_t state)
diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c
index e9962b48e30c..2e3af2bc7758 100644
--- a/arch/arm/mach-imx/pm-imx5.c
+++ b/arch/arm/mach-imx/pm-imx5.c
@@ -45,7 +45,7 @@
* This is also the lowest power state possible without affecting
* non-cpu parts of the system. For these reasons, imx5 should default
* to always using this state for cpu idling. The PM_SUSPEND_STANDBY also
- * uses this state and needs to take no action when registers remain confgiured
+ * uses this state and needs to take no action when registers remain configured
* for this state.
*/
#define IMX5_DEFAULT_CPU_IDLE_STATE WAIT_UNCLOCKED_POWER_OFF
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index 78b9a5ee41c9..bf99e718f8b8 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -116,16 +116,16 @@ static struct hw_pci n2100_pci __initdata = {
};
/*
- * Both r8169 chips on the n2100 exhibit PCI parity problems. Set
- * the ->broken_parity_status flag for both ports so that the r8169
- * driver knows it should ignore error interrupts.
+ * Both r8169 chips on the n2100 exhibit PCI parity problems. Turn
+ * off parity reporting for both ports so we don't get error interrupts
+ * for them.
*/
static void n2100_fixup_r8169(struct pci_dev *dev)
{
if (dev->bus->number == 0 &&
(dev->devfn == PCI_DEVFN(1, 0) ||
dev->devfn == PCI_DEVFN(2, 0)))
- dev->broken_parity_status = 1;
+ pci_disable_parity(dev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, PCI_ANY_ID, n2100_fixup_r8169);
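pci_disable_parity() moves the fix from the driver (ignore the errors) to the device (stop reporting them): it clears PCI_COMMAND_PARITY so the chip no longer signals parity errors at all. Roughly what the helper does, sketched with the standard config accessors:

    u16 cmd;

    pci_read_config_word(dev, PCI_COMMAND, &cmd);
    if (cmd & PCI_COMMAND_PARITY)
            pci_write_config_word(dev, PCI_COMMAND,
                                  cmd & ~PCI_COMMAND_PARITY);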
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 000f672a94c9..007a44412e24 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -233,12 +233,38 @@ static struct platform_device *ixp46x_devices[] __initdata = {
unsigned long ixp4xx_exp_bus_size;
EXPORT_SYMBOL(ixp4xx_exp_bus_size);
+static struct platform_device_info ixp_dev_info __initdata = {
+ .name = "ixp4xx_crypto",
+ .id = 0,
+ .dma_mask = DMA_BIT_MASK(32),
+};
+
+static int __init ixp_crypto_register(void)
+{
+ struct platform_device *pdev;
+
+ if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
+ IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
+ printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
+ return -ENODEV;
+ }
+
+ pdev = platform_device_register_full(&ixp_dev_info);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ return 0;
+}
+
void __init ixp4xx_sys_init(void)
{
ixp4xx_exp_bus_size = SZ_16M;
platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices));
+ if (IS_ENABLED(CONFIG_CRYPTO_DEV_IXP4XX))
+ ixp_crypto_register();
+
if (cpu_is_ixp46x()) {
int region;
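Two details in the registration above are easy to misread. The feature fuses in IXP4XX_EXP_CFG2 are active-low (a set bit means the feature is absent), so the register is inverted before masking; and platform_device_register_full() reports failure with an ERR_PTR(), never NULL, hence the IS_ERR() check. Condensed:

    u32 features = ~(*IXP4XX_EXP_CFG2);     /* fuse bits are active-low */

    if (!(features & (IXP4XX_FEATURE_HASH |
                      IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES)))
            return -ENODEV;                 /* no crypto engine fitted */

    pdev = platform_device_register_full(&ixp_dev_info);
    if (IS_ERR(pdev))                       /* ERR_PTR-style error, not NULL */
            return PTR_ERR(pdev);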
diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
index cd711bfc591f..2c647bdf8d25 100644
--- a/arch/arm/mach-keystone/keystone.c
+++ b/arch/arm/mach-keystone/keystone.c
@@ -65,7 +65,7 @@ static void __init keystone_init(void)
static long long __init keystone_pv_fixup(void)
{
long long offset;
- phys_addr_t mem_start, mem_end;
+ u64 mem_start, mem_end;
mem_start = memblock_start_of_DRAM();
mem_end = memblock_end_of_DRAM();
@@ -78,7 +78,7 @@ static long long __init keystone_pv_fixup(void)
if (mem_start < KEYSTONE_HIGH_PHYS_START ||
mem_end > KEYSTONE_HIGH_PHYS_END) {
pr_crit("Invalid address space for memory (%08llx-%08llx)\n",
- (u64)mem_start, (u64)mem_end);
+ mem_start, mem_end);
return 0;
}
diff --git a/arch/arm/mach-mstar/Kconfig b/arch/arm/mach-mstar/Kconfig
index 576d1ab293c8..cd300eeedc20 100644
--- a/arch/arm/mach-mstar/Kconfig
+++ b/arch/arm/mach-mstar/Kconfig
@@ -4,6 +4,7 @@ menuconfig ARCH_MSTARV7
select ARM_GIC
select ARM_HEAVY_MB
select MST_IRQ
+ select MSTAR_MSC313_MPLL
help
Support for newer MStar/Sigmastar SoC families that are
based on Armv7 cores like the Cortex A7 and share the same
diff --git a/arch/arm/mach-mvebu/kirkwood.c b/arch/arm/mach-mvebu/kirkwood.c
index ceaad6d5927e..06b1706595f4 100644
--- a/arch/arm/mach-mvebu/kirkwood.c
+++ b/arch/arm/mach-mvebu/kirkwood.c
@@ -84,6 +84,7 @@ static void __init kirkwood_dt_eth_fixup(void)
struct device_node *pnp = of_get_parent(np);
struct clk *clk;
struct property *pmac;
+ u8 tmpmac[ETH_ALEN];
void __iomem *io;
u8 *macaddr;
u32 reg;
@@ -93,7 +94,7 @@ static void __init kirkwood_dt_eth_fixup(void)
/* skip disabled nodes or nodes with valid MAC address*/
if (!of_device_is_available(pnp) ||
- !IS_ERR(of_get_mac_address(np)))
+ !of_get_mac_address(np, tmpmac))
goto eth_fixup_skip;
clk = of_clk_get(pnp, 0);
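This is the new of_get_mac_address() calling convention: instead of returning a pointer that had to be checked with IS_ERR(), it now fills a caller-supplied ETH_ALEN buffer and returns 0 on success. A minimal sketch, assuming the post-conversion signature int of_get_mac_address(struct device_node *np, u8 *addr):

    u8 mac[ETH_ALEN];

    if (!of_get_mac_address(np, mac))       /* 0 == success, mac[] filled */
            ether_addr_copy(dev->dev_addr, mac);
    else
            eth_hw_addr_random(dev);        /* fall back to a random MAC */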
diff --git a/arch/arm/mach-npcm/Kconfig b/arch/arm/mach-npcm/Kconfig
index 7f7002dc2b21..a71cf1d189ae 100644
--- a/arch/arm/mach-npcm/Kconfig
+++ b/arch/arm/mach-npcm/Kconfig
@@ -1,11 +1,22 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig ARCH_NPCM
bool "Nuvoton NPCM Architecture"
- depends on ARCH_MULTI_V7
+ depends on ARCH_MULTI_V5 || ARCH_MULTI_V7
select PINCTRL
if ARCH_NPCM
+config ARCH_WPCM450
+ bool "Support for WPCM450 BMC (Hermon)"
+ depends on ARCH_MULTI_V5
+ select CPU_ARM926T
+ select WPCM450_AIC
+ select NPCM7XX_TIMER
+ help
+ General support for WPCM450 BMC (Hermon).
+
+ Winbond/Nuvoton WPCM450 BMC based on the ARM926EJ-S.
+
config ARCH_NPCM7XX
bool "Support for NPCM7xx BMC (Poleg)"
depends on ARCH_MULTI_V7
diff --git a/arch/arm/mach-npcm/Makefile b/arch/arm/mach-npcm/Makefile
index 1bc3a70bfab8..8d61fcd42fb1 100644
--- a/arch/arm/mach-npcm/Makefile
+++ b/arch/arm/mach-npcm/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
AFLAGS_headsmp.o += -march=armv7-a
+obj-$(CONFIG_ARCH_WPCM450) += wpcm450.o
obj-$(CONFIG_ARCH_NPCM7XX) += npcm7xx.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
diff --git a/arch/arm/mach-npcm/wpcm450.c b/arch/arm/mach-npcm/wpcm450.c
new file mode 100644
index 000000000000..f17b3dab45af
--- /dev/null
+++ b/arch/arm/mach-npcm/wpcm450.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright 2021 Jonathan Neuschäfer
+
+#include <asm/mach/arch.h>
+
+static const char *const wpcm450_dt_match[] = {
+ "nuvoton,wpcm450",
+ NULL
+};
+
+DT_MACHINE_START(WPCM450_DT, "WPCM450 chip")
+ .dt_compat = wpcm450_dt_match,
+MACHINE_END
diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
index 14a6c3eb3298..f745a65d3bd7 100644
--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S
+++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S
@@ -15,6 +15,7 @@
#include <linux/platform_data/gpio-omap.h>
#include <asm/assembler.h>
+#include <asm/irq.h>
#include "ams-delta-fiq.h"
#include "board-ams-delta.h"
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index 2ee527c00284..1026a816dcc0 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -458,20 +458,6 @@ static struct gpiod_lookup_table leds_gpio_table = {
#ifdef CONFIG_LEDS_TRIGGERS
DEFINE_LED_TRIGGER(ams_delta_camera_led_trigger);
-
-static int ams_delta_camera_power(struct device *dev, int power)
-{
- /*
- * turn on camera LED
- */
- if (power)
- led_trigger_event(ams_delta_camera_led_trigger, LED_FULL);
- else
- led_trigger_event(ams_delta_camera_led_trigger, LED_OFF);
- return 0;
-}
-#else
-#define ams_delta_camera_power NULL
#endif
static struct platform_device ams_delta_audio_device = {
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index c40cf5ef8607..977b0b744c22 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -320,7 +320,7 @@ static int tps_setup(struct i2c_client *client, void *context)
{
if (!IS_BUILTIN(CONFIG_TPS65010))
return -ENOSYS;
-
+
tps65010_config_vregs1(TPS_LDO2_ENABLE | TPS_VLDO2_3_0V |
TPS_LDO1_ENABLE | TPS_VLDO1_3_0V);
@@ -394,6 +394,8 @@ static void __init h2_init(void)
BUG_ON(gpio_request(H2_NAND_RB_GPIO_PIN, "NAND ready") < 0);
gpio_direction_input(H2_NAND_RB_GPIO_PIN);
+ gpiod_add_lookup_table(&isp1301_gpiod_table);
+
omap_cfg_reg(L3_1610_FLASH_CS2B_OE);
omap_cfg_reg(M8_1610_FLASH_CS2B_WE);
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index 0a4c9b0b13b0..e18b6f13300e 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -332,11 +332,15 @@ static const struct property_entry mistral_at24_properties[] = {
{ }
};
+static const struct software_node mistral_at24_node = {
+ .properties = mistral_at24_properties,
+};
+
static struct i2c_board_info __initdata mistral_i2c_board_info[] = {
{
/* NOTE: powered from LCD supply */
I2C_BOARD_INFO("24c04", 0x50),
- .properties = mistral_at24_properties,
+ .swnode = &mistral_at24_node,
},
/* TODO when driver support is ready:
* - optionally ov9640 camera sensor at 0x30
diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
index 2c1e2b32b9b3..a745d64d4699 100644
--- a/arch/arm/mach-omap1/pm.c
+++ b/arch/arm/mach-omap1/pm.c
@@ -655,9 +655,13 @@ static int __init omap_pm_init(void)
irq = INT_7XX_WAKE_UP_REQ;
else if (cpu_is_omap16xx())
irq = INT_1610_WAKE_UP_REQ;
- if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup",
- NULL))
- pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
+ else
+ irq = -1;
+
+ if (irq >= 0) {
+ if (request_irq(irq, omap_wakeup_interrupt, 0, "peripheral wakeup", NULL))
+ pr_err("Failed to request irq %d (peripheral wakeup)\n", irq);
+ }
/* Program new power ramp-up time
* (0 for most boards since we don't lower voltage when in deep sleep)
diff --git a/arch/arm/mach-omap1/timer.c b/arch/arm/mach-omap1/timer.c
index 97fc2096b970..0411d5508d63 100644
--- a/arch/arm/mach-omap1/timer.c
+++ b/arch/arm/mach-omap1/timer.c
@@ -1,4 +1,4 @@
-/**
+/*
* OMAP1 Dual-Mode Timers - platform device registration
*
* Contains first level initialization routines which internally
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 4178c0ee46eb..7df8f5276ddf 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -34,7 +34,6 @@ config ARCH_OMAP4
select ARM_GIC
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
- select OMAP_HWMOD
select OMAP_INTERCONNECT
select OMAP_INTERCONNECT_BARRIER
select PL310_ERRATA_588369 if CACHE_L2X0
@@ -54,7 +53,6 @@ config SOC_OMAP5
select HAVE_ARM_SCU if SMP
select HAVE_ARM_ARCH_TIMER
select ARM_ERRATA_798181 if SMP
- select OMAP_HWMOD
select OMAP_INTERCONNECT
select OMAP_INTERCONNECT_BARRIER
select PM_OPP
@@ -90,7 +88,6 @@ config SOC_DRA7XX
select HAVE_ARM_ARCH_TIMER
select IRQ_CROSSBAR
select ARM_ERRATA_798181 if SMP
- select OMAP_HWMOD
select OMAP_INTERCONNECT
select OMAP_INTERCONNECT_BARRIER
select PM_OPP
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 9bcfb34a2206..8306ad686bc8 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -20,14 +20,14 @@ secure-common = omap-smc.o omap-secure.o
obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
-obj-$(CONFIG_ARCH_OMAP4) += $(hwmod-common) $(secure-common)
+obj-$(CONFIG_ARCH_OMAP4) += $(secure-common)
obj-$(CONFIG_SOC_AM33XX) += $(secure-common)
-obj-$(CONFIG_SOC_OMAP5) += $(hwmod-common) $(secure-common)
+obj-$(CONFIG_SOC_OMAP5) += $(secure-common)
obj-$(CONFIG_SOC_AM43XX) += $(secure-common)
-obj-$(CONFIG_SOC_DRA7XX) += $(hwmod-common) $(secure-common)
+obj-$(CONFIG_SOC_DRA7XX) += $(secure-common)
ifneq ($(CONFIG_SND_SOC_OMAP_MCBSP),)
-obj-y += mcbsp.o
+obj-$(CONFIG_OMAP_HWMOD) += mcbsp.o
endif
obj-$(CONFIG_TWL4030_CORE) += omap_twl.o
@@ -207,9 +207,6 @@ obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2430_data.o
obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_ipblock_data.o
obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_3xxx_data.o
obj-$(CONFIG_SOC_TI81XX) += omap_hwmod_81xx_data.o
-obj-$(CONFIG_ARCH_OMAP4) += omap_hwmod_44xx_data.o
-obj-$(CONFIG_SOC_OMAP5) += omap_hwmod_54xx_data.o
-obj-$(CONFIG_SOC_DRA7XX) += omap_hwmod_7xx_data.o
# OMAP2420 MSDI controller integration support ("MMC")
obj-$(CONFIG_SOC_OMAP2420) += msdi.o
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 7290f033fd2d..1610c567a6a3 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -33,7 +33,7 @@ static void __init __maybe_unused omap_generic_init(void)
}
/* Clocks are needed early, see drivers/clocksource for the rest */
-void __init __maybe_unused omap_init_time_of(void)
+static void __init __maybe_unused omap_init_time_of(void)
{
omap_clk_init();
timer_probe();
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 418a61ecb827..5e86145db0e2 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -322,6 +322,7 @@ static int n8x0_mmc_get_cover_state(struct device *dev, int slot)
static void n8x0_mmc_callback(void *data, u8 card_mask)
{
+#ifdef CONFIG_MMC_OMAP
int bit, *openp, index;
if (board_is_n800()) {
@@ -339,7 +340,6 @@ static void n8x0_mmc_callback(void *data, u8 card_mask)
else
*openp = 0;
-#ifdef CONFIG_MMC_OMAP
omap_mmc_notify_cover_event(mmc_device, index, *openp);
#else
pr_warn("MMC: notify cover event not available\n");
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 49926eced5f1..db446f271f5d 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -343,15 +343,6 @@ static inline void omap5_secondary_hyp_startup(void)
}
#endif
-#ifdef CONFIG_SOC_DRA7XX
-extern int dra7xx_pciess_reset(struct omap_hwmod *oh);
-#else
-static inline int dra7xx_pciess_reset(struct omap_hwmod *oh)
-{
- return 0;
-}
-#endif
-
struct omap_system_dma_plat_info;
void pdata_quirks_init(const struct of_device_id *);
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 060ba6957b7c..fba0c7aa398c 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -402,6 +402,7 @@ static int __init _omap2_init_reprogram_sdrc(void)
return v;
}
+#ifdef CONFIG_OMAP_HWMOD
static int _set_hwmod_postsetup_state(struct omap_hwmod *oh, void *data)
{
return omap_hwmod_set_postsetup_state(oh, *(u8 *)data);
@@ -414,6 +415,11 @@ static void __init __maybe_unused omap_hwmod_init_postsetup(void)
/* Set the default postsetup state for all hwmods */
omap_hwmod_for_each(_set_hwmod_postsetup_state, &postsetup_state);
}
+#else
+static inline void omap_hwmod_init_postsetup(void)
+{
+}
+#endif
#ifdef CONFIG_SOC_OMAP2420
void __init omap2420_init_early(void)
@@ -615,8 +621,6 @@ void __init omap4430_init_early(void)
omap44xx_voltagedomains_init();
omap44xx_powerdomains_init();
omap44xx_clockdomains_init();
- omap44xx_hwmod_init();
- omap_hwmod_init_postsetup();
omap_l2_cache_init();
omap_clk_soc_init = omap4xxx_dt_clk_init;
omap_secure_init();
@@ -643,8 +647,6 @@ void __init omap5_init_early(void)
omap54xx_voltagedomains_init();
omap54xx_powerdomains_init();
omap54xx_clockdomains_init();
- omap54xx_hwmod_init();
- omap_hwmod_init_postsetup();
omap_clk_soc_init = omap5xxx_dt_clk_init;
omap_secure_init();
}
@@ -667,8 +669,6 @@ void __init dra7xx_init_early(void)
dra7xxx_check_revision();
dra7xx_powerdomains_init();
dra7xx_clockdomains_init();
- dra7xx_hwmod_init();
- omap_hwmod_init_postsetup();
omap_clk_soc_init = dra7xx_dt_clk_init;
omap_secure_init();
}
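The io.c hunk shows both halves of the usual config-stub idiom that the rest of this series leans on: the real helper is built under CONFIG_OMAP_HWMOD, and a static inline no-op takes its place otherwise, so the init_early callers need no #ifdefs of their own. The idiom in miniature, with a hypothetical CONFIG_FOO facility:

    #ifdef CONFIG_FOO
    void __init foo_init_postsetup(void);
    #else
    static inline void foo_init_postsetup(void)
    {
            /* facility compiled out: nothing to do */
    }
    #endif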
diff --git a/arch/arm/mach-omap2/omap-secure.c b/arch/arm/mach-omap2/omap-secure.c
index f70d561f37f7..0659ab4cb0af 100644
--- a/arch/arm/mach-omap2/omap-secure.c
+++ b/arch/arm/mach-omap2/omap-secure.c
@@ -9,6 +9,7 @@
*/
#include <linux/arm-smccc.h>
+#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -20,6 +21,7 @@
#include "common.h"
#include "omap-secure.h"
+#include "soc.h"
static phys_addr_t omap_secure_memblock_base;
@@ -213,3 +215,40 @@ void __init omap_secure_init(void)
{
omap_optee_init_check();
}
+
+/*
+ * Dummy dispatcher call after core OSWR and MPU off. Updates the ROM return
+ * address after the MMU has been re-enabled once CPU1 has been woken up
+ * again. Otherwise the ROM code would attempt to use the earlier physical
+ * return address that was set with the MMU off while waking up CPU1. Only
+ * used on secure devices.
+ */
+static int cpu_notifier(struct notifier_block *nb, unsigned long cmd, void *v)
+{
+ switch (cmd) {
+ case CPU_CLUSTER_PM_EXIT:
+ omap_secure_dispatcher(OMAP4_PPA_SERVICE_0,
+ FLAG_START_CRITICAL,
+ 0, 0, 0, 0, 0);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block secure_notifier_block = {
+ .notifier_call = cpu_notifier,
+};
+
+static int __init secure_pm_init(void)
+{
+ if (omap_type() == OMAP2_DEVICE_TYPE_GP || !soc_is_omap44xx())
+ return 0;
+
+ cpu_pm_register_notifier(&secure_notifier_block);
+
+ return 0;
+}
+omap_arch_initcall(secure_pm_init);
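CPU PM notifiers are the right hook here because CPU_CLUSTER_PM_EXIT fires exactly once when the whole cluster has resumed with the MMU back on, which is when the ROM return address must be refreshed. The registration shape used above, reduced to its skeleton (names below are illustrative):

    static int my_pm_notify(struct notifier_block *nb, unsigned long cmd,
                            void *v)
    {
            if (cmd == CPU_CLUSTER_PM_EXIT)
                    /* cluster back up, MMU on: redo post-resume fixups */
                    do_post_resume_fixup();
            return NOTIFY_OK;
    }

    static struct notifier_block my_pm_nb = {
            .notifier_call = my_pm_notify,
    };

    cpu_pm_register_notifier(&my_pm_nb);    /* e.g. from an initcall */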
diff --git a/arch/arm/mach-omap2/omap-secure.h b/arch/arm/mach-omap2/omap-secure.h
index 4aaa95706d39..172069f31616 100644
--- a/arch/arm/mach-omap2/omap-secure.h
+++ b/arch/arm/mach-omap2/omap-secure.h
@@ -50,6 +50,7 @@
#define OMAP5_DRA7_MON_SET_ACR_INDEX 0x107
/* Secure PPA(Primary Protected Application) APIs */
+#define OMAP4_PPA_SERVICE_0 0x21
#define OMAP4_PPA_L2_POR_INDEX 0x23
#define OMAP4_PPA_CPU_ACTRL_SMP_INDEX 0x25
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 2310cd56e99b..65934b2924fb 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2137,6 +2137,7 @@ static int of_dev_hwmod_lookup(struct device_node *np,
if (res == 0) {
*found = fc;
*index = i;
+ of_node_put(np0);
return 0;
}
}
@@ -3495,10 +3496,6 @@ static const struct omap_hwmod_reset omap24xx_reset_quirks[] = {
{ .match = "msdi", .len = 4, .reset = omap_msdi_reset, },
};
-static const struct omap_hwmod_reset dra7_reset_quirks[] = {
- { .match = "pcie", .len = 4, .reset = dra7xx_pciess_reset, },
-};
-
static const struct omap_hwmod_reset omap_reset_quirks[] = {
{ .match = "dss_core", .len = 8, .reset = omap_dss_reset, },
{ .match = "hdq1w", .len = 5, .reset = omap_hdq1w_reset, },
@@ -3534,10 +3531,6 @@ omap_hwmod_init_reset_quirks(struct device *dev, struct omap_hwmod *oh,
omap24xx_reset_quirks,
ARRAY_SIZE(omap24xx_reset_quirks));
- if (soc_is_dra7xx())
- omap_hwmod_init_reset_quirk(dev, oh, data, dra7_reset_quirks,
- ARRAY_SIZE(dra7_reset_quirks));
-
omap_hwmod_init_reset_quirk(dev, oh, data, omap_reset_quirks,
ARRAY_SIZE(omap_reset_quirks));
}
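The of_node_put(np0) added in the first hunk of this file closes a node-reference leak on the early-return path: every of_* lookup that takes a device-node reference must drop it on all exits, success included. The canonical pattern, with a hypothetical lookup:

    struct device_node *np = of_find_node_by_name(NULL, "example");

    if (np) {
            /* ... use np ... */
            of_node_put(np);        /* balance the reference on every path */
    }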
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index eebf2fdf434c..6962a8d267e7 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -607,6 +607,8 @@ struct omap_hwmod {
struct omap_hwmod *parent_hwmod;
};
+#ifdef CONFIG_OMAP_HWMOD
+
struct device_node;
struct omap_hwmod *omap_hwmod_lookup(const char *name);
@@ -656,6 +658,17 @@ extern void __init omap_hwmod_init(void);
const char *omap_hwmod_get_main_clk(struct omap_hwmod *oh);
+#else /* CONFIG_OMAP_HWMOD */
+
+static inline int
+omap_hwmod_for_each_by_class(const char *classname,
+ int (*fn)(struct omap_hwmod *oh, void *user),
+ void *user)
+{
+ return 0;
+}
+#endif /* CONFIG_OMAP_HWMOD */
+
/*
*
*/
@@ -671,7 +684,6 @@ extern int omap2420_hwmod_init(void);
extern int omap2430_hwmod_init(void);
extern int omap3xxx_hwmod_init(void);
extern int omap44xx_hwmod_init(void);
-extern int omap54xx_hwmod_init(void);
extern int am33xx_hwmod_init(void);
extern int dm814x_hwmod_init(void);
extern int dm816x_hwmod_init(void);
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
deleted file mode 100644
index 6aa3b8e81a0c..000000000000
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ /dev/null
@@ -1,877 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Hardware modules present on the OMAP44xx chips
- *
- * Copyright (C) 2009-2012 Texas Instruments, Inc.
- * Copyright (C) 2009-2010 Nokia Corporation
- *
- * Paul Walmsley
- * Benoit Cousson
- *
- * This file is automatically generated from the OMAP hardware databases.
- * We respectfully ask that any modifications to this file be coordinated
- * with the public linux-omap@vger.kernel.org mailing list and the
- * authors above to ensure that the autogeneration scripts are kept
- * up-to-date with the file contents.
- * Note that this file is currently not in sync with autogeneration scripts.
- * The above note to be removed, once it is synced up.
- */
-
-#include <linux/io.h>
-
-#include "omap_hwmod.h"
-#include "omap_hwmod_common_data.h"
-#include "cm1_44xx.h"
-#include "cm2_44xx.h"
-#include "prm44xx.h"
-#include "prm-regbits-44xx.h"
-
-/* Base offset for all OMAP4 interrupts external to MPUSS */
-#define OMAP44XX_IRQ_GIC_START 32
-
-/*
- * IP blocks
- */
-
-/*
- * 'dmm' class
- * instance(s): dmm
- */
-static struct omap_hwmod_class omap44xx_dmm_hwmod_class = {
- .name = "dmm",
-};
-
-/* dmm */
-static struct omap_hwmod omap44xx_dmm_hwmod = {
- .name = "dmm",
- .class = &omap44xx_dmm_hwmod_class,
- .clkdm_name = "l3_emif_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_MEMIF_DMM_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_MEMIF_DMM_CONTEXT_OFFSET,
- },
- },
-};
-
-/*
- * 'l3' class
- * instance(s): l3_instr, l3_main_1, l3_main_2, l3_main_3
- */
-static struct omap_hwmod_class omap44xx_l3_hwmod_class = {
- .name = "l3",
-};
-
-/* l3_instr */
-static struct omap_hwmod omap44xx_l3_instr_hwmod = {
- .name = "l3_instr",
- .class = &omap44xx_l3_hwmod_class,
- .clkdm_name = "l3_instr_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L3INSTR_L3_INSTR_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-/* l3_main_1 */
-static struct omap_hwmod omap44xx_l3_main_1_hwmod = {
- .name = "l3_main_1",
- .class = &omap44xx_l3_hwmod_class,
- .clkdm_name = "l3_1_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L3_1_L3_1_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L3_1_L3_1_CONTEXT_OFFSET,
- },
- },
-};
-
-/* l3_main_2 */
-static struct omap_hwmod omap44xx_l3_main_2_hwmod = {
- .name = "l3_main_2",
- .class = &omap44xx_l3_hwmod_class,
- .clkdm_name = "l3_2_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L3_2_L3_2_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L3_2_L3_2_CONTEXT_OFFSET,
- },
- },
-};
-
-/* l3_main_3 */
-static struct omap_hwmod omap44xx_l3_main_3_hwmod = {
- .name = "l3_main_3",
- .class = &omap44xx_l3_hwmod_class,
- .clkdm_name = "l3_instr_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L3INSTR_L3_3_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L3INSTR_L3_3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-/*
- * 'l4' class
- * instance(s): l4_abe, l4_cfg, l4_per, l4_wkup
- */
-static struct omap_hwmod_class omap44xx_l4_hwmod_class = {
- .name = "l4",
-};
-
-/* l4_cfg */
-static struct omap_hwmod omap44xx_l4_cfg_hwmod = {
- .name = "l4_cfg",
- .class = &omap44xx_l4_hwmod_class,
- .clkdm_name = "l4_cfg_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4CFG_L4_CFG_CONTEXT_OFFSET,
- },
- },
-};
-
-/* l4_per */
-static struct omap_hwmod omap44xx_l4_per_hwmod = {
- .name = "l4_per",
- .class = &omap44xx_l4_hwmod_class,
- .clkdm_name = "l4_per_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L4PER_L4PER_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L4PER_L4_PER_CONTEXT_OFFSET,
- },
- },
-};
-
-/* l4_wkup */
-static struct omap_hwmod omap44xx_l4_wkup_hwmod = {
- .name = "l4_wkup",
- .class = &omap44xx_l4_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_WKUP_L4WKUP_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_WKUP_L4WKUP_CONTEXT_OFFSET,
- },
- },
-};
-
-/*
- * 'mpu_bus' class
- * instance(s): mpu_private
- */
-static struct omap_hwmod_class omap44xx_mpu_bus_hwmod_class = {
- .name = "mpu_bus",
-};
-
-/* mpu_private */
-static struct omap_hwmod omap44xx_mpu_private_hwmod = {
- .name = "mpu_private",
- .class = &omap44xx_mpu_bus_hwmod_class,
- .clkdm_name = "mpuss_clkdm",
- .prcm = {
- .omap4 = {
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
-};
-
-/*
- * 'ocp_wp_noc' class
- * instance(s): ocp_wp_noc
- */
-static struct omap_hwmod_class omap44xx_ocp_wp_noc_hwmod_class = {
- .name = "ocp_wp_noc",
-};
-
-/* ocp_wp_noc */
-static struct omap_hwmod omap44xx_ocp_wp_noc_hwmod = {
- .name = "ocp_wp_noc",
- .class = &omap44xx_ocp_wp_noc_hwmod_class,
- .clkdm_name = "l3_instr_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L3INSTR_OCP_WP1_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L3INSTR_OCP_WP1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-/*
- * Modules omap_hwmod structures
- *
- * The following IPs are excluded for the moment because:
- * - They do not need an explicit SW control using omap_hwmod API.
- * - They still need to be validated with the driver
- * properly adapted to omap_hwmod / omap_device
- *
- * usim
- */
-
-/*
- * 'ctrl_module' class
- * attila core control module + core pad control module + wkup pad control
- * module + attila wkup control module
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_ctrl_module_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- .sysc_flags = SYSC_HAS_SIDLEMODE,
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap44xx_ctrl_module_hwmod_class = {
- .name = "ctrl_module",
- .sysc = &omap44xx_ctrl_module_sysc,
-};
-
-/* ctrl_module_core */
-static struct omap_hwmod omap44xx_ctrl_module_core_hwmod = {
- .name = "ctrl_module_core",
- .class = &omap44xx_ctrl_module_hwmod_class,
- .clkdm_name = "l4_cfg_clkdm",
- .prcm = {
- .omap4 = {
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
-};
-
-/* ctrl_module_pad_core */
-static struct omap_hwmod omap44xx_ctrl_module_pad_core_hwmod = {
- .name = "ctrl_module_pad_core",
- .class = &omap44xx_ctrl_module_hwmod_class,
- .clkdm_name = "l4_cfg_clkdm",
- .prcm = {
- .omap4 = {
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
-};
-
-/* ctrl_module_wkup */
-static struct omap_hwmod omap44xx_ctrl_module_wkup_hwmod = {
- .name = "ctrl_module_wkup",
- .class = &omap44xx_ctrl_module_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .prcm = {
- .omap4 = {
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
-};
-
-/* ctrl_module_pad_wkup */
-static struct omap_hwmod omap44xx_ctrl_module_pad_wkup_hwmod = {
- .name = "ctrl_module_pad_wkup",
- .class = &omap44xx_ctrl_module_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .prcm = {
- .omap4 = {
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
-};
-
-/*
- * 'debugss' class
- * debug and emulation sub system
- */
-
-static struct omap_hwmod_class omap44xx_debugss_hwmod_class = {
- .name = "debugss",
-};
-
-/* debugss */
-static struct omap_hwmod omap44xx_debugss_hwmod = {
- .name = "debugss",
- .class = &omap44xx_debugss_hwmod_class,
- .clkdm_name = "emu_sys_clkdm",
- .main_clk = "trace_clk_div_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_EMU_DEBUGSS_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_EMU_DEBUGSS_CONTEXT_OFFSET,
- },
- },
-};
-
-/*
- * 'emif' class
- * external memory interface no1
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_emif_sysc = {
- .rev_offs = 0x0000,
-};
-
-static struct omap_hwmod_class omap44xx_emif_hwmod_class = {
- .name = "emif",
- .sysc = &omap44xx_emif_sysc,
-};
-
-/* emif1 */
-static struct omap_hwmod omap44xx_emif1_hwmod = {
- .name = "emif1",
- .class = &omap44xx_emif_hwmod_class,
- .clkdm_name = "l3_emif_clkdm",
- .flags = HWMOD_INIT_NO_IDLE,
- .main_clk = "ddrphy_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_MEMIF_EMIF_1_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_MEMIF_EMIF_1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-/* emif2 */
-static struct omap_hwmod omap44xx_emif2_hwmod = {
- .name = "emif2",
- .class = &omap44xx_emif_hwmod_class,
- .clkdm_name = "l3_emif_clkdm",
- .flags = HWMOD_INIT_NO_IDLE,
- .main_clk = "ddrphy_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_MEMIF_EMIF_2_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_MEMIF_EMIF_2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-/*
- * 'iss' class
- * external images sensor pixel data processor
- */
-
-static struct omap_hwmod_class_sysconfig omap44xx_iss_sysc = {
- .rev_offs = 0x0000,
- .sysc_offs = 0x0010,
- /*
- * ISS needs 100 OCP clk cycles delay after a softreset before
- * accessing sysconfig again.
- * The lowest frequency at the moment for L3 bus is 100 MHz, so
- * 1usec delay is needed. Add an x2 margin to be safe (2 usecs).
- *
- * TODO: Indicate errata when available.
- */
- .srst_udelay = 2,
- .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
- MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap44xx_iss_hwmod_class = {
- .name = "iss",
- .sysc = &omap44xx_iss_sysc,
-};
-
-/* iss */
-static struct omap_hwmod_opt_clk iss_opt_clks[] = {
- { .role = "ctrlclk", .clk = "iss_ctrlclk" },
-};
-
-static struct omap_hwmod omap44xx_iss_hwmod = {
- .name = "iss",
- .class = &omap44xx_iss_hwmod_class,
- .clkdm_name = "iss_clkdm",
- .main_clk = "ducati_clk_mux_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_CAM_ISS_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_CAM_ISS_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
- .opt_clks = iss_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(iss_opt_clks),
-};
-
-/*
- * 'mpu' class
- * mpu sub-system
- */
-
-static struct omap_hwmod_class omap44xx_mpu_hwmod_class = {
- .name = "mpu",
-};
-
-/* mpu */
-static struct omap_hwmod omap44xx_mpu_hwmod = {
- .name = "mpu",
- .class = &omap44xx_mpu_hwmod_class,
- .clkdm_name = "mpuss_clkdm",
- .flags = HWMOD_INIT_NO_IDLE,
- .main_clk = "dpll_mpu_m2_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_MPU_MPU_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_MPU_MPU_CONTEXT_OFFSET,
- },
- },
-};
-
-/*
- * 'ocmc_ram' class
- * top-level core on-chip ram
- */
-
-static struct omap_hwmod_class omap44xx_ocmc_ram_hwmod_class = {
- .name = "ocmc_ram",
-};
-
-/* ocmc_ram */
-static struct omap_hwmod omap44xx_ocmc_ram_hwmod = {
- .name = "ocmc_ram",
- .class = &omap44xx_ocmc_ram_hwmod_class,
- .clkdm_name = "l3_2_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_L3_2_OCMC_RAM_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_L3_2_OCMC_RAM_CONTEXT_OFFSET,
- },
- },
-};
-
-
-/*
- * 'prcm' class
- * power and reset manager (part of the prcm infrastructure) + clock manager 2
- * + clock manager 1 (in always on power domain) + local prm in mpu
- */
-
-static struct omap_hwmod_class omap44xx_prcm_hwmod_class = {
- .name = "prcm",
-};
-
-/* prcm_mpu */
-static struct omap_hwmod omap44xx_prcm_mpu_hwmod = {
- .name = "prcm_mpu",
- .class = &omap44xx_prcm_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .flags = HWMOD_NO_IDLEST,
- .prcm = {
- .omap4 = {
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
-};
-
-/* cm_core_aon */
-static struct omap_hwmod omap44xx_cm_core_aon_hwmod = {
- .name = "cm_core_aon",
- .class = &omap44xx_prcm_hwmod_class,
- .flags = HWMOD_NO_IDLEST,
- .prcm = {
- .omap4 = {
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
-};
-
-/* cm_core */
-static struct omap_hwmod omap44xx_cm_core_hwmod = {
- .name = "cm_core",
- .class = &omap44xx_prcm_hwmod_class,
- .flags = HWMOD_NO_IDLEST,
- .prcm = {
- .omap4 = {
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
-};
-
-/* prm */
-static struct omap_hwmod_rst_info omap44xx_prm_resets[] = {
- { .name = "rst_global_warm_sw", .rst_shift = 0 },
- { .name = "rst_global_cold_sw", .rst_shift = 1 },
-};
-
-static struct omap_hwmod omap44xx_prm_hwmod = {
- .name = "prm",
- .class = &omap44xx_prcm_hwmod_class,
- .rst_lines = omap44xx_prm_resets,
- .rst_lines_cnt = ARRAY_SIZE(omap44xx_prm_resets),
-};
-
-/*
- * 'scrm' class
- * system clock and reset manager
- */
-
-static struct omap_hwmod_class omap44xx_scrm_hwmod_class = {
- .name = "scrm",
-};
-
-/* scrm */
-static struct omap_hwmod omap44xx_scrm_hwmod = {
- .name = "scrm",
- .class = &omap44xx_scrm_hwmod_class,
- .clkdm_name = "l4_wkup_clkdm",
- .prcm = {
- .omap4 = {
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
-};
-
-/*
- * 'sl2if' class
- * shared level 2 memory interface
- */
-
-static struct omap_hwmod_class omap44xx_sl2if_hwmod_class = {
- .name = "sl2if",
-};
-
-/* sl2if */
-static struct omap_hwmod omap44xx_sl2if_hwmod = {
- .name = "sl2if",
- .class = &omap44xx_sl2if_hwmod_class,
- .clkdm_name = "ivahd_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP4_CM_IVAHD_SL2_CLKCTRL_OFFSET,
- .context_offs = OMAP4_RM_IVAHD_SL2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-/*
- * interfaces
- */
-
-/* l3_main_1 -> dmm */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_1__dmm = {
- .master = &omap44xx_l3_main_1_hwmod,
- .slave = &omap44xx_dmm_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_SDMA,
-};
-
-/* mpu -> dmm */
-static struct omap_hwmod_ocp_if omap44xx_mpu__dmm = {
- .master = &omap44xx_mpu_hwmod,
- .slave = &omap44xx_dmm_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU,
-};
-
-/* l3_main_3 -> l3_instr */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_3__l3_instr = {
- .master = &omap44xx_l3_main_3_hwmod,
- .slave = &omap44xx_l3_instr_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* ocp_wp_noc -> l3_instr */
-static struct omap_hwmod_ocp_if omap44xx_ocp_wp_noc__l3_instr = {
- .master = &omap44xx_ocp_wp_noc_hwmod,
- .slave = &omap44xx_l3_instr_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_2 -> l3_main_1 */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l3_main_1 = {
- .master = &omap44xx_l3_main_2_hwmod,
- .slave = &omap44xx_l3_main_1_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> l3_main_1 */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_1 = {
- .master = &omap44xx_l4_cfg_hwmod,
- .slave = &omap44xx_l3_main_1_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* mpu -> l3_main_1 */
-static struct omap_hwmod_ocp_if omap44xx_mpu__l3_main_1 = {
- .master = &omap44xx_mpu_hwmod,
- .slave = &omap44xx_l3_main_1_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU,
-};
-
-/* debugss -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_debugss__l3_main_2 = {
- .master = &omap44xx_debugss_hwmod,
- .slave = &omap44xx_l3_main_2_hwmod,
- .clk = "dbgclk_mux_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* iss -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_iss__l3_main_2 = {
- .master = &omap44xx_iss_hwmod,
- .slave = &omap44xx_l3_main_2_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_2 = {
- .master = &omap44xx_l3_main_1_hwmod,
- .slave = &omap44xx_l3_main_2_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU,
-};
-
-/* l4_cfg -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = {
- .master = &omap44xx_l4_cfg_hwmod,
- .slave = &omap44xx_l3_main_2_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> l3_main_3 */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l3_main_3 = {
- .master = &omap44xx_l3_main_1_hwmod,
- .slave = &omap44xx_l3_main_3_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU,
-};
-
-/* l3_main_2 -> l3_main_3 */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l3_main_3 = {
- .master = &omap44xx_l3_main_2_hwmod,
- .slave = &omap44xx_l3_main_3_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> l3_main_3 */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = {
- .master = &omap44xx_l4_cfg_hwmod,
- .slave = &omap44xx_l3_main_3_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> l4_cfg */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_1__l4_cfg = {
- .master = &omap44xx_l3_main_1_hwmod,
- .slave = &omap44xx_l4_cfg_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_2 -> l4_per */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__l4_per = {
- .master = &omap44xx_l3_main_2_hwmod,
- .slave = &omap44xx_l4_per_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> l4_wkup */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l4_wkup = {
- .master = &omap44xx_l4_cfg_hwmod,
- .slave = &omap44xx_l4_wkup_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* mpu -> mpu_private */
-static struct omap_hwmod_ocp_if omap44xx_mpu__mpu_private = {
- .master = &omap44xx_mpu_hwmod,
- .slave = &omap44xx_mpu_private_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> ocp_wp_noc */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__ocp_wp_noc = {
- .master = &omap44xx_l4_cfg_hwmod,
- .slave = &omap44xx_ocp_wp_noc_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> ctrl_module_core */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__ctrl_module_core = {
- .master = &omap44xx_l4_cfg_hwmod,
- .slave = &omap44xx_ctrl_module_core_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> ctrl_module_pad_core */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__ctrl_module_pad_core = {
- .master = &omap44xx_l4_cfg_hwmod,
- .slave = &omap44xx_ctrl_module_pad_core_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_wkup -> ctrl_module_wkup */
-static struct omap_hwmod_ocp_if omap44xx_l4_wkup__ctrl_module_wkup = {
- .master = &omap44xx_l4_wkup_hwmod,
- .slave = &omap44xx_ctrl_module_wkup_hwmod,
- .clk = "l4_wkup_clk_mux_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_wkup -> ctrl_module_pad_wkup */
-static struct omap_hwmod_ocp_if omap44xx_l4_wkup__ctrl_module_pad_wkup = {
- .master = &omap44xx_l4_wkup_hwmod,
- .slave = &omap44xx_ctrl_module_pad_wkup_hwmod,
- .clk = "l4_wkup_clk_mux_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_instr -> debugss */
-static struct omap_hwmod_ocp_if omap44xx_l3_instr__debugss = {
- .master = &omap44xx_l3_instr_hwmod,
- .slave = &omap44xx_debugss_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_2 -> iss */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
- .master = &omap44xx_l3_main_2_hwmod,
- .slave = &omap44xx_iss_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_2 -> ocmc_ram */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__ocmc_ram = {
- .master = &omap44xx_l3_main_2_hwmod,
- .slave = &omap44xx_ocmc_ram_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* mpu_private -> prcm_mpu */
-static struct omap_hwmod_ocp_if omap44xx_mpu_private__prcm_mpu = {
- .master = &omap44xx_mpu_private_hwmod,
- .slave = &omap44xx_prcm_mpu_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_wkup -> cm_core_aon */
-static struct omap_hwmod_ocp_if omap44xx_l4_wkup__cm_core_aon = {
- .master = &omap44xx_l4_wkup_hwmod,
- .slave = &omap44xx_cm_core_aon_hwmod,
- .clk = "l4_wkup_clk_mux_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> cm_core */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__cm_core = {
- .master = &omap44xx_l4_cfg_hwmod,
- .slave = &omap44xx_cm_core_hwmod,
- .clk = "l4_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_wkup -> prm */
-static struct omap_hwmod_ocp_if omap44xx_l4_wkup__prm = {
- .master = &omap44xx_l4_wkup_hwmod,
- .slave = &omap44xx_prm_hwmod,
- .clk = "l4_wkup_clk_mux_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_wkup -> scrm */
-static struct omap_hwmod_ocp_if omap44xx_l4_wkup__scrm = {
- .master = &omap44xx_l4_wkup_hwmod,
- .slave = &omap44xx_scrm_hwmod,
- .clk = "l4_wkup_clk_mux_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_2 -> sl2if */
-static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = {
- .master = &omap44xx_l3_main_2_hwmod,
- .slave = &omap44xx_sl2if_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* mpu -> emif1 */
-static struct omap_hwmod_ocp_if omap44xx_mpu__emif1 = {
- .master = &omap44xx_mpu_hwmod,
- .slave = &omap44xx_emif1_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* mpu -> emif2 */
-static struct omap_hwmod_ocp_if omap44xx_mpu__emif2 = {
- .master = &omap44xx_mpu_hwmod,
- .slave = &omap44xx_emif2_hwmod,
- .clk = "l3_div_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
- &omap44xx_l3_main_1__dmm,
- &omap44xx_mpu__dmm,
- &omap44xx_l3_main_3__l3_instr,
- &omap44xx_ocp_wp_noc__l3_instr,
- &omap44xx_l3_main_2__l3_main_1,
- &omap44xx_l4_cfg__l3_main_1,
- &omap44xx_mpu__l3_main_1,
- &omap44xx_debugss__l3_main_2,
- &omap44xx_iss__l3_main_2,
- &omap44xx_l3_main_1__l3_main_2,
- &omap44xx_l4_cfg__l3_main_2,
- &omap44xx_l3_main_1__l3_main_3,
- &omap44xx_l3_main_2__l3_main_3,
- &omap44xx_l4_cfg__l3_main_3,
- &omap44xx_l3_main_1__l4_cfg,
- &omap44xx_l3_main_2__l4_per,
- &omap44xx_l4_cfg__l4_wkup,
- &omap44xx_mpu__mpu_private,
- &omap44xx_l4_cfg__ocp_wp_noc,
- &omap44xx_l4_cfg__ctrl_module_core,
- &omap44xx_l4_cfg__ctrl_module_pad_core,
- &omap44xx_l4_wkup__ctrl_module_wkup,
- &omap44xx_l4_wkup__ctrl_module_pad_wkup,
- &omap44xx_l3_instr__debugss,
- &omap44xx_l3_main_2__iss,
- &omap44xx_l3_main_2__ocmc_ram,
- &omap44xx_mpu_private__prcm_mpu,
- &omap44xx_l4_wkup__cm_core_aon,
- &omap44xx_l4_cfg__cm_core,
- &omap44xx_l4_wkup__prm,
- &omap44xx_l4_wkup__scrm,
- /* &omap44xx_l3_main_2__sl2if, */
- &omap44xx_mpu__emif1,
- &omap44xx_mpu__emif2,
- NULL,
-};
-
-int __init omap44xx_hwmod_init(void)
-{
- omap_hwmod_init();
- return omap_hwmod_register_links(omap44xx_hwmod_ocp_ifs);
-}
-
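The OMAP4 data removed above repeats one fixed shape: every interconnect link is a struct omap_hwmod_ocp_if naming a master hwmod, a slave hwmod, the OCP interface clock, and which initiators (MPU and/or SDMA) may use the port, and the NULL-terminated table of links is then registered in a single call from the *_hwmod_init() function. A minimal sketch of that pattern, with hypothetical example_* names standing in for real hwmods:

/* Hedged sketch of the link-table pattern; example_* names are hypothetical. */
static struct omap_hwmod_ocp_if example_l3__ip = {
	.master	= &example_l3_hwmod,		/* initiating interconnect */
	.slave	= &example_ip_hwmod,		/* target IP block */
	.clk	= "l3_div_ck",			/* OCP interface clock */
	.user	= OCP_USER_MPU | OCP_USER_SDMA,	/* allowed initiators */
};

static struct omap_hwmod_ocp_if *example_ocp_ifs[] __initdata = {
	&example_l3__ip,
	NULL,					/* table must be NULL-terminated */
};

/* omap_hwmod_register_links(example_ocp_ifs) walks the table up to the
 * NULL sentinel, exactly as omap44xx_hwmod_init() above does. */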
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
deleted file mode 100644
index 85b9ab4756ed..000000000000
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ /dev/null
@@ -1,467 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Hardware modules present on the OMAP54xx chips
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com
- *
- * Paul Walmsley
- * Benoit Cousson
- *
- * This file is automatically generated from the OMAP hardware databases.
- * We respectfully ask that any modifications to this file be coordinated
- * with the public linux-omap@vger.kernel.org mailing list and the
- * authors above to ensure that the autogeneration scripts are kept
- * up-to-date with the file contents.
- */
-
-#include <linux/io.h>
-#include <linux/power/smartreflex.h>
-
-#include "omap_hwmod.h"
-#include "omap_hwmod_common_data.h"
-#include "cm1_54xx.h"
-#include "cm2_54xx.h"
-#include "prm54xx.h"
-
-/* Base offset for all OMAP5 interrupts external to MPUSS */
-#define OMAP54XX_IRQ_GIC_START 32
-
-/*
- * IP blocks
- */
-
-/*
- * 'dmm' class
- * instance(s): dmm
- */
-static struct omap_hwmod_class omap54xx_dmm_hwmod_class = {
- .name = "dmm",
-};
-
-/* dmm */
-static struct omap_hwmod omap54xx_dmm_hwmod = {
- .name = "dmm",
- .class = &omap54xx_dmm_hwmod_class,
- .clkdm_name = "emif_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_EMIF_DMM_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_EMIF_DMM_CONTEXT_OFFSET,
- },
- },
-};
-
-/*
- * 'l3' class
- * instance(s): l3_instr, l3_main_1, l3_main_2, l3_main_3
- */
-static struct omap_hwmod_class omap54xx_l3_hwmod_class = {
- .name = "l3",
-};
-
-/* l3_instr */
-static struct omap_hwmod omap54xx_l3_instr_hwmod = {
- .name = "l3_instr",
- .class = &omap54xx_l3_hwmod_class,
- .clkdm_name = "l3instr_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L3INSTR_L3_INSTR_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-/* l3_main_1 */
-static struct omap_hwmod omap54xx_l3_main_1_hwmod = {
- .name = "l3_main_1",
- .class = &omap54xx_l3_hwmod_class,
- .clkdm_name = "l3main1_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L3MAIN1_L3_MAIN_1_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L3MAIN1_L3_MAIN_1_CONTEXT_OFFSET,
- },
- },
-};
-
-/* l3_main_2 */
-static struct omap_hwmod omap54xx_l3_main_2_hwmod = {
- .name = "l3_main_2",
- .class = &omap54xx_l3_hwmod_class,
- .clkdm_name = "l3main2_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L3MAIN2_L3_MAIN_2_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L3MAIN2_L3_MAIN_2_CONTEXT_OFFSET,
- },
- },
-};
-
-/* l3_main_3 */
-static struct omap_hwmod omap54xx_l3_main_3_hwmod = {
- .name = "l3_main_3",
- .class = &omap54xx_l3_hwmod_class,
- .clkdm_name = "l3instr_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L3INSTR_L3_MAIN_3_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L3INSTR_L3_MAIN_3_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-/*
- * 'l4' class
- * instance(s): l4_abe, l4_cfg, l4_per, l4_wkup
- */
-static struct omap_hwmod_class omap54xx_l4_hwmod_class = {
- .name = "l4",
-};
-
-/* l4_cfg */
-static struct omap_hwmod omap54xx_l4_cfg_hwmod = {
- .name = "l4_cfg",
- .class = &omap54xx_l4_hwmod_class,
- .clkdm_name = "l4cfg_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4CFG_L4_CFG_CONTEXT_OFFSET,
- },
- },
-};
-
-/* l4_per */
-static struct omap_hwmod omap54xx_l4_per_hwmod = {
- .name = "l4_per",
- .class = &omap54xx_l4_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L4PER_L4_PER_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L4PER_L4_PER_CONTEXT_OFFSET,
- },
- },
-};
-
-/* l4_wkup */
-static struct omap_hwmod omap54xx_l4_wkup_hwmod = {
- .name = "l4_wkup",
- .class = &omap54xx_l4_hwmod_class,
- .clkdm_name = "wkupaon_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_WKUPAON_L4_WKUP_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_WKUPAON_L4_WKUP_CONTEXT_OFFSET,
- },
- },
-};
-
-/*
- * 'mpu_bus' class
- * instance(s): mpu_private
- */
-static struct omap_hwmod_class omap54xx_mpu_bus_hwmod_class = {
- .name = "mpu_bus",
-};
-
-/* mpu_private */
-static struct omap_hwmod omap54xx_mpu_private_hwmod = {
- .name = "mpu_private",
- .class = &omap54xx_mpu_bus_hwmod_class,
- .clkdm_name = "mpu_clkdm",
- .prcm = {
- .omap4 = {
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
-};
-
-/*
- * 'emif' class
- * external memory interface no1 (wrapper)
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_emif_sysc = {
- .rev_offs = 0x0000,
-};
-
-static struct omap_hwmod_class omap54xx_emif_hwmod_class = {
- .name = "emif",
- .sysc = &omap54xx_emif_sysc,
-};
-
-/* emif1 */
-static struct omap_hwmod omap54xx_emif1_hwmod = {
- .name = "emif1",
- .class = &omap54xx_emif_hwmod_class,
- .clkdm_name = "emif_clkdm",
- .flags = HWMOD_INIT_NO_IDLE,
- .main_clk = "dpll_core_h11x2_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_EMIF_EMIF1_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_EMIF_EMIF1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-/* emif2 */
-static struct omap_hwmod omap54xx_emif2_hwmod = {
- .name = "emif2",
- .class = &omap54xx_emif_hwmod_class,
- .clkdm_name = "emif_clkdm",
- .flags = HWMOD_INIT_NO_IDLE,
- .main_clk = "dpll_core_h11x2_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_EMIF_EMIF2_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_EMIF_EMIF2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-
-
-
-/*
- * 'mpu' class
- * mpu sub-system
- */
-
-static struct omap_hwmod_class omap54xx_mpu_hwmod_class = {
- .name = "mpu",
-};
-
-/* mpu */
-static struct omap_hwmod omap54xx_mpu_hwmod = {
- .name = "mpu",
- .class = &omap54xx_mpu_hwmod_class,
- .clkdm_name = "mpu_clkdm",
- .flags = HWMOD_INIT_NO_IDLE,
- .main_clk = "dpll_mpu_m2_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_MPU_MPU_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_MPU_MPU_CONTEXT_OFFSET,
- },
- },
-};
-
-/*
- * 'sata' class
- * sata: serial ata interface gen2 compliant ( 1 rx/ 1 tx)
- */
-
-static struct omap_hwmod_class_sysconfig omap54xx_sata_sysc = {
- .rev_offs = 0x00fc,
- .sysc_offs = 0x0000,
- .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
- MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class omap54xx_sata_hwmod_class = {
- .name = "sata",
- .sysc = &omap54xx_sata_sysc,
-};
-
-/* sata */
-static struct omap_hwmod omap54xx_sata_hwmod = {
- .name = "sata",
- .class = &omap54xx_sata_hwmod_class,
- .clkdm_name = "l3init_clkdm",
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
- .main_clk = "func_48m_fclk",
- .mpu_rt_idx = 1,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = OMAP54XX_CM_L3INIT_SATA_CLKCTRL_OFFSET,
- .context_offs = OMAP54XX_RM_L3INIT_SATA_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* l4_cfg -> sata */
-static struct omap_hwmod_ocp_if omap54xx_l4_cfg__sata = {
- .master = &omap54xx_l4_cfg_hwmod,
- .slave = &omap54xx_sata_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/*
- * Interfaces
- */
-
-/* l3_main_1 -> dmm */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_1__dmm = {
- .master = &omap54xx_l3_main_1_hwmod,
- .slave = &omap54xx_dmm_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_SDMA,
-};
-
-/* l3_main_3 -> l3_instr */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_3__l3_instr = {
- .master = &omap54xx_l3_main_3_hwmod,
- .slave = &omap54xx_l3_instr_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_2 -> l3_main_1 */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_2__l3_main_1 = {
- .master = &omap54xx_l3_main_2_hwmod,
- .slave = &omap54xx_l3_main_1_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> l3_main_1 */
-static struct omap_hwmod_ocp_if omap54xx_l4_cfg__l3_main_1 = {
- .master = &omap54xx_l4_cfg_hwmod,
- .slave = &omap54xx_l3_main_1_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* mpu -> l3_main_1 */
-static struct omap_hwmod_ocp_if omap54xx_mpu__l3_main_1 = {
- .master = &omap54xx_mpu_hwmod,
- .slave = &omap54xx_l3_main_1_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU,
-};
-
-/* l3_main_1 -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_1__l3_main_2 = {
- .master = &omap54xx_l3_main_1_hwmod,
- .slave = &omap54xx_l3_main_2_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU,
-};
-
-/* l4_cfg -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap54xx_l4_cfg__l3_main_2 = {
- .master = &omap54xx_l4_cfg_hwmod,
- .slave = &omap54xx_l3_main_2_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> l3_main_3 */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_1__l3_main_3 = {
- .master = &omap54xx_l3_main_1_hwmod,
- .slave = &omap54xx_l3_main_3_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU,
-};
-
-/* l3_main_2 -> l3_main_3 */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_2__l3_main_3 = {
- .master = &omap54xx_l3_main_2_hwmod,
- .slave = &omap54xx_l3_main_3_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> l3_main_3 */
-static struct omap_hwmod_ocp_if omap54xx_l4_cfg__l3_main_3 = {
- .master = &omap54xx_l4_cfg_hwmod,
- .slave = &omap54xx_l3_main_3_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> l4_cfg */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_1__l4_cfg = {
- .master = &omap54xx_l3_main_1_hwmod,
- .slave = &omap54xx_l4_cfg_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_2 -> l4_per */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_2__l4_per = {
- .master = &omap54xx_l3_main_2_hwmod,
- .slave = &omap54xx_l4_per_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> l4_wkup */
-static struct omap_hwmod_ocp_if omap54xx_l3_main_1__l4_wkup = {
- .master = &omap54xx_l3_main_1_hwmod,
- .slave = &omap54xx_l4_wkup_hwmod,
- .clk = "wkupaon_iclk_mux",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* mpu -> mpu_private */
-static struct omap_hwmod_ocp_if omap54xx_mpu__mpu_private = {
- .master = &omap54xx_mpu_hwmod,
- .slave = &omap54xx_mpu_private_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* mpu -> emif1 */
-static struct omap_hwmod_ocp_if omap54xx_mpu__emif1 = {
- .master = &omap54xx_mpu_hwmod,
- .slave = &omap54xx_emif1_hwmod,
- .clk = "dpll_core_h11x2_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* mpu -> emif2 */
-static struct omap_hwmod_ocp_if omap54xx_mpu__emif2 = {
- .master = &omap54xx_mpu_hwmod,
- .slave = &omap54xx_emif2_hwmod,
- .clk = "dpll_core_h11x2_ck",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> mpu */
-static struct omap_hwmod_ocp_if omap54xx_l4_cfg__mpu = {
- .master = &omap54xx_l4_cfg_hwmod,
- .slave = &omap54xx_mpu_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = {
- &omap54xx_l3_main_1__dmm,
- &omap54xx_l3_main_3__l3_instr,
- &omap54xx_l3_main_2__l3_main_1,
- &omap54xx_l4_cfg__l3_main_1,
- &omap54xx_mpu__l3_main_1,
- &omap54xx_l3_main_1__l3_main_2,
- &omap54xx_l4_cfg__l3_main_2,
- &omap54xx_l3_main_1__l3_main_3,
- &omap54xx_l3_main_2__l3_main_3,
- &omap54xx_l4_cfg__l3_main_3,
- &omap54xx_l3_main_1__l4_cfg,
- &omap54xx_l3_main_2__l4_per,
- &omap54xx_l3_main_1__l4_wkup,
- &omap54xx_mpu__mpu_private,
- &omap54xx_mpu__emif1,
- &omap54xx_mpu__emif2,
- &omap54xx_l4_cfg__mpu,
- &omap54xx_l4_cfg__sata,
- NULL,
-};
-
-int __init omap54xx_hwmod_init(void)
-{
- omap_hwmod_init();
- return omap_hwmod_register_links(omap54xx_hwmod_ocp_ifs);
-}
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
deleted file mode 100644
index 48c2a808bd46..000000000000
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ /dev/null
@@ -1,719 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Hardware modules present on the DRA7xx chips
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com
- *
- * Paul Walmsley
- * Benoit Cousson
- *
- * This file is automatically generated from the OMAP hardware databases.
- * We respectfully ask that any modifications to this file be coordinated
- * with the public linux-omap@vger.kernel.org mailing list and the
- * authors above to ensure that the autogeneration scripts are kept
- * up-to-date with the file contents.
- */
-
-#include <linux/io.h>
-
-#include "omap_hwmod.h"
-#include "omap_hwmod_common_data.h"
-#include "cm1_7xx.h"
-#include "cm2_7xx.h"
-#include "prm7xx.h"
-#include "soc.h"
-
-/* Base offset for all DRA7XX interrupts external to MPUSS */
-#define DRA7XX_IRQ_GIC_START 32
-
-/*
- * IP blocks
- */
-
-/*
- * 'dmm' class
- * instance(s): dmm
- */
-static struct omap_hwmod_class dra7xx_dmm_hwmod_class = {
- .name = "dmm",
-};
-
-/* dmm */
-static struct omap_hwmod dra7xx_dmm_hwmod = {
- .name = "dmm",
- .class = &dra7xx_dmm_hwmod_class,
- .clkdm_name = "emif_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_EMIF_DMM_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_EMIF_DMM_CONTEXT_OFFSET,
- },
- },
-};
-
-/*
- * 'l3' class
- * instance(s): l3_instr, l3_main_1, l3_main_2
- */
-static struct omap_hwmod_class dra7xx_l3_hwmod_class = {
- .name = "l3",
-};
-
-/* l3_instr */
-static struct omap_hwmod dra7xx_l3_instr_hwmod = {
- .name = "l3_instr",
- .class = &dra7xx_l3_hwmod_class,
- .clkdm_name = "l3instr_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L3INSTR_L3_INSTR_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L3INSTR_L3_INSTR_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-/* l3_main_1 */
-static struct omap_hwmod dra7xx_l3_main_1_hwmod = {
- .name = "l3_main_1",
- .class = &dra7xx_l3_hwmod_class,
- .clkdm_name = "l3main1_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L3MAIN1_L3_MAIN_1_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L3MAIN1_L3_MAIN_1_CONTEXT_OFFSET,
- },
- },
-};
-
-/* l3_main_2 */
-static struct omap_hwmod dra7xx_l3_main_2_hwmod = {
- .name = "l3_main_2",
- .class = &dra7xx_l3_hwmod_class,
- .clkdm_name = "l3instr_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L3INSTR_L3_MAIN_2_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L3INSTR_L3_MAIN_2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_HWCTRL,
- },
- },
-};
-
-/*
- * 'l4' class
- * instance(s): l4_cfg, l4_per1, l4_per2, l4_per3, l4_wkup
- */
-static struct omap_hwmod_class dra7xx_l4_hwmod_class = {
- .name = "l4",
-};
-
-/* l4_cfg */
-static struct omap_hwmod dra7xx_l4_cfg_hwmod = {
- .name = "l4_cfg",
- .class = &dra7xx_l4_hwmod_class,
- .clkdm_name = "l4cfg_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4CFG_L4_CFG_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4CFG_L4_CFG_CONTEXT_OFFSET,
- },
- },
-};
-
-/* l4_per1 */
-static struct omap_hwmod dra7xx_l4_per1_hwmod = {
- .name = "l4_per1",
- .class = &dra7xx_l4_hwmod_class,
- .clkdm_name = "l4per_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER_L4_PER1_CLKCTRL_OFFSET,
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
-};
-
-/* l4_per2 */
-static struct omap_hwmod dra7xx_l4_per2_hwmod = {
- .name = "l4_per2",
- .class = &dra7xx_l4_hwmod_class,
- .clkdm_name = "l4per2_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER2_L4_PER2_CLKCTRL_OFFSET,
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
-};
-
-/* l4_per3 */
-static struct omap_hwmod dra7xx_l4_per3_hwmod = {
- .name = "l4_per3",
- .class = &dra7xx_l4_hwmod_class,
- .clkdm_name = "l4per3_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER3_L4_PER3_CLKCTRL_OFFSET,
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
-};
-
-/* l4_wkup */
-static struct omap_hwmod dra7xx_l4_wkup_hwmod = {
- .name = "l4_wkup",
- .class = &dra7xx_l4_hwmod_class,
- .clkdm_name = "wkupaon_clkdm",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_WKUPAON_L4_WKUP_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_WKUPAON_L4_WKUP_CONTEXT_OFFSET,
- },
- },
-};
-
-/*
- * 'atl' class
- *
- */
-
-static struct omap_hwmod_class dra7xx_atl_hwmod_class = {
- .name = "atl",
-};
-
-/* atl */
-static struct omap_hwmod dra7xx_atl_hwmod = {
- .name = "atl",
- .class = &dra7xx_atl_hwmod_class,
- .clkdm_name = "atl_clkdm",
- .main_clk = "atl_gfclk_mux",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_ATL_ATL_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_ATL_ATL_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
- * 'bb2d' class
- *
- */
-
-static struct omap_hwmod_class dra7xx_bb2d_hwmod_class = {
- .name = "bb2d",
-};
-
-/* bb2d */
-static struct omap_hwmod dra7xx_bb2d_hwmod = {
- .name = "bb2d",
- .class = &dra7xx_bb2d_hwmod_class,
- .clkdm_name = "dss_clkdm",
- .main_clk = "dpll_core_h24x2_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_DSS_BB2D_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_DSS_BB2D_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
- * 'ctrl_module' class
- *
- */
-
-static struct omap_hwmod_class dra7xx_ctrl_module_hwmod_class = {
- .name = "ctrl_module",
-};
-
-/* ctrl_module_wkup */
-static struct omap_hwmod dra7xx_ctrl_module_wkup_hwmod = {
- .name = "ctrl_module_wkup",
- .class = &dra7xx_ctrl_module_hwmod_class,
- .clkdm_name = "wkupaon_clkdm",
- .prcm = {
- .omap4 = {
- .flags = HWMOD_OMAP4_NO_CONTEXT_LOSS_BIT,
- },
- },
-};
-
-/*
- * 'mpu' class
- *
- */
-
-static struct omap_hwmod_class dra7xx_mpu_hwmod_class = {
- .name = "mpu",
-};
-
-/* mpu */
-static struct omap_hwmod dra7xx_mpu_hwmod = {
- .name = "mpu",
- .class = &dra7xx_mpu_hwmod_class,
- .clkdm_name = "mpu_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
- .main_clk = "dpll_mpu_m2_ck",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_MPU_MPU_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_MPU_MPU_CONTEXT_OFFSET,
- },
- },
-};
-
-
-/*
- * 'PCIE' class
- *
- */
-
-/*
- * As noted in documentation for _reset() in omap_hwmod.c, the stock reset
- * functionality of OMAP HWMOD layer does not deassert the hardreset lines
- * associated with an IP automatically leaving the driver to handle that
- * by itself. This does not work for PCIeSS which needs the reset lines
- * deasserted for the driver to start accessing registers.
- *
- * We use a PCIeSS HWMOD class specific reset handler to deassert the hardreset
- * lines after asserting them.
- */
-int dra7xx_pciess_reset(struct omap_hwmod *oh)
-{
- int i;
-
- for (i = 0; i < oh->rst_lines_cnt; i++) {
- omap_hwmod_assert_hardreset(oh, oh->rst_lines[i].name);
- omap_hwmod_deassert_hardreset(oh, oh->rst_lines[i].name);
- }
-
- return 0;
-}
-
-static struct omap_hwmod_class dra7xx_pciess_hwmod_class = {
- .name = "pcie",
- .reset = dra7xx_pciess_reset,
-};
-
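The .reset member used above is the class-level override point: when a hwmod class sets it, the hwmod core calls that handler in place of its stock _reset() path, which is how PCIeSS gets its hardreset lines deasserted before the driver touches registers. A minimal sketch of wiring such an override (hypothetical example_* names):

/* Hedged sketch; example_reset() and the class name are hypothetical. */
static int example_reset(struct omap_hwmod *oh)
{
	/* custom sequencing, e.g. pulse each hardreset line as
	 * dra7xx_pciess_reset() does above */
	return 0;
}

static struct omap_hwmod_class example_hwmod_class = {
	.name	= "example",
	.reset	= example_reset,	/* replaces the stock _reset() behaviour */
};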
-/* pcie1 */
-static struct omap_hwmod_rst_info dra7xx_pciess1_resets[] = {
- { .name = "pcie", .rst_shift = 0 },
-};
-
-static struct omap_hwmod dra7xx_pciess1_hwmod = {
- .name = "pcie1",
- .class = &dra7xx_pciess_hwmod_class,
- .clkdm_name = "pcie_clkdm",
- .rst_lines = dra7xx_pciess1_resets,
- .rst_lines_cnt = ARRAY_SIZE(dra7xx_pciess1_resets),
- .main_clk = "l4_root_clk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS1_CLKCTRL_OFFSET,
- .rstctrl_offs = DRA7XX_RM_L3INIT_PCIESS_RSTCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L3INIT_PCIESS1_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/* pcie2 */
-static struct omap_hwmod_rst_info dra7xx_pciess2_resets[] = {
- { .name = "pcie", .rst_shift = 1 },
-};
-
-/* pcie2 */
-static struct omap_hwmod dra7xx_pciess2_hwmod = {
- .name = "pcie2",
- .class = &dra7xx_pciess_hwmod_class,
- .clkdm_name = "pcie_clkdm",
- .rst_lines = dra7xx_pciess2_resets,
- .rst_lines_cnt = ARRAY_SIZE(dra7xx_pciess2_resets),
- .main_clk = "l4_root_clk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L3INIT_PCIESS2_CLKCTRL_OFFSET,
- .rstctrl_offs = DRA7XX_RM_L3INIT_PCIESS_RSTCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L3INIT_PCIESS2_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
- * 'qspi' class
- *
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_qspi_sysc = {
- .rev_offs = 0,
- .sysc_offs = 0x0010,
- .sysc_flags = SYSC_HAS_SIDLEMODE,
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class dra7xx_qspi_hwmod_class = {
- .name = "qspi",
- .sysc = &dra7xx_qspi_sysc,
-};
-
-/* qspi */
-static struct omap_hwmod dra7xx_qspi_hwmod = {
- .name = "qspi",
- .class = &dra7xx_qspi_hwmod_class,
- .clkdm_name = "l4per2_clkdm",
- .main_clk = "qspi_gfclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L4PER2_QSPI_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L4PER2_QSPI_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
- * 'sata' class
- *
- */
-
-static struct omap_hwmod_class_sysconfig dra7xx_sata_sysc = {
- .rev_offs = 0x00fc,
- .sysc_offs = 0x0000,
- .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
- MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
- .sysc_fields = &omap_hwmod_sysc_type2,
-};
-
-static struct omap_hwmod_class dra7xx_sata_hwmod_class = {
- .name = "sata",
- .sysc = &dra7xx_sata_sysc,
-};
-
-/* sata */
-
-static struct omap_hwmod dra7xx_sata_hwmod = {
- .name = "sata",
- .class = &dra7xx_sata_hwmod_class,
- .clkdm_name = "l3init_clkdm",
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
- .main_clk = "func_48m_fclk",
- .mpu_rt_idx = 1,
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L3INIT_SATA_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L3INIT_SATA_CONTEXT_OFFSET,
- .modulemode = MODULEMODE_SWCTRL,
- },
- },
-};
-
-/*
- * 'vcp' class
- *
- */
-
-static struct omap_hwmod_class dra7xx_vcp_hwmod_class = {
- .name = "vcp",
-};
-
-/* vcp1 */
-static struct omap_hwmod dra7xx_vcp1_hwmod = {
- .name = "vcp1",
- .class = &dra7xx_vcp_hwmod_class,
- .clkdm_name = "l3main1_clkdm",
- .main_clk = "l3_iclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L3MAIN1_VCP1_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L3MAIN1_VCP1_CONTEXT_OFFSET,
- },
- },
-};
-
-/* vcp2 */
-static struct omap_hwmod dra7xx_vcp2_hwmod = {
- .name = "vcp2",
- .class = &dra7xx_vcp_hwmod_class,
- .clkdm_name = "l3main1_clkdm",
- .main_clk = "l3_iclk_div",
- .prcm = {
- .omap4 = {
- .clkctrl_offs = DRA7XX_CM_L3MAIN1_VCP2_CLKCTRL_OFFSET,
- .context_offs = DRA7XX_RM_L3MAIN1_VCP2_CONTEXT_OFFSET,
- },
- },
-};
-
-
-
-/*
- * Interfaces
- */
-
-/* l3_main_1 -> dmm */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__dmm = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_dmm_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_SDMA,
-};
-
-/* l3_main_2 -> l3_instr */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_2__l3_instr = {
- .master = &dra7xx_l3_main_2_hwmod,
- .slave = &dra7xx_l3_instr_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> l3_main_1 */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__l3_main_1 = {
- .master = &dra7xx_l4_cfg_hwmod,
- .slave = &dra7xx_l3_main_1_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* mpu -> l3_main_1 */
-static struct omap_hwmod_ocp_if dra7xx_mpu__l3_main_1 = {
- .master = &dra7xx_mpu_hwmod,
- .slave = &dra7xx_l3_main_1_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU,
-};
-
-/* l3_main_1 -> l3_main_2 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l3_main_2 = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_l3_main_2_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU,
-};
-
-/* l4_cfg -> l3_main_2 */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__l3_main_2 = {
- .master = &dra7xx_l4_cfg_hwmod,
- .slave = &dra7xx_l3_main_2_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> l4_cfg */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_cfg = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_l4_cfg_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> l4_per1 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_per1 = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_l4_per1_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> l4_per2 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_per2 = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_l4_per2_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> l4_per3 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_per3 = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_l4_per3_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> l4_wkup */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__l4_wkup = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_l4_wkup_hwmod,
- .clk = "wkupaon_iclk_mux",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per2 -> atl */
-static struct omap_hwmod_ocp_if dra7xx_l4_per2__atl = {
- .master = &dra7xx_l4_per2_hwmod,
- .slave = &dra7xx_atl_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> bb2d */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__bb2d = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_bb2d_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_wkup -> ctrl_module_wkup */
-static struct omap_hwmod_ocp_if dra7xx_l4_wkup__ctrl_module_wkup = {
- .master = &dra7xx_l4_wkup_hwmod,
- .slave = &dra7xx_ctrl_module_wkup_hwmod,
- .clk = "wkupaon_iclk_mux",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> mpu */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__mpu = {
- .master = &dra7xx_l4_cfg_hwmod,
- .slave = &dra7xx_mpu_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> pciess1 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess1 = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_pciess1_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> pciess1 */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess1 = {
- .master = &dra7xx_l4_cfg_hwmod,
- .slave = &dra7xx_pciess1_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> pciess2 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__pciess2 = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_pciess2_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> pciess2 */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__pciess2 = {
- .master = &dra7xx_l4_cfg_hwmod,
- .slave = &dra7xx_pciess2_hwmod,
- .clk = "l4_root_clk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> qspi */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__qspi = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_qspi_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_cfg -> sata */
-static struct omap_hwmod_ocp_if dra7xx_l4_cfg__sata = {
- .master = &dra7xx_l4_cfg_hwmod,
- .slave = &dra7xx_sata_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> vcp1 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__vcp1 = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_vcp1_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per2 -> vcp1 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per2__vcp1 = {
- .master = &dra7xx_l4_per2_hwmod,
- .slave = &dra7xx_vcp1_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l3_main_1 -> vcp2 */
-static struct omap_hwmod_ocp_if dra7xx_l3_main_1__vcp2 = {
- .master = &dra7xx_l3_main_1_hwmod,
- .slave = &dra7xx_vcp2_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-/* l4_per2 -> vcp2 */
-static struct omap_hwmod_ocp_if dra7xx_l4_per2__vcp2 = {
- .master = &dra7xx_l4_per2_hwmod,
- .slave = &dra7xx_vcp2_hwmod,
- .clk = "l3_iclk_div",
- .user = OCP_USER_MPU | OCP_USER_SDMA,
-};
-
-static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
- &dra7xx_l3_main_1__dmm,
- &dra7xx_l3_main_2__l3_instr,
- &dra7xx_l4_cfg__l3_main_1,
- &dra7xx_mpu__l3_main_1,
- &dra7xx_l3_main_1__l3_main_2,
- &dra7xx_l4_cfg__l3_main_2,
- &dra7xx_l3_main_1__l4_cfg,
- &dra7xx_l3_main_1__l4_per1,
- &dra7xx_l3_main_1__l4_per2,
- &dra7xx_l3_main_1__l4_per3,
- &dra7xx_l3_main_1__l4_wkup,
- &dra7xx_l4_per2__atl,
- &dra7xx_l3_main_1__bb2d,
- &dra7xx_l4_wkup__ctrl_module_wkup,
- &dra7xx_l4_cfg__mpu,
- &dra7xx_l3_main_1__pciess1,
- &dra7xx_l4_cfg__pciess1,
- &dra7xx_l3_main_1__pciess2,
- &dra7xx_l4_cfg__pciess2,
- &dra7xx_l3_main_1__qspi,
- &dra7xx_l4_cfg__sata,
- &dra7xx_l3_main_1__vcp1,
- &dra7xx_l4_per2__vcp1,
- &dra7xx_l3_main_1__vcp2,
- &dra7xx_l4_per2__vcp2,
- NULL,
-};
-
-/* SoC variant specific hwmod links */
-static struct omap_hwmod_ocp_if *dra72x_hwmod_ocp_ifs[] __initdata = {
- NULL,
-};
-
-static struct omap_hwmod_ocp_if *rtc_hwmod_ocp_ifs[] __initdata = {
- NULL,
-};
-
-int __init dra7xx_hwmod_init(void)
-{
- int ret;
-
- omap_hwmod_init();
- ret = omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs);
-
- if (!ret && soc_is_dra74x()) {
- ret = omap_hwmod_register_links(rtc_hwmod_ocp_ifs);
- } else if (!ret && soc_is_dra72x()) {
- ret = omap_hwmod_register_links(dra72x_hwmod_ocp_ifs);
- if (!ret && !of_machine_is_compatible("ti,dra718"))
- ret = omap_hwmod_register_links(rtc_hwmod_ocp_ifs);
- } else if (!ret && soc_is_dra76x()) {
- if (!ret && soc_is_dra76x_abz())
- ret = omap_hwmod_register_links(rtc_hwmod_ocp_ifs);
- }
-
- return ret;
-}
diff --git a/arch/arm/mach-omap2/omap_twl.c b/arch/arm/mach-omap2/omap_twl.c
index a642d3b39e50..d4dab041324d 100644
--- a/arch/arm/mach-omap2/omap_twl.c
+++ b/arch/arm/mach-omap2/omap_twl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* OMAP and TWL PMIC specific initializations.
*
* Copyright (C) 2010 Texas Instruments Incorporated.
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 2e3a10914c40..765809b214e7 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -443,7 +443,7 @@ void omap_auxdata_legacy_init(struct device *dev)
dev->platform_data = &twl_gpio_auxdata;
}
-#if IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP)
+#if defined(CONFIG_ARCH_OMAP3) && IS_ENABLED(CONFIG_SND_SOC_OMAP_MCBSP)
static struct omap_mcbsp_platform_data mcbsp_pdata;
static void __init omap3_mcbsp_init(void)
{
@@ -569,10 +569,29 @@ static void pdata_quirks_check(struct pdata_init *quirks)
}
}
-void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table)
+static const char * const pdata_quirks_init_nodes[] = {
+ "prcm",
+ "prm",
+};
+
+static void __init
+pdata_quirks_init_clocks(const struct of_device_id *omap_dt_match_table)
{
struct device_node *np;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata_quirks_init_nodes); i++) {
+ np = of_find_node_by_name(NULL, pdata_quirks_init_nodes[i]);
+ if (!np)
+ continue;
+
+ of_platform_populate(np, omap_dt_match_table,
+ omap_auxdata_lookup, NULL);
+ }
+}
+void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table)
+{
/*
* We still need this for omap2420 and omap3 PM to work, others are
* using drivers/misc/sram.c already.
@@ -585,13 +604,7 @@ void __init pdata_quirks_init(const struct of_device_id *omap_dt_match_table)
omap3_mcbsp_init();
pdata_quirks_check(auxdata_quirks);
- /* Populate always-on PRCM in l4_wkup to probe l4_wkup */
- np = of_find_node_by_name(NULL, "prcm");
- if (!np)
- np = of_find_node_by_name(NULL, "prm");
- if (np)
- of_platform_populate(np, omap_dt_match_table,
- omap_auxdata_lookup, NULL);
+ pdata_quirks_init_clocks(omap_dt_match_table);
of_platform_populate(NULL, omap_dt_match_table,
omap_auxdata_lookup, NULL);
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 919d35d5b325..b43eab9879d3 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -168,8 +168,8 @@ static int pwrdm_suspend_set(void *data, u64 val)
return -EINVAL;
}
-DEFINE_SIMPLE_ATTRIBUTE(pwrdm_suspend_fops, pwrdm_suspend_get,
- pwrdm_suspend_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(pwrdm_suspend_fops, pwrdm_suspend_get,
+ pwrdm_suspend_set, "%llu\n");
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *dir)
{
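The pm-debug.c hunk swaps DEFINE_SIMPLE_ATTRIBUTE for DEFINE_DEBUGFS_ATTRIBUTE. The debugfs-specific macro generates read/write helpers that take the debugfs file reference themselves, so it is meant to be paired with debugfs_create_file_unsafe(), which skips the full-file proxying that debugfs_create_file() would otherwise add. A hedged sketch of the usual pairing, with illustrative names:

/* Hedged sketch; example_* names and the u32 payload are illustrative. */
#include <linux/debugfs.h>

static u32 example_value;

static int example_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}

static int example_set(void *data, u64 val)
{
	*(u32 *)data = val;
	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(example_fops, example_get, example_set, "%llu\n");

static int __init example_debugfs_init(void)
{
	/* the _unsafe creator is correct here because the helpers generated
	 * by DEFINE_DEBUGFS_ATTRIBUTE handle file removal themselves */
	debugfs_create_file_unsafe("example", 0644, NULL,
				   &example_value, &example_fops);
	return 0;
}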
diff --git a/arch/arm/mach-omap2/pmic-cpcap.c b/arch/arm/mach-omap2/pmic-cpcap.c
index 09076ad0576d..668dc84fd31e 100644
--- a/arch/arm/mach-omap2/pmic-cpcap.c
+++ b/arch/arm/mach-omap2/pmic-cpcap.c
@@ -246,10 +246,10 @@ int __init omap4_cpcap_init(void)
omap_voltage_register_pmic(voltdm, &omap443x_max8952_mpu);
if (of_machine_is_compatible("motorola,droid-bionic")) {
- voltdm = voltdm_lookup("mpu");
+ voltdm = voltdm_lookup("core");
omap_voltage_register_pmic(voltdm, &omap_cpcap_core);
- voltdm = voltdm_lookup("mpu");
+ voltdm = voltdm_lookup("iva");
omap_voltage_register_pmic(voltdm, &omap_cpcap_iva);
} else {
voltdm = voltdm_lookup("core");
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 1cbac76136d4..0a5b87e2a4b0 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -1202,26 +1202,26 @@ bool pwrdm_can_ever_lose_context(struct powerdomain *pwrdm)
if (!pwrdm) {
pr_debug("powerdomain: %s: invalid powerdomain pointer\n",
__func__);
- return 1;
+ return true;
}
if (pwrdm->pwrsts & PWRSTS_OFF)
- return 1;
+ return true;
if (pwrdm->pwrsts & PWRSTS_RET) {
if (pwrdm->pwrsts_logic_ret & PWRSTS_OFF)
- return 1;
+ return true;
for (i = 0; i < pwrdm->banks; i++)
if (pwrdm->pwrsts_mem_ret[i] & PWRSTS_OFF)
- return 1;
+ return true;
}
for (i = 0; i < pwrdm->banks; i++)
if (pwrdm->pwrsts_mem_on[i] & PWRSTS_OFF)
- return 1;
+ return true;
- return 0;
+ return false;
}
/**
diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
index 62df666c2bd0..db672cf19a51 100644
--- a/arch/arm/mach-omap2/sr_device.c
+++ b/arch/arm/mach-omap2/sr_device.c
@@ -88,34 +88,26 @@ static void __init sr_set_nvalues(struct omap_volt_data *volt_data,
extern struct omap_sr_data omap_sr_pdata[];
-static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+static int __init sr_init_by_name(const char *name, const char *voltdm)
{
struct omap_sr_data *sr_data = NULL;
struct omap_volt_data *volt_data;
- struct omap_smartreflex_dev_attr *sr_dev_attr;
static int i;
- if (!strncmp(oh->name, "smartreflex_mpu_iva", 20) ||
- !strncmp(oh->name, "smartreflex_mpu", 16))
+ if (!strncmp(name, "smartreflex_mpu_iva", 20) ||
+ !strncmp(name, "smartreflex_mpu", 16))
sr_data = &omap_sr_pdata[OMAP_SR_MPU];
- else if (!strncmp(oh->name, "smartreflex_core", 17))
+ else if (!strncmp(name, "smartreflex_core", 17))
sr_data = &omap_sr_pdata[OMAP_SR_CORE];
- else if (!strncmp(oh->name, "smartreflex_iva", 16))
+ else if (!strncmp(name, "smartreflex_iva", 16))
sr_data = &omap_sr_pdata[OMAP_SR_IVA];
if (!sr_data) {
- pr_err("%s: Unknown instance %s\n", __func__, oh->name);
+ pr_err("%s: Unknown instance %s\n", __func__, name);
return -EINVAL;
}
- sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
- if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
- pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
- __func__, oh->name);
- goto exit;
- }
-
- sr_data->name = oh->name;
+ sr_data->name = name;
if (cpu_is_omap343x())
sr_data->ip_type = 1;
else
@@ -136,10 +128,10 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
}
}
- sr_data->voltdm = voltdm_lookup(sr_dev_attr->sensor_voltdm_name);
+ sr_data->voltdm = voltdm_lookup(voltdm);
if (!sr_data->voltdm) {
pr_err("%s: Unable to get voltage domain pointer for VDD %s\n",
- __func__, sr_dev_attr->sensor_voltdm_name);
+ __func__, voltdm);
goto exit;
}
@@ -160,6 +152,27 @@ exit:
return 0;
}
+#ifdef CONFIG_OMAP_HWMOD
+static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+{
+ struct omap_smartreflex_dev_attr *sr_dev_attr;
+
+ sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
+ if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
+ pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
+ __func__, oh->name);
+ return 0;
+ }
+
+ return sr_init_by_name(oh->name, sr_dev_attr->sensor_voltdm_name);
+}
+#else
+static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+{
+ return -EINVAL;
+}
+#endif
+
/*
* API to be called from board files to enable smartreflex
* autocompensation at init.
@@ -169,7 +182,42 @@ void __init omap_enable_smartreflex_on_init(void)
sr_enable_on_init = true;
}
+static const char * const omap4_sr_instances[] = {
+ "mpu",
+ "iva",
+ "core",
+};
+
+static const char * const dra7_sr_instances[] = {
+ "mpu",
+ "core",
+};
+
int __init omap_devinit_smartreflex(void)
{
+ const char * const *sr_inst = NULL;
+ int i, nr_sr = 0;
+
+ if (soc_is_omap44xx()) {
+ sr_inst = omap4_sr_instances;
+ nr_sr = ARRAY_SIZE(omap4_sr_instances);
+
+ } else if (soc_is_dra7xx()) {
+ sr_inst = dra7_sr_instances;
+ nr_sr = ARRAY_SIZE(dra7_sr_instances);
+ }
+
+ if (nr_sr) {
+ const char *name, *voltdm;
+
+ for (i = 0; i < nr_sr; i++) {
+ name = kasprintf(GFP_KERNEL, "smartreflex_%s", sr_inst[i]);
+ voltdm = sr_inst[i];
+ sr_init_by_name(name, voltdm);
+ }
+
+ return 0;
+ }
+
return omap_hwmod_for_each_by_class("smartreflex", sr_dev_init, NULL);
}
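One detail worth noting in the new omap_devinit_smartreflex() loop: the kasprintf()'d name is deliberately never freed, since sr_init_by_name() stores the pointer in omap_sr_data (sr_data->name = name) for the lifetime of the device. kasprintf() can also return NULL on allocation failure, which the committed loop does not check; a slightly more defensive form of the same loop would be (a sketch, not the in-tree code):

/* Hedged sketch only; the committed loop omits the NULL check. */
for (i = 0; i < nr_sr; i++) {
	name = kasprintf(GFP_KERNEL, "smartreflex_%s", sr_inst[i]);
	if (!name)
		return -ENOMEM;
	sr_init_by_name(name, sr_inst[i]);	/* name stays live in sr_data */
}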
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index 151e26ec0696..04a12523cdee 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -74,13 +74,17 @@ static const struct property_entry mcp251x_properties[] = {
{}
};
+static const struct software_node mcp251x_node = {
+ .properties = mcp251x_properties,
+};
+
static struct spi_board_info mcp251x_board_info[] = {
{
.modalias = "mcp2515",
.max_speed_hz = 6500000,
.bus_num = 3,
.chip_select = 0,
- .properties = mcp251x_properties,
+ .swnode = &mcp251x_node,
.controller_data = &mcp251x_chip_info1,
.irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ1)
},
@@ -89,7 +93,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.max_speed_hz = 6500000,
.bus_num = 3,
.chip_select = 1,
- .properties = mcp251x_properties,
+ .swnode = &mcp251x_node,
.controller_data = &mcp251x_chip_info2,
.irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ2)
},
@@ -98,7 +102,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.max_speed_hz = 6500000,
.bus_num = 4,
.chip_select = 0,
- .properties = mcp251x_properties,
+ .swnode = &mcp251x_node,
.controller_data = &mcp251x_chip_info3,
.irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ3)
},
@@ -107,7 +111,7 @@ static struct spi_board_info mcp251x_board_info[] = {
.max_speed_hz = 6500000,
.bus_num = 4,
.chip_select = 1,
- .properties = mcp251x_properties,
+ .swnode = &mcp251x_node,
.controller_data = &mcp251x_chip_info4,
.irq = PXA_GPIO_TO_IRQ(ICONTROL_MCP251x_nIRQ4)
}
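icontrol.c is the first of several board files in this series (stargate2.c, zeus.c and mach-mini2440.c below follow suit) converted from the removed .properties pointer to a software_node: the property array itself is untouched, but wrapping it in a node gives it an identity that several spi_board_info/i2c_board_info entries can share through .swnode. The shape of the conversion, sketched with an illustrative property value:

/* Hedged sketch; the clock-frequency value is illustrative, not the
 * board's actual crystal. */
#include <linux/property.h>

static const struct property_entry mcp251x_properties[] = {
	PROPERTY_ENTRY_U32("clock-frequency", 16000000),
	{ }
};

static const struct software_node mcp251x_node = {
	.properties = mcp251x_properties,
};

/* each spi_board_info entry then carries .swnode = &mcp251x_node instead
 * of a per-entry .properties pointer */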
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index d1010ec26e9f..d237bd030238 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -502,16 +502,20 @@ static inline void mainstone_init_keypad(void) {}
#endif
static int mst_pcmcia0_irqs[11] = {
- [0 ... 10] = -1,
+ [0 ... 4] = -1,
[5] = MAINSTONE_S0_CD_IRQ,
+ [6 ... 7] = -1,
[8] = MAINSTONE_S0_STSCHG_IRQ,
+ [9] = -1,
[10] = MAINSTONE_S0_IRQ,
};
static int mst_pcmcia1_irqs[11] = {
- [0 ... 10] = -1,
+ [0 ... 4] = -1,
[5] = MAINSTONE_S1_CD_IRQ,
+ [6 ... 7] = -1,
[8] = MAINSTONE_S1_STSCHG_IRQ,
+ [9] = -1,
[10] = MAINSTONE_S1_IRQ,
};
diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
index 45c19ca96f7a..bddfc7cd5d40 100644
--- a/arch/arm/mach-pxa/pxa_cplds_irqs.c
+++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
@@ -121,8 +121,13 @@ static int cplds_probe(struct platform_device *pdev)
return fpga->irq;
base_irq = platform_get_irq(pdev, 1);
- if (base_irq < 0)
+ if (base_irq < 0) {
base_irq = 0;
+ } else {
+ ret = devm_irq_alloc_descs(&pdev->dev, base_irq, base_irq, CPLDS_NB_IRQ, 0);
+ if (ret < 0)
+ return ret;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fpga->base = devm_ioremap_resource(&pdev->dev, res);
@@ -147,22 +152,20 @@ static int cplds_probe(struct platform_device *pdev)
}
irq_set_irq_wake(fpga->irq, 1);
- fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
- CPLDS_NB_IRQ,
- &cplds_irq_domain_ops, fpga);
+ if (base_irq)
+ fpga->irqdomain = irq_domain_add_legacy(pdev->dev.of_node,
+ CPLDS_NB_IRQ,
+ base_irq, 0,
+ &cplds_irq_domain_ops,
+ fpga);
+ else
+ fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
+ CPLDS_NB_IRQ,
+ &cplds_irq_domain_ops,
+ fpga);
if (!fpga->irqdomain)
return -ENODEV;
- if (base_irq) {
- ret = irq_create_strict_mappings(fpga->irqdomain, base_irq, 0,
- CPLDS_NB_IRQ);
- if (ret) {
- dev_err(&pdev->dev, "couldn't create the irq mapping %d..%d\n",
- base_irq, base_irq + CPLDS_NB_IRQ);
- return ret;
- }
- }
-
return 0;
}
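The cplds_probe() rework drops irq_create_strict_mappings() (presumably as part of retiring that helper) and instead picks the domain constructor up front: irq_domain_add_legacy() establishes the fixed hwirq-to-virq mapping over a descriptor range reserved with devm_irq_alloc_descs(), while irq_domain_add_linear() leaves virq allocation to happen on demand. The decision restated as a hedged sketch:

/* Hedged sketch of the rule the probe now follows. */
if (base_irq) {
	/* platform requested fixed Linux IRQ numbers: reserve the
	 * descriptors, then map hwirqs 0..CPLDS_NB_IRQ-1 onto them */
	ret = devm_irq_alloc_descs(&pdev->dev, base_irq, base_irq,
				   CPLDS_NB_IRQ, 0);
	fpga->irqdomain = irq_domain_add_legacy(pdev->dev.of_node,
						CPLDS_NB_IRQ, base_irq, 0,
						&cplds_irq_domain_ops, fpga);
} else {
	/* no fixed numbering: virqs are created lazily per hwirq */
	fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
						CPLDS_NB_IRQ,
						&cplds_irq_domain_ops, fpga);
}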
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index e2353f7dcf01..7ad627465768 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -794,6 +794,10 @@ static const struct property_entry pca9500_eeprom_properties[] = {
{ }
};
+static const struct software_node pca9500_eeprom_node = {
+ .properties = pca9500_eeprom_properties,
+};
+
/**
* stargate2_reset_bluetooth() reset the bluecore to ensure consistent state
**/
@@ -929,7 +933,7 @@ static struct i2c_board_info __initdata stargate2_i2c_board_info[] = {
}, {
.type = "24c02",
.addr = 0x57,
- .properties = pca9500_eeprom_properties,
+ .swnode = &pca9500_eeprom_node,
}, {
.type = "max1238",
.addr = 0x35,
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index b27fc7ac9cea..97700429633e 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -433,10 +433,14 @@ static const struct property_entry mcp251x_properties[] = {
{}
};
+static const struct software_node mcp251x_node = {
+ .properties = mcp251x_properties,
+};
+
static struct spi_board_info zeus_spi_board_info[] = {
[0] = {
.modalias = "mcp2515",
- .properties = mcp251x_properties,
+ .swnode = &mcp251x_node,
.irq = PXA_GPIO_TO_IRQ(ZEUS_CAN_GPIO),
.max_speed_hz = 1*1000*1000,
.bus_num = 3,
diff --git a/arch/arm/mach-s3c/mach-mini2440.c b/arch/arm/mach-s3c/mach-mini2440.c
index 4100905dfbd0..551ec660ab59 100644
--- a/arch/arm/mach-s3c/mach-mini2440.c
+++ b/arch/arm/mach-s3c/mach-mini2440.c
@@ -542,10 +542,14 @@ static const struct property_entry mini2440_at24_properties[] = {
{ }
};
+static const struct software_node mini2440_at24_node = {
+ .properties = mini2440_at24_properties,
+};
+
static struct i2c_board_info mini2440_i2c_devs[] __initdata = {
{
I2C_BOARD_INFO("24c08", 0x50),
- .properties = mini2440_at24_properties,
+ .swnode = &mini2440_at24_node,
},
};
diff --git a/arch/arm/mach-s3c/mach-rx1950.c b/arch/arm/mach-s3c/mach-rx1950.c
index 6e19add158a9..a3f46aa61c45 100644
--- a/arch/arm/mach-s3c/mach-rx1950.c
+++ b/arch/arm/mach-s3c/mach-rx1950.c
@@ -384,6 +384,8 @@ static struct s3c2410fb_mach_info rx1950_lcd_cfg = {
static struct pwm_lookup rx1950_pwm_lookup[] = {
PWM_LOOKUP("samsung-pwm", 0, "pwm-backlight.0", NULL, 48000,
PWM_POLARITY_NORMAL),
+ PWM_LOOKUP("samsung-pwm", 1, "pwm-backlight.0", "RX1950 LCD", LCD_PWM_PERIOD,
+ PWM_POLARITY_NORMAL),
};
static struct pwm_device *lcd_pwm;
@@ -498,19 +500,18 @@ static void rx1950_bl_power(int enable)
static int rx1950_backlight_init(struct device *dev)
{
WARN_ON(gpio_request(S3C2410_GPB(0), "Backlight"));
- lcd_pwm = pwm_request(1, "RX1950 LCD");
+ lcd_pwm = pwm_get(dev, "RX1950 LCD");
if (IS_ERR(lcd_pwm)) {
dev_err(dev, "Unable to request PWM for LCD power!\n");
return PTR_ERR(lcd_pwm);
}
/*
- * This is only required to initialize .polarity; all other values are
- * fixed in this driver.
+ * Call pwm_init_state to initialize .polarity and .period. The other
+ * values are fixed in this driver.
*/
pwm_init_state(lcd_pwm, &lcd_pwm_state);
- lcd_pwm_state.period = LCD_PWM_PERIOD;
lcd_pwm_state.duty_cycle = LCD_PWM_DUTY;
rx1950_lcd_power(1);
@@ -524,7 +525,7 @@ static void rx1950_backlight_exit(struct device *dev)
rx1950_bl_power(0);
rx1950_lcd_power(0);
- pwm_free(lcd_pwm);
+ pwm_put(lcd_pwm);
gpio_free(S3C2410_GPB(0));
}
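The rx1950 change is the standard legacy-PWM migration: the new PWM_LOOKUP entry maps provider "samsung-pwm" channel 1 to consumer device "pwm-backlight.0" under con_id "RX1950 LCD", so pwm_get(dev, "RX1950 LCD") resolves the same channel that pwm_request(1, ...) used to grab, balanced by pwm_put() instead of pwm_free(). Because the lookup entry also carries LCD_PWM_PERIOD, pwm_init_state() now seeds .period from the table, which is why the explicit lcd_pwm_state.period assignment is dropped. The flow in sketch form:

/* Hedged sketch of the lookup-based flow the patch switches to. */
static struct pwm_lookup lookup[] = {
	PWM_LOOKUP("samsung-pwm", 1,			/* provider + channel */
		   "pwm-backlight.0", "RX1950 LCD",	/* consumer dev + con_id */
		   LCD_PWM_PERIOD, PWM_POLARITY_NORMAL),
};

pwm_add_table(lookup, ARRAY_SIZE(lookup));	/* at board init */
lcd_pwm = pwm_get(dev, "RX1950 LCD");		/* replaces pwm_request(1, ...) */
pwm_init_state(lcd_pwm, &lcd_pwm_state);	/* period/polarity from the table */
pwm_put(lcd_pwm);				/* replaces pwm_free() */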
diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig
index c3bb68d57cea..43ddec677c0b 100644
--- a/arch/arm/mach-socfpga/Kconfig
+++ b/arch/arm/mach-socfpga/Kconfig
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-menuconfig ARCH_SOCFPGA
+menuconfig ARCH_INTEL_SOCFPGA
bool "Altera SOCFPGA family"
depends on ARCH_MULTI_V7
select ARCH_SUPPORTS_BIG_ENDIAN
@@ -19,7 +19,7 @@ menuconfig ARCH_SOCFPGA
select PL310_ERRATA_753970 if PL310
select PL310_ERRATA_769419
-if ARCH_SOCFPGA
+if ARCH_INTEL_SOCFPGA
config SOCFPGA_SUSPEND
bool "Suspend to RAM on SOCFPGA"
help
diff --git a/arch/arm/mach-spear/spear320.c b/arch/arm/mach-spear/spear320.c
index 0958f68a21e2..926d5a243238 100644
--- a/arch/arm/mach-spear/spear320.c
+++ b/arch/arm/mach-spear/spear320.c
@@ -195,14 +195,12 @@ static struct pl022_ssp_controller spear320_ssp_data[] = {
.dma_filter = pl08x_filter_id,
.dma_tx_param = "ssp1_tx",
.dma_rx_param = "ssp1_rx",
- .num_chipselect = 2,
}, {
.bus_id = 2,
.enable_dma = 1,
.dma_filter = pl08x_filter_id,
.dma_tx_param = "ssp2_tx",
.dma_rx_param = "ssp2_rx",
- .num_chipselect = 2,
}
};
diff --git a/arch/arm/mach-spear/spear3xx.c b/arch/arm/mach-spear/spear3xx.c
index 8537fcffe5a8..f83321d5e353 100644
--- a/arch/arm/mach-spear/spear3xx.c
+++ b/arch/arm/mach-spear/spear3xx.c
@@ -30,16 +30,6 @@ struct pl022_ssp_controller pl022_plat_data = {
.dma_filter = pl08x_filter_id,
.dma_tx_param = "ssp0_tx",
.dma_rx_param = "ssp0_rx",
- /*
- * This is number of spi devices that can be connected to spi. There are
- * two type of chipselects on which slave devices can work. One is chip
- * select provided by spi masters other is controlled through external
- * gpio's. We can't use chipselect provided from spi master (because as
- * soon as FIFO becomes empty, CS is disabled and transfer ends). So
- * this number now depends on number of gpios available for spi. each
- * slave on each master requires a separate gpio pin.
- */
- .num_chipselect = 2,
};
/* dmac device registration */
diff --git a/arch/arm/mach-stm32/board-dt.c b/arch/arm/mach-stm32/board-dt.c
index 011d57b488c2..a766310d8dca 100644
--- a/arch/arm/mach-stm32/board-dt.c
+++ b/arch/arm/mach-stm32/board-dt.c
@@ -17,6 +17,7 @@ static const char *const stm32_compat[] __initconst = {
"st,stm32f746",
"st,stm32f769",
"st,stm32h743",
+ "st,stm32h750",
"st,stm32mp157",
NULL
};
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index 0810f3abd810..415d8ad2a3c1 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -86,7 +86,7 @@ static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
}
#ifdef CONFIG_HOTPLUG_CPU
-void ux500_cpu_die(unsigned int cpu)
+static void ux500_cpu_die(unsigned int cpu)
{
wfi();
}
diff --git a/arch/arm/mach-zynq/Kconfig b/arch/arm/mach-zynq/Kconfig
index 43fb941dcd07..a56748d671c4 100644
--- a/arch/arm/mach-zynq/Kconfig
+++ b/arch/arm/mach-zynq/Kconfig
@@ -6,7 +6,7 @@ config ARCH_ZYNQ
select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC
- select ARM_GLOBAL_TIMER if !CPU_FREQ
+ select ARM_GLOBAL_TIMER
select CADENCE_TTC_TIMER
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index dc8f152f3556..830bbfb26ca5 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -33,41 +33,41 @@ icache_size:
* processor. We fix this by performing an invalidate, rather than a
* clean + invalidate, before jumping into the kernel.
*
- * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs
- * to be called for both secondary cores startup and primary core resume
- * procedures.
+ * This function needs to be called for both secondary cores startup and
+ * primary core resume procedures.
*/
ENTRY(v7_invalidate_l1)
- mov r0, #0
- mcr p15, 2, r0, c0, c0, 0
- mrc p15, 1, r0, c0, c0, 0
-
- movw r1, #0x7fff
- and r2, r1, r0, lsr #13
+ mov r0, #0
+ mcr p15, 2, r0, c0, c0, 0 @ select L1 data cache in CSSELR
+ isb
+ mrc p15, 1, r0, c0, c0, 0 @ read cache geometry from CCSIDR
- movw r1, #0x3ff
+ movw r3, #0x3ff
+ and r3, r3, r0, lsr #3 @ 'Associativity' in CCSIDR[12:3]
+ clz r1, r3 @ WayShift
+ mov r2, #1
+ mov r3, r3, lsl r1 @ NumWays-1 shifted into bits [31:...]
+ movs r1, r2, lsl r1 @ #1 shifted left by same amount
+ moveq r1, #1 @ r1 needs value > 0 even if only 1 way
- and r3, r1, r0, lsr #3 @ NumWays - 1
- add r2, r2, #1 @ NumSets
+ and r2, r0, #0x7
+ add r2, r2, #4 @ SetShift
- and r0, r0, #0x7
- add r0, r0, #4 @ SetShift
+1: movw ip, #0x7fff
+ and r0, ip, r0, lsr #13 @ 'NumSets' in CCSIDR[27:13]
- clz r1, r3 @ WayShift
- add r4, r3, #1 @ NumWays
-1: sub r2, r2, #1 @ NumSets--
- mov r3, r4 @ Temp = NumWays
-2: subs r3, r3, #1 @ Temp--
- mov r5, r3, lsl r1
- mov r6, r2, lsl r0
- orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
- mcr p15, 0, r5, c7, c6, 2
- bgt 2b
- cmp r2, #0
- bgt 1b
- dsb st
- isb
- ret lr
+2: mov ip, r0, lsl r2 @ NumSet << SetShift
+ orr ip, ip, r3 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
+ mcr p15, 0, ip, c7, c6, 2
+ subs r0, r0, #1 @ Set--
+ bpl 2b
+ subs r3, r3, r1 @ Way--
+ bcc 3f
+ mrc p15, 1, r0, c0, c0, 0 @ re-read cache geometry from CCSIDR
+ b 1b
+3: dsb st
+ isb
+ ret lr
ENDPROC(v7_invalidate_l1)
/*
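For readers tracing the rewritten assembly above: it selects the L1 data cache in CSSELR, reads the geometry from CCSIDR, and issues a DCISW (invalidate by set/way) operation for every set/way combination, re-reading CCSIDR on each outer pass because the working registers are reused. A rough C model of the operand packing (a hedged sketch; field positions per the ARMv7 ARM, with dcisw() standing in for "mcr p15, 0, <reg>, c7, c6, 2"):

/* Hedged C model of the set/way enumeration; dcisw() is hypothetical. */
unsigned int assoc    = (ccsidr >> 3) & 0x3ff;		/* NumWays - 1  */
unsigned int nsets    = (ccsidr >> 13) & 0x7fff;	/* NumSets - 1  */
unsigned int setshift = (ccsidr & 0x7) + 4;		/* log2(line bytes) */
unsigned int wayshift = assoc ? __builtin_clz(assoc) : 1; /* left-justify ways */

for (unsigned int way = 0; way <= assoc; way++)
	for (unsigned int set = 0; set <= nsets; set++)
		dcisw((way << wayshift) | (set << setshift));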
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 44f7292ec27b..f1da3b439b96 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>
+#include <linux/pagemap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 6a769a6c314e..d8a115de5507 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -8,6 +8,7 @@
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>
+#include <linux/pagemap.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index eb5d338657d1..bcb485620a05 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>
+#include <linux/pagemap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 93ff0097f00b..fb688003d156 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -420,7 +420,7 @@ void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
note_page(&st, 0, 0, 0, NULL);
}
-static void ptdump_initialize(void)
+static void __init ptdump_initialize(void)
{
unsigned i, j;
@@ -466,7 +466,7 @@ void ptdump_check_wx(void)
pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
-static int ptdump_init(void)
+static int __init ptdump_init(void)
{
ptdump_initialize();
ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables");
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 828a2561b229..9d4744a632c6 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -301,7 +301,11 @@ static void __init free_highpages(void)
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
- swiotlb_init(1);
+ if (swiotlb_force == SWIOTLB_FORCE ||
+ max_pfn > arm_dma_pfn_limit)
+ swiotlb_init(1);
+ else
+ swiotlb_force = SWIOTLB_NO_FORCE;
#endif
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
@@ -316,8 +320,6 @@ void __init mem_init(void)
free_highpages();
- mem_init_print_info(NULL);
-
/*
* Check boundaries twice: Some fundamental inconsistencies can
* be detected at build time already.
@@ -487,33 +489,12 @@ static int __mark_rodata_ro(void *unused)
return 0;
}
-static int kernel_set_to_readonly __read_mostly;
-
void mark_rodata_ro(void)
{
- kernel_set_to_readonly = 1;
stop_machine(__mark_rodata_ro, NULL, NULL);
debug_checkwx();
}
-void set_kernel_text_rw(void)
-{
- if (!kernel_set_to_readonly)
- return;
-
- set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
- current->active_mm);
-}
-
-void set_kernel_text_ro(void)
-{
- if (!kernel_set_to_readonly)
- return;
-
- set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
- current->active_mm);
-}
-
#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index a25b660c3017..c1e12aab67b8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -387,8 +387,7 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
/* Make sure fixmap region does not exceed available allocation. */
- BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
- FIXADDR_END);
+ BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) < FIXADDR_START);
BUG_ON(idx >= __end_of_fixed_addresses);
/* we only support device mappings until pgprot_kernel has been set */
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
index 88950e41a3a9..59d916ccdf25 100644
--- a/arch/arm/mm/pmsa-v7.c
+++ b/arch/arm/mm/pmsa-v7.c
@@ -235,6 +235,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
phys_addr_t mem_end;
phys_addr_t reg_start, reg_end;
unsigned int mem_max_regions;
+ bool first = true;
int num;
u64 i;
@@ -263,7 +264,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
#endif
for_each_mem_range(i, &reg_start, &reg_end) {
- if (i == 0) {
+ if (first) {
phys_addr_t phys_offset = PHYS_OFFSET;
/*
@@ -275,6 +276,7 @@ void __init pmsav7_adjust_lowmem_bounds(void)
mem_start = reg_start;
mem_end = reg_end;
specified_mem_size = mem_end - mem_start;
+ first = false;
} else {
/*
* memblock auto merges contiguous blocks, remove
diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c
index 2de019f7503e..8359748a19a1 100644
--- a/arch/arm/mm/pmsa-v8.c
+++ b/arch/arm/mm/pmsa-v8.c
@@ -95,10 +95,11 @@ void __init pmsav8_adjust_lowmem_bounds(void)
{
phys_addr_t mem_end;
phys_addr_t reg_start, reg_end;
+ bool first = true;
u64 i;
for_each_mem_range(i, &reg_start, &reg_end) {
- if (i == 0) {
+ if (first) {
phys_addr_t phys_offset = PHYS_OFFSET;
/*
@@ -107,6 +108,7 @@ void __init pmsav8_adjust_lowmem_bounds(void)
if (reg_start != phys_offset)
panic("First memory bank must be contiguous from PHYS_OFFSET");
mem_end = reg_end;
+ first = false;
} else {
/*
* memblock auto merges contiguous blocks, remove
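
Both the pmsa-v7.c and pmsa-v8.c hunks make the same fix: the for_each_mem_range() index is an opaque cookie, so testing it against 0 no longer identifies the first usable range, and a boolean flag tracks the first iteration instead. A minimal userspace analogue of the pattern (the range data is made up):

    #include <stdbool.h>
    #include <stdio.h>

    struct range { unsigned long start, end; bool valid; };

    int main(void)
    {
            struct range banks[] = { {0, 0, false}, {0x1000, 0x2000, true},
                                     {0x3000, 0x4000, true} };
            bool first = true;
            for (unsigned i = 0; i < 3; i++) {
                    if (!banks[i].valid)
                            continue;       /* an 'i == 0' test never sees a valid bank */
                    if (first) {
                            printf("first bank at %#lx\n", banks[i].start);
                            first = false;
                    } else {
                            printf("extra bank at %#lx\n", banks[i].start);
                    }
            }
            return 0;
    }
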
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 28c9d32fa99a..26d726a08a34 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -256,6 +256,20 @@ ENDPROC(cpu_pj4b_do_resume)
#endif
+ @
+ @ Invoke the v7_invalidate_l1() function, which adheres to the AAPCS
+ @ rules, and so it may corrupt registers that we need to preserve.
+ @
+ .macro do_invalidate_l1
+ mov r6, r1
+ mov r7, r2
+ mov r10, lr
+ bl v7_invalidate_l1 @ corrupts {r0-r3, ip, lr}
+ mov r1, r6
+ mov r2, r7
+ mov lr, r10
+ .endm
+
/*
* __v7_setup
*
@@ -277,6 +291,7 @@ __v7_ca5mp_setup:
__v7_ca9mp_setup:
__v7_cr7mp_setup:
__v7_cr8mp_setup:
+ do_invalidate_l1
mov r10, #(1 << 0) @ Cache/TLB ops broadcasting
b 1f
__v7_ca7mp_setup:
@@ -284,13 +299,9 @@ __v7_ca12mp_setup:
__v7_ca15mp_setup:
__v7_b15mp_setup:
__v7_ca17mp_setup:
+ do_invalidate_l1
mov r10, #0
-1: adr r0, __v7_setup_stack_ptr
- ldr r12, [r0]
- add r12, r12, r0 @ the local stack
- stmia r12, {r1-r6, lr} @ v7_invalidate_l1 touches r0-r6
- bl v7_invalidate_l1
- ldmia r12, {r1-r6, lr}
+1:
#ifdef CONFIG_SMP
orr r10, r10, #(1 << 6) @ Enable SMP/nAMP mode
ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
@@ -471,12 +482,7 @@ __v7_pj4b_setup:
#endif /* CONFIG_CPU_PJ4B */
__v7_setup:
- adr r0, __v7_setup_stack_ptr
- ldr r12, [r0]
- add r12, r12, r0 @ the local stack
- stmia r12, {r1-r6, lr} @ v7_invalidate_l1 touches r0-r6
- bl v7_invalidate_l1
- ldmia r12, {r1-r6, lr}
+ do_invalidate_l1
__v7_setup_cont:
and r0, r9, #0xff000000 @ ARM?
@@ -548,17 +554,8 @@ __errata_finish:
orr r0, r0, r6 @ set them
THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions
ret lr @ return to head.S:__ret
-
- .align 2
-__v7_setup_stack_ptr:
- .word PHYS_RELATIVE(__v7_setup_stack, .)
ENDPROC(__v7_setup)
- .bss
- .align 2
-__v7_setup_stack:
- .space 4 * 7 @ 7 registers
-
__INITDATA
.weak cpu_v7_bugs_init
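
The do_invalidate_l1 macro above preserves r1, r2 and lr because an AAPCS-conformant callee such as v7_invalidate_l1 is free to clobber r0-r3, ip (r12) and lr. A hedged, ARM-only sketch of that contract expressed as an inline-asm clobber list (call_aapcs is an illustrative name, not a kernel API):

    #ifdef __arm__
    /* any AAPCS call may clobber exactly the set listed below, which is
       the set the macro saves and restores around v7_invalidate_l1 */
    static inline void call_aapcs(void (*fn)(void))
    {
            asm volatile("blx %0"
                         : : "r" (fn)
                         : "r0", "r1", "r2", "r3", "ip", "lr", "cc", "memory");
    }
    #endif
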
diff --git a/arch/arm/mm/ptdump_debugfs.c b/arch/arm/mm/ptdump_debugfs.c
index 598b636615a2..318de969ae0f 100644
--- a/arch/arm/mm/ptdump_debugfs.c
+++ b/arch/arm/mm/ptdump_debugfs.c
@@ -11,20 +11,9 @@ static int ptdump_show(struct seq_file *m, void *v)
ptdump_walk_pgd(m, info);
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(ptdump);
-static int ptdump_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ptdump_show, inode->i_private);
-}
-
-static const struct file_operations ptdump_fops = {
- .open = ptdump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-void ptdump_debugfs_register(struct ptdump_info *info, const char *name)
+void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name)
{
debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
}
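
The removed open/fops boilerplate is what DEFINE_SHOW_ATTRIBUTE(ptdump) generates. Paraphrasing include/linux/seq_file.h (the exact expansion may differ between kernel versions), the macro produces roughly:

    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/seq_file.h>

    /* generated from the ptdump_show() defined earlier in this file */
    static int ptdump_open(struct inode *inode, struct file *file)
    {
            return single_open(file, ptdump_show, inode->i_private);
    }

    static const struct file_operations ptdump_fops = {
            .owner   = THIS_MODULE,
            .open    = ptdump_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };
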
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index 5335b9687297..74f4b383afe3 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -24,7 +24,7 @@
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
- * - vma - vma_struct describing address range
+ * - vma - vm_area_struct describing address range
*
* It is assumed that:
* - the "Invalidate single entry" instruction will invalidate
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 1bb28d7db567..87bf4ab17721 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -23,7 +23,7 @@
*
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
- * - vma - vma_struct describing address range
+ * - vma - vm_area_struct describing address range
*
* It is assumed that:
* - the "Invalidate single entry" instruction will invalidate
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 1eb59003bdec..9f11de46aaa9 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -85,7 +85,7 @@ static int dma_lch_count;
static int dma_chan_count;
static int omap_dma_reserve_channels;
-static spinlock_t dma_chan_lock;
+static DEFINE_SPINLOCK(dma_chan_lock);
static struct omap_dma_lch *dma_chan;
static inline void disable_lnk(int lch);
@@ -902,7 +902,6 @@ static int omap_system_dma_probe(struct platform_device *pdev)
if (!dma_chan)
return -ENOMEM;
- spin_lock_init(&dma_chan_lock);
for (ch = 0; ch < dma_chan_count; ch++) {
omap_clear_dma(ch);
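
The plat-omap/dma.c change trades a runtime spin_lock_init() call for compile-time initialization; the two forms below are equivalent once initialized (kernel-context sketch):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(lock_static);    /* initialized at compile time */

    static spinlock_t lock_runtime;         /* must be initialized before use */

    static void example_setup(void)
    {
            spin_lock_init(&lock_runtime);  /* the call the patch deletes */
    }
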
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index a9653117ca0d..27e0af78e88b 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -348,29 +348,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
reset_current_kprobe();
}
break;
-
- case KPROBE_HIT_ACTIVE:
- case KPROBE_HIT_SSDONE:
- /*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
- return 1;
- break;
-
- default:
- break;
}
return 0;
diff --git a/arch/arm/probes/kprobes/test-arm.c b/arch/arm/probes/kprobes/test-arm.c
index 977369f1aa48..a0dae35ffacd 100644
--- a/arch/arm/probes/kprobes/test-arm.c
+++ b/arch/arm/probes/kprobes/test-arm.c
@@ -55,25 +55,25 @@ void kprobe_arm_test_cases(void)
TEST_GROUP("Data-processing (register), (register-shifted register), (immediate)")
#define _DATA_PROCESSING_DNM(op,s,val) \
- TEST_RR( op "eq" s " r0, r",1, VAL1,", r",2, val, "") \
- TEST_RR( op "ne" s " r1, r",1, VAL1,", r",2, val, ", lsl #3") \
- TEST_RR( op "cs" s " r2, r",3, VAL1,", r",2, val, ", lsr #4") \
- TEST_RR( op "cc" s " r3, r",3, VAL1,", r",2, val, ", asr #5") \
- TEST_RR( op "mi" s " r4, r",5, VAL1,", r",2, N(val),", asr #6") \
- TEST_RR( op "pl" s " r5, r",5, VAL1,", r",2, val, ", ror #7") \
- TEST_RR( op "vs" s " r6, r",7, VAL1,", r",2, val, ", rrx") \
- TEST_R( op "vc" s " r6, r",7, VAL1,", pc, lsl #3") \
- TEST_R( op "vc" s " r6, r",7, VAL1,", sp, lsr #4") \
- TEST_R( op "vc" s " r6, pc, r",7, VAL1,", asr #5") \
- TEST_R( op "vc" s " r6, sp, r",7, VAL1,", ror #6") \
- TEST_RRR( op "hi" s " r8, r",9, VAL1,", r",14,val, ", lsl r",0, 3,"")\
- TEST_RRR( op "ls" s " r9, r",9, VAL1,", r",14,val, ", lsr r",7, 4,"")\
- TEST_RRR( op "ge" s " r10, r",11,VAL1,", r",14,val, ", asr r",7, 5,"")\
- TEST_RRR( op "lt" s " r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\
- TEST_RR( op "gt" s " r12, r13" ", r",14,val, ", ror r",14,7,"")\
- TEST_RR( op "le" s " r14, r",0, val, ", r13" ", lsl r",14,8,"")\
- TEST_R( op "eq" s " r0, r",11,VAL1,", #0xf5") \
- TEST_R( op "ne" s " r11, r",0, VAL1,", #0xf5000000") \
+ TEST_RR( op s "eq r0, r",1, VAL1,", r",2, val, "") \
+ TEST_RR( op s "ne r1, r",1, VAL1,", r",2, val, ", lsl #3") \
+ TEST_RR( op s "cs r2, r",3, VAL1,", r",2, val, ", lsr #4") \
+ TEST_RR( op s "cc r3, r",3, VAL1,", r",2, val, ", asr #5") \
+ TEST_RR( op s "mi r4, r",5, VAL1,", r",2, N(val),", asr #6") \
+ TEST_RR( op s "pl r5, r",5, VAL1,", r",2, val, ", ror #7") \
+ TEST_RR( op s "vs r6, r",7, VAL1,", r",2, val, ", rrx") \
+ TEST_R( op s "vc r6, r",7, VAL1,", pc, lsl #3") \
+ TEST_R( op s "vc r6, r",7, VAL1,", sp, lsr #4") \
+ TEST_R( op s "vc r6, pc, r",7, VAL1,", asr #5") \
+ TEST_R( op s "vc r6, sp, r",7, VAL1,", ror #6") \
+ TEST_RRR( op s "hi r8, r",9, VAL1,", r",14,val, ", lsl r",0, 3,"")\
+ TEST_RRR( op s "ls r9, r",9, VAL1,", r",14,val, ", lsr r",7, 4,"")\
+ TEST_RRR( op s "ge r10, r",11,VAL1,", r",14,val, ", asr r",7, 5,"")\
+ TEST_RRR( op s "lt r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\
+ TEST_RR( op s "gt r12, r13" ", r",14,val, ", ror r",14,7,"")\
+ TEST_RR( op s "le r14, r",0, val, ", r13" ", lsl r",14,8,"")\
+ TEST_R( op s "eq r0, r",11,VAL1,", #0xf5") \
+ TEST_R( op s "ne r11, r",0, VAL1,", #0xf5000000") \
TEST_R( op s " r7, r",8, VAL2,", #0x000af000") \
TEST( op s " r4, pc" ", #0x00005a00")
@@ -104,23 +104,23 @@ void kprobe_arm_test_cases(void)
TEST_R( op " r",8, VAL2,", #0x000af000")
#define _DATA_PROCESSING_DM(op,s,val) \
- TEST_R( op "eq" s " r0, r",1, val, "") \
- TEST_R( op "ne" s " r1, r",1, val, ", lsl #3") \
- TEST_R( op "cs" s " r2, r",3, val, ", lsr #4") \
- TEST_R( op "cc" s " r3, r",3, val, ", asr #5") \
- TEST_R( op "mi" s " r4, r",5, N(val),", asr #6") \
- TEST_R( op "pl" s " r5, r",5, val, ", ror #7") \
- TEST_R( op "vs" s " r6, r",10,val, ", rrx") \
- TEST( op "vs" s " r7, pc, lsl #3") \
- TEST( op "vs" s " r7, sp, lsr #4") \
- TEST_RR( op "vc" s " r8, r",7, val, ", lsl r",0, 3,"") \
- TEST_RR( op "hi" s " r9, r",9, val, ", lsr r",7, 4,"") \
- TEST_RR( op "ls" s " r10, r",9, val, ", asr r",7, 5,"") \
- TEST_RR( op "ge" s " r11, r",11,N(val),", asr r",7, 6,"") \
- TEST_RR( op "lt" s " r12, r",11,val, ", ror r",14,7,"") \
- TEST_R( op "gt" s " r14, r13" ", lsl r",14,8,"") \
- TEST( op "eq" s " r0, #0xf5") \
- TEST( op "ne" s " r11, #0xf5000000") \
+ TEST_R( op s "eq r0, r",1, val, "") \
+ TEST_R( op s "ne r1, r",1, val, ", lsl #3") \
+ TEST_R( op s "cs r2, r",3, val, ", lsr #4") \
+ TEST_R( op s "cc r3, r",3, val, ", asr #5") \
+ TEST_R( op s "mi r4, r",5, N(val),", asr #6") \
+ TEST_R( op s "pl r5, r",5, val, ", ror #7") \
+ TEST_R( op s "vs r6, r",10,val, ", rrx") \
+ TEST( op s "vs r7, pc, lsl #3") \
+ TEST( op s "vs r7, sp, lsr #4") \
+ TEST_RR( op s "vc r8, r",7, val, ", lsl r",0, 3,"") \
+ TEST_RR( op s "hi r9, r",9, val, ", lsr r",7, 4,"") \
+ TEST_RR( op s "ls r10, r",9, val, ", asr r",7, 5,"") \
+ TEST_RR( op s "ge r11, r",11,N(val),", asr r",7, 6,"") \
+ TEST_RR( op s "lt r12, r",11,val, ", ror r",14,7,"") \
+ TEST_R( op s "gt r14, r13" ", lsl r",14,8,"") \
+ TEST( op s "eq r0, #0xf5") \
+ TEST( op s "ne r11, #0xf5000000") \
TEST( op s " r7, #0x000af000") \
TEST( op s " r4, #0x00005a00")
@@ -166,10 +166,10 @@ void kprobe_arm_test_cases(void)
/* Data-processing with PC as a target and status registers updated */
TEST_UNSUPPORTED("movs pc, r1")
- TEST_UNSUPPORTED("movs pc, r1, lsl r2")
+ TEST_UNSUPPORTED(__inst_arm(0xe1b0f211) " @movs pc, r1, lsl r2")
TEST_UNSUPPORTED("movs pc, #0x10000")
TEST_UNSUPPORTED("adds pc, lr, r1")
- TEST_UNSUPPORTED("adds pc, lr, r1, lsl r2")
+ TEST_UNSUPPORTED(__inst_arm(0xe09ef211) " @adds pc, lr, r1, lsl r2")
TEST_UNSUPPORTED("adds pc, lr, #4")
/* Data-processing with SP as target */
@@ -352,7 +352,7 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(__inst_arm(0xe000029f) " @ mul r0, pc, r2")
TEST_UNSUPPORTED(__inst_arm(0xe0000f91) " @ mul r0, r1, pc")
TEST_RR( "muls r0, r",1, VAL1,", r",2, VAL2,"")
- TEST_RR( "mullss r7, r",8, VAL2,", r",9, VAL2,"")
+ TEST_RR( "mulsls r7, r",8, VAL2,", r",9, VAL2,"")
TEST_R( "muls lr, r",4, VAL3,", r13")
TEST_UNSUPPORTED(__inst_arm(0xe01f0291) " @ muls pc, r1, r2")
@@ -361,7 +361,7 @@ void kprobe_arm_test_cases(void)
TEST_RR( "mla lr, r",1, VAL2,", r",2, VAL3,", r13")
TEST_UNSUPPORTED(__inst_arm(0xe02f3291) " @ mla pc, r1, r2, r3")
TEST_RRR( "mlas r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
- TEST_RRR( "mlahis r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RRR( "mlashi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
TEST_RR( "mlas lr, r",1, VAL2,", r",2, VAL3,", r13")
TEST_UNSUPPORTED(__inst_arm(0xe03f3291) " @ mlas pc, r1, r2, r3")
@@ -394,7 +394,7 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(__inst_arm(0xe081f392) " @ umull pc, r1, r2, r3")
TEST_UNSUPPORTED(__inst_arm(0xe08f1392) " @ umull r1, pc, r2, r3")
TEST_RR( "umulls r0, r1, r",2, VAL1,", r",3, VAL2,"")
- TEST_RR( "umulllss r7, r8, r",9, VAL2,", r",10, VAL1,"")
+ TEST_RR( "umullsls r7, r8, r",9, VAL2,", r",10, VAL1,"")
TEST_R( "umulls lr, r12, r",11,VAL3,", r13")
TEST_UNSUPPORTED(__inst_arm(0xe091f392) " @ umulls pc, r1, r2, r3")
TEST_UNSUPPORTED(__inst_arm(0xe09f1392) " @ umulls r1, pc, r2, r3")
@@ -405,7 +405,7 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(__inst_arm(0xe0af1392) " @ umlal pc, r1, r2, r3")
TEST_UNSUPPORTED(__inst_arm(0xe0a1f392) " @ umlal r1, pc, r2, r3")
TEST_RRRR( "umlals r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
- TEST_RRRR( "umlalles r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+ TEST_RRRR( "umlalsle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
TEST_RRR( "umlals r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
TEST_UNSUPPORTED(__inst_arm(0xe0bf1392) " @ umlals pc, r1, r2, r3")
TEST_UNSUPPORTED(__inst_arm(0xe0b1f392) " @ umlals r1, pc, r2, r3")
@@ -416,7 +416,7 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(__inst_arm(0xe0c1f392) " @ smull pc, r1, r2, r3")
TEST_UNSUPPORTED(__inst_arm(0xe0cf1392) " @ smull r1, pc, r2, r3")
TEST_RR( "smulls r0, r1, r",2, VAL1,", r",3, VAL2,"")
- TEST_RR( "smulllss r7, r8, r",9, VAL2,", r",10, VAL1,"")
+ TEST_RR( "smullsls r7, r8, r",9, VAL2,", r",10, VAL1,"")
TEST_R( "smulls lr, r12, r",11,VAL3,", r13")
TEST_UNSUPPORTED(__inst_arm(0xe0d1f392) " @ smulls pc, r1, r2, r3")
TEST_UNSUPPORTED(__inst_arm(0xe0df1392) " @ smulls r1, pc, r2, r3")
@@ -427,7 +427,7 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(__inst_arm(0xe0ef1392) " @ smlal pc, r1, r2, r3")
TEST_UNSUPPORTED(__inst_arm(0xe0e1f392) " @ smlal r1, pc, r2, r3")
TEST_RRRR( "smlals r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
- TEST_RRRR( "smlalles r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+ TEST_RRRR( "smlalsle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
TEST_RRR( "smlals r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
TEST_UNSUPPORTED(__inst_arm(0xe0ff1392) " @ smlals pc, r1, r2, r3")
TEST_UNSUPPORTED(__inst_arm(0xe0f0f392) " @ smlals r0, pc, r2, r3")
@@ -450,7 +450,7 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(__inst_arm(0xe10f0091) " @ swp r0, r1, [pc]")
#if __LINUX_ARM_ARCH__ < 6
TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]")
- TEST_R( "swpvsb r0, r",1,VAL1,", [sp]")
+ TEST_R( "swpbvs r0, r",1,VAL1,", [sp]")
#else
TEST_UNSUPPORTED(__inst_arm(0xe148e097) " @ swpb lr, r7, [r8]")
TEST_UNSUPPORTED(__inst_arm(0x614d0091) " @ swpvsb r0, r1, [sp]")
@@ -477,11 +477,11 @@ void kprobe_arm_test_cases(void)
TEST_GROUP("Extra load/store instructions")
TEST_RPR( "strh r",0, VAL1,", [r",1, 48,", -r",2, 24,"]")
- TEST_RPR( "streqh r",14,VAL2,", [r",11,0, ", r",12, 48,"]")
- TEST_UNSUPPORTED( "streqh r14, [r13, r12]")
- TEST_UNSUPPORTED( "streqh r14, [r12, r13]")
+ TEST_RPR( "strheq r",14,VAL2,", [r",11,0, ", r",12, 48,"]")
+ TEST_UNSUPPORTED( "strheq r14, [r13, r12]")
+ TEST_UNSUPPORTED( "strheq r14, [r12, r13]")
TEST_RPR( "strh r",1, VAL1,", [r",2, 24,", r",3, 48,"]!")
- TEST_RPR( "strneh r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
+ TEST_RPR( "strhne r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
TEST_RPR( "strh r",2, VAL1,", [r",3, 24,"], r",4, 48,"")
TEST_RPR( "strh r",10,VAL2,", [r",9, 48,"], -r",11,24,"")
TEST_UNSUPPORTED(__inst_arm(0xe1afc0ba) " @ strh r12, [pc, r10]!")
@@ -489,9 +489,9 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(__inst_arm(0xe089a0bf) " @ strh r10, [r9], pc")
TEST_PR( "ldrh r0, [r",0, 48,", -r",2, 24,"]")
- TEST_PR( "ldrcsh r14, [r",13,0, ", r",12, 48,"]")
+ TEST_PR( "ldrhcs r14, [r",13,0, ", r",12, 48,"]")
TEST_PR( "ldrh r1, [r",2, 24,", r",3, 48,"]!")
- TEST_PR( "ldrcch r12, [r",11,48,", -r",10,24,"]!")
+ TEST_PR( "ldrhcc r12, [r",11,48,", -r",10,24,"]!")
TEST_PR( "ldrh r2, [r",3, 24,"], r",4, 48,"")
TEST_PR( "ldrh r10, [r",9, 48,"], -r",11,24,"")
TEST_UNSUPPORTED(__inst_arm(0xe1bfc0ba) " @ ldrh r12, [pc, r10]!")
@@ -499,9 +499,9 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(__inst_arm(0xe099a0bf) " @ ldrh r10, [r9], pc")
TEST_RP( "strh r",0, VAL1,", [r",1, 24,", #-2]")
- TEST_RP( "strmih r",14,VAL2,", [r",13,0, ", #2]")
+ TEST_RP( "strhmi r",14,VAL2,", [r",13,0, ", #2]")
TEST_RP( "strh r",1, VAL1,", [r",2, 24,", #4]!")
- TEST_RP( "strplh r",12,VAL2,", [r",11,24,", #-4]!")
+ TEST_RP( "strhpl r",12,VAL2,", [r",11,24,", #-4]!")
TEST_RP( "strh r",2, VAL1,", [r",3, 24,"], #48")
TEST_RP( "strh r",10,VAL2,", [r",9, 64,"], #-48")
TEST_RP( "strh r",3, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!")
@@ -511,9 +511,9 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(__inst_arm(0xe0c9f3b0) " @ strh pc, [r9], #48")
TEST_P( "ldrh r0, [r",0, 24,", #-2]")
- TEST_P( "ldrvsh r14, [r",13,0, ", #2]")
+ TEST_P( "ldrhvs r14, [r",13,0, ", #2]")
TEST_P( "ldrh r1, [r",2, 24,", #4]!")
- TEST_P( "ldrvch r12, [r",11,24,", #-4]!")
+ TEST_P( "ldrhvc r12, [r",11,24,", #-4]!")
TEST_P( "ldrh r2, [r",3, 24,"], #48")
TEST_P( "ldrh r10, [r",9, 64,"], #-48")
TEST( "ldrh r0, [pc, #0]")
@@ -521,18 +521,18 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(__inst_arm(0xe0d9f3b0) " @ ldrh pc, [r9], #48")
TEST_PR( "ldrsb r0, [r",0, 48,", -r",2, 24,"]")
- TEST_PR( "ldrhisb r14, [r",13,0,", r",12, 48,"]")
+ TEST_PR( "ldrsbhi r14, [r",13,0,", r",12, 48,"]")
TEST_PR( "ldrsb r1, [r",2, 24,", r",3, 48,"]!")
- TEST_PR( "ldrlssb r12, [r",11,48,", -r",10,24,"]!")
+ TEST_PR( "ldrsbls r12, [r",11,48,", -r",10,24,"]!")
TEST_PR( "ldrsb r2, [r",3, 24,"], r",4, 48,"")
TEST_PR( "ldrsb r10, [r",9, 48,"], -r",11,24,"")
TEST_UNSUPPORTED(__inst_arm(0xe1bfc0da) " @ ldrsb r12, [pc, r10]!")
TEST_UNSUPPORTED(__inst_arm(0xe099f0db) " @ ldrsb pc, [r9], r11")
TEST_P( "ldrsb r0, [r",0, 24,", #-1]")
- TEST_P( "ldrgesb r14, [r",13,0, ", #1]")
+ TEST_P( "ldrsbge r14, [r",13,0, ", #1]")
TEST_P( "ldrsb r1, [r",2, 24,", #4]!")
- TEST_P( "ldrltsb r12, [r",11,24,", #-4]!")
+ TEST_P( "ldrsblt r12, [r",11,24,", #-4]!")
TEST_P( "ldrsb r2, [r",3, 24,"], #48")
TEST_P( "ldrsb r10, [r",9, 64,"], #-48")
TEST( "ldrsb r0, [pc, #0]")
@@ -540,18 +540,18 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(__inst_arm(0xe0d9f3d0) " @ ldrsb pc, [r9], #48")
TEST_PR( "ldrsh r0, [r",0, 48,", -r",2, 24,"]")
- TEST_PR( "ldrgtsh r14, [r",13,0, ", r",12, 48,"]")
+ TEST_PR( "ldrshgt r14, [r",13,0, ", r",12, 48,"]")
TEST_PR( "ldrsh r1, [r",2, 24,", r",3, 48,"]!")
- TEST_PR( "ldrlesh r12, [r",11,48,", -r",10,24,"]!")
+ TEST_PR( "ldrshle r12, [r",11,48,", -r",10,24,"]!")
TEST_PR( "ldrsh r2, [r",3, 24,"], r",4, 48,"")
TEST_PR( "ldrsh r10, [r",9, 48,"], -r",11,24,"")
TEST_UNSUPPORTED(__inst_arm(0xe1bfc0fa) " @ ldrsh r12, [pc, r10]!")
TEST_UNSUPPORTED(__inst_arm(0xe099f0fb) " @ ldrsh pc, [r9], r11")
TEST_P( "ldrsh r0, [r",0, 24,", #-1]")
- TEST_P( "ldreqsh r14, [r",13,0 ,", #1]")
+ TEST_P( "ldrsheq r14, [r",13,0 ,", #1]")
TEST_P( "ldrsh r1, [r",2, 24,", #4]!")
- TEST_P( "ldrnesh r12, [r",11,24,", #-4]!")
+ TEST_P( "ldrshne r12, [r",11,24,", #-4]!")
TEST_P( "ldrsh r2, [r",3, 24,"], #48")
TEST_P( "ldrsh r10, [r",9, 64,"], #-48")
TEST( "ldrsh r0, [pc, #0]")
@@ -571,30 +571,30 @@ void kprobe_arm_test_cases(void)
#if __LINUX_ARM_ARCH__ >= 5
TEST_RPR( "strd r",0, VAL1,", [r",1, 48,", -r",2,24,"]")
- TEST_RPR( "strccd r",8, VAL2,", [r",11,0, ", r",12,48,"]")
- TEST_UNSUPPORTED( "strccd r8, [r13, r12]")
- TEST_UNSUPPORTED( "strccd r8, [r12, r13]")
+ TEST_RPR( "strdcc r",8, VAL2,", [r",11,0, ", r",12,48,"]")
+ TEST_UNSUPPORTED( "strdcc r8, [r13, r12]")
+ TEST_UNSUPPORTED( "strdcc r8, [r12, r13]")
TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
- TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
- TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"")
- TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"")
+ TEST_RPR( "strdcs r",12,VAL2,", r13, [r",11,48,", -r",10,24,"]!")
+ TEST_RPR( "strd r",2, VAL1,", r3, [r",5, 24,"], r",4,48,"")
+ TEST_RPR( "strd r",10,VAL2,", r11, [r",9, 48,"], -r",7,24,"")
TEST_UNSUPPORTED(__inst_arm(0xe1afc0fa) " @ strd r12, [pc, r10]!")
TEST_PR( "ldrd r0, [r",0, 48,", -r",2,24,"]")
- TEST_PR( "ldrmid r8, [r",13,0, ", r",12,48,"]")
+ TEST_PR( "ldrdmi r8, [r",13,0, ", r",12,48,"]")
TEST_PR( "ldrd r4, [r",2, 24,", r",3, 48,"]!")
- TEST_PR( "ldrpld r6, [r",11,48,", -r",10,24,"]!")
- TEST_PR( "ldrd r2, [r",5, 24,"], r",4,48,"")
- TEST_PR( "ldrd r10, [r",9,48,"], -r",7,24,"")
+ TEST_PR( "ldrdpl r6, [r",11,48,", -r",10,24,"]!")
+ TEST_PR( "ldrd r2, r3, [r",5, 24,"], r",4,48,"")
+ TEST_PR( "ldrd r10, r11, [r",9,48,"], -r",7,24,"")
TEST_UNSUPPORTED(__inst_arm(0xe1afc0da) " @ ldrd r12, [pc, r10]!")
TEST_UNSUPPORTED(__inst_arm(0xe089f0db) " @ ldrd pc, [r9], r11")
TEST_UNSUPPORTED(__inst_arm(0xe089e0db) " @ ldrd lr, [r9], r11")
TEST_UNSUPPORTED(__inst_arm(0xe089c0df) " @ ldrd r12, [r9], pc")
TEST_RP( "strd r",0, VAL1,", [r",1, 24,", #-8]")
- TEST_RP( "strvsd r",8, VAL2,", [r",13,0, ", #8]")
+ TEST_RP( "strdvs r",8, VAL2,", [r",13,0, ", #8]")
TEST_RP( "strd r",4, VAL1,", [r",2, 24,", #16]!")
- TEST_RP( "strvcd r",12,VAL2,", [r",11,24,", #-16]!")
+ TEST_RP( "strdvc r",12,VAL2,", r13, [r",11,24,", #-16]!")
TEST_RP( "strd r",2, VAL1,", [r",4, 24,"], #48")
TEST_RP( "strd r",10,VAL2,", [r",9, 64,"], #-48")
TEST_RP( "strd r",6, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!")
@@ -603,9 +603,9 @@ void kprobe_arm_test_cases(void)
TEST_UNSUPPORTED(__inst_arm(0xe1efc3f0) " @ strd r12, [pc, #48]!")
TEST_P( "ldrd r0, [r",0, 24,", #-8]")
- TEST_P( "ldrhid r8, [r",13,0, ", #8]")
+ TEST_P( "ldrdhi r8, [r",13,0, ", #8]")
TEST_P( "ldrd r4, [r",2, 24,", #16]!")
- TEST_P( "ldrlsd r6, [r",11,24,", #-16]!")
+ TEST_P( "ldrdls r6, [r",11,24,", #-16]!")
TEST_P( "ldrd r2, [r",5, 24,"], #48")
TEST_P( "ldrd r10, [r",9,6,"], #-48")
TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) " @ ldrd r12, [pc, #48]!")
@@ -1084,63 +1084,63 @@ void kprobe_arm_test_cases(void)
TEST_GROUP("Branch, branch with link, and block data transfer")
TEST_P( "stmda r",0, 16*4,", {r0}")
- TEST_P( "stmeqda r",4, 16*4,", {r0-r15}")
- TEST_P( "stmneda r",8, 16*4,"!, {r8-r15}")
+ TEST_P( "stmdaeq r",4, 16*4,", {r0-r15}")
+ TEST_P( "stmdane r",8, 16*4,"!, {r8-r15}")
TEST_P( "stmda r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
TEST_P( "stmda r",13,0, "!, {pc}")
TEST_P( "ldmda r",0, 16*4,", {r0}")
- TEST_BF_P("ldmcsda r",4, 15*4,", {r0-r15}")
- TEST_BF_P("ldmccda r",7, 15*4,"!, {r8-r15}")
+ TEST_BF_P("ldmdacs r",4, 15*4,", {r0-r15}")
+ TEST_BF_P("ldmdacc r",7, 15*4,"!, {r8-r15}")
TEST_P( "ldmda r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
TEST_BF_P("ldmda r",14,15*4,"!, {pc}")
TEST_P( "stmia r",0, 16*4,", {r0}")
- TEST_P( "stmmiia r",4, 16*4,", {r0-r15}")
- TEST_P( "stmplia r",8, 16*4,"!, {r8-r15}")
+ TEST_P( "stmiami r",4, 16*4,", {r0-r15}")
+ TEST_P( "stmiapl r",8, 16*4,"!, {r8-r15}")
TEST_P( "stmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
TEST_P( "stmia r",14,0, "!, {pc}")
TEST_P( "ldmia r",0, 16*4,", {r0}")
- TEST_BF_P("ldmvsia r",4, 0, ", {r0-r15}")
- TEST_BF_P("ldmvcia r",7, 8*4, "!, {r8-r15}")
+ TEST_BF_P("ldmiavs r",4, 0, ", {r0-r15}")
+ TEST_BF_P("ldmiavc r",7, 8*4, "!, {r8-r15}")
TEST_P( "ldmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
TEST_BF_P("ldmia r",14,15*4,"!, {pc}")
TEST_P( "stmdb r",0, 16*4,", {r0}")
- TEST_P( "stmhidb r",4, 16*4,", {r0-r15}")
- TEST_P( "stmlsdb r",8, 16*4,"!, {r8-r15}")
+ TEST_P( "stmdbhi r",4, 16*4,", {r0-r15}")
+ TEST_P( "stmdbls r",8, 16*4,"!, {r8-r15}")
TEST_P( "stmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
TEST_P( "stmdb r",13,4, "!, {pc}")
TEST_P( "ldmdb r",0, 16*4,", {r0}")
- TEST_BF_P("ldmgedb r",4, 16*4,", {r0-r15}")
- TEST_BF_P("ldmltdb r",7, 16*4,"!, {r8-r15}")
+ TEST_BF_P("ldmdbge r",4, 16*4,", {r0-r15}")
+ TEST_BF_P("ldmdblt r",7, 16*4,"!, {r8-r15}")
TEST_P( "ldmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
TEST_BF_P("ldmdb r",14,16*4,"!, {pc}")
TEST_P( "stmib r",0, 16*4,", {r0}")
- TEST_P( "stmgtib r",4, 16*4,", {r0-r15}")
- TEST_P( "stmleib r",8, 16*4,"!, {r8-r15}")
+ TEST_P( "stmibgt r",4, 16*4,", {r0-r15}")
+ TEST_P( "stmible r",8, 16*4,"!, {r8-r15}")
TEST_P( "stmib r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
TEST_P( "stmib r",13,-4, "!, {pc}")
TEST_P( "ldmib r",0, 16*4,", {r0}")
- TEST_BF_P("ldmeqib r",4, -4,", {r0-r15}")
- TEST_BF_P("ldmneib r",7, 7*4,"!, {r8-r15}")
+ TEST_BF_P("ldmibeq r",4, -4,", {r0-r15}")
+ TEST_BF_P("ldmibne r",7, 7*4,"!, {r8-r15}")
TEST_P( "ldmib r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
TEST_BF_P("ldmib r",14,14*4,"!, {pc}")
TEST_P( "stmdb r",13,16*4,"!, {r3-r12,lr}")
- TEST_P( "stmeqdb r",13,16*4,"!, {r3-r12}")
- TEST_P( "stmnedb r",2, 16*4,", {r3-r12,lr}")
+ TEST_P( "stmdbeq r",13,16*4,"!, {r3-r12}")
+ TEST_P( "stmdbne r",2, 16*4,", {r3-r12,lr}")
TEST_P( "stmdb r",13,16*4,"!, {r2-r12,lr}")
TEST_P( "stmdb r",0, 16*4,", {r0-r12}")
TEST_P( "stmdb r",0, 16*4,", {r0-r12,lr}")
TEST_BF_P("ldmia r",13,5*4, "!, {r3-r12,pc}")
- TEST_P( "ldmccia r",13,5*4, "!, {r3-r12}")
- TEST_BF_P("ldmcsia r",2, 5*4, "!, {r3-r12,pc}")
+ TEST_P( "ldmiacc r",13,5*4, "!, {r3-r12}")
+ TEST_BF_P("ldmiacs r",2, 5*4, "!, {r3-r12,pc}")
TEST_BF_P("ldmia r",13,4*4, "!, {r2-r12,pc}")
TEST_P( "ldmia r",0, 16*4,", {r0-r12}")
TEST_P( "ldmia r",0, 16*4,", {r0-r12,lr}")
@@ -1174,80 +1174,80 @@ void kprobe_arm_test_cases(void)
#define TEST_COPROCESSOR(code) TEST_UNSUPPORTED(code)
#define COPROCESSOR_INSTRUCTIONS_ST_LD(two,cc) \
- TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #4]") \
- TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #-4]") \
- TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #4]!") \
- TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #-4]!") \
- TEST_COPROCESSOR("stc"two" 0, cr0, [r13], #4") \
- TEST_COPROCESSOR("stc"two" 0, cr0, [r13], #-4") \
- TEST_COPROCESSOR("stc"two" 0, cr0, [r13], {1}") \
- TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #4]") \
- TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #-4]") \
- TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #4]!") \
- TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #-4]!") \
- TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], #4") \
- TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], #-4") \
- TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], {1}") \
- TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #4]") \
- TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #-4]") \
- TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #4]!") \
- TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #-4]!") \
- TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], #4") \
- TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], #-4") \
- TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], {1}") \
- TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #4]") \
- TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #-4]") \
- TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #4]!") \
- TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #-4]!") \
- TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], #4") \
- TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], #-4") \
- TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], {1}") \
+ TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #4]") \
+ TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #-4]") \
+ TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #4]!") \
+ TEST_COPROCESSOR("stc"two" p0, cr0, [r13, #-4]!") \
+ TEST_COPROCESSOR("stc"two" p0, cr0, [r13], #4") \
+ TEST_COPROCESSOR("stc"two" p0, cr0, [r13], #-4") \
+ TEST_COPROCESSOR("stc"two" p0, cr0, [r13], {1}") \
+ TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #4]") \
+ TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #-4]") \
+ TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #4]!") \
+ TEST_COPROCESSOR("stc"two"l p0, cr0, [r13, #-4]!") \
+ TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], #4") \
+ TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], #-4") \
+ TEST_COPROCESSOR("stc"two"l p0, cr0, [r13], {1}") \
+ TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #4]") \
+ TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #-4]") \
+ TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #4]!") \
+ TEST_COPROCESSOR("ldc"two" p0, cr0, [r13, #-4]!") \
+ TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], #4") \
+ TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], #-4") \
+ TEST_COPROCESSOR("ldc"two" p0, cr0, [r13], {1}") \
+ TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #4]") \
+ TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #-4]") \
+ TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #4]!") \
+ TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13, #-4]!") \
+ TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], #4") \
+ TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], #-4") \
+ TEST_COPROCESSOR("ldc"two"l p0, cr0, [r13], {1}") \
\
- TEST_COPROCESSOR( "stc"two" 0, cr0, [r15, #4]") \
- TEST_COPROCESSOR( "stc"two" 0, cr0, [r15, #-4]") \
+ TEST_COPROCESSOR( "stc"two" p0, cr0, [r15, #4]") \
+ TEST_COPROCESSOR( "stc"two" p0, cr0, [r15, #-4]") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##daf0001) " @ stc"two" 0, cr0, [r15, #4]!") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##d2f0001) " @ stc"two" 0, cr0, [r15, #-4]!") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##caf0001) " @ stc"two" 0, cr0, [r15], #4") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##c2f0001) " @ stc"two" 0, cr0, [r15], #-4") \
- TEST_COPROCESSOR( "stc"two" 0, cr0, [r15], {1}") \
- TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15, #4]") \
- TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15, #-4]") \
+ TEST_COPROCESSOR( "stc"two" p0, cr0, [r15], {1}") \
+ TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15, #4]") \
+ TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15, #-4]") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##def0001) " @ stc"two"l 0, cr0, [r15, #4]!") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##d6f0001) " @ stc"two"l 0, cr0, [r15, #-4]!") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##cef0001) " @ stc"two"l 0, cr0, [r15], #4") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##c6f0001) " @ stc"two"l 0, cr0, [r15], #-4") \
- TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15], {1}") \
- TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15, #4]") \
- TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15, #-4]") \
+ TEST_COPROCESSOR( "stc"two"l p0, cr0, [r15], {1}") \
+ TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15, #4]") \
+ TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15, #-4]") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##dbf0001) " @ ldc"two" 0, cr0, [r15, #4]!") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##d3f0001) " @ ldc"two" 0, cr0, [r15, #-4]!") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##cbf0001) " @ ldc"two" 0, cr0, [r15], #4") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##c3f0001) " @ ldc"two" 0, cr0, [r15], #-4") \
- TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15], {1}") \
- TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15, #4]") \
- TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15, #-4]") \
+ TEST_COPROCESSOR( "ldc"two" p0, cr0, [r15], {1}") \
+ TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15, #4]") \
+ TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15, #-4]") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##dff0001) " @ ldc"two"l 0, cr0, [r15, #4]!") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##d7f0001) " @ ldc"two"l 0, cr0, [r15, #-4]!") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##cff0001) " @ ldc"two"l 0, cr0, [r15], #4") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##c7f0001) " @ ldc"two"l 0, cr0, [r15], #-4") \
- TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15], {1}")
+ TEST_COPROCESSOR( "ldc"two"l p0, cr0, [r15], {1}")
#define COPROCESSOR_INSTRUCTIONS_MC_MR(two,cc) \
\
- TEST_COPROCESSOR( "mcrr"two" 0, 15, r0, r14, cr0") \
- TEST_COPROCESSOR( "mcrr"two" 15, 0, r14, r0, cr15") \
+ TEST_COPROCESSOR( "mcrr"two" p0, 15, r0, r14, cr0") \
+ TEST_COPROCESSOR( "mcrr"two" p15, 0, r14, r0, cr15") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##c4f00f0) " @ mcrr"two" 0, 15, r0, r15, cr0") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##c40ff0f) " @ mcrr"two" 15, 0, r15, r0, cr15") \
- TEST_COPROCESSOR( "mrrc"two" 0, 15, r0, r14, cr0") \
- TEST_COPROCESSOR( "mrrc"two" 15, 0, r14, r0, cr15") \
+ TEST_COPROCESSOR( "mrrc"two" p0, 15, r0, r14, cr0") \
+ TEST_COPROCESSOR( "mrrc"two" p15, 0, r14, r0, cr15") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##c5f00f0) " @ mrrc"two" 0, 15, r0, r15, cr0") \
TEST_UNSUPPORTED(__inst_arm(0x##cc##c50ff0f) " @ mrrc"two" 15, 0, r15, r0, cr15") \
- TEST_COPROCESSOR( "cdp"two" 15, 15, cr15, cr15, cr15, 7") \
- TEST_COPROCESSOR( "cdp"two" 0, 0, cr0, cr0, cr0, 0") \
- TEST_COPROCESSOR( "mcr"two" 15, 7, r15, cr15, cr15, 7") \
- TEST_COPROCESSOR( "mcr"two" 0, 0, r0, cr0, cr0, 0") \
- TEST_COPROCESSOR( "mrc"two" 15, 7, r15, cr15, cr15, 7") \
- TEST_COPROCESSOR( "mrc"two" 0, 0, r0, cr0, cr0, 0")
+ TEST_COPROCESSOR( "cdp"two" p15, 15, cr15, cr15, cr15, 7") \
+ TEST_COPROCESSOR( "cdp"two" p0, 0, cr0, cr0, cr0, 0") \
+ TEST_COPROCESSOR( "mcr"two" p15, 7, r15, cr15, cr15, 7") \
+ TEST_COPROCESSOR( "mcr"two" p0, 0, r0, cr0, cr0, 0") \
+ TEST_COPROCESSOR( "mrc"two" p15, 7, r14, cr15, cr15, 7") \
+ TEST_COPROCESSOR( "mrc"two" p0, 0, r0, cr0, cr0, 0")
COPROCESSOR_INSTRUCTIONS_ST_LD("",e)
#if __LINUX_ARM_ARCH__ >= 5
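
The mnemonic reordering throughout this file follows from the .syntax unified directive added to test-core.h below: UAL places the condition code after the operation and size suffix, so divided-syntax "streqh" is written "strheq". A small illustration, guarded so it only builds for ARM state:

    #if defined(__arm__) && !defined(__thumb__)
    /* store v at *p only when it is non-zero; in unified syntax the
       condition ("ne") follows the full "strh" mnemonic */
    static void store_if_set(unsigned short *p, unsigned short v)
    {
            asm volatile(".syntax unified\n\t"
                         "cmp    %1, #0\n\t"
                         "strhne %1, [%0]"
                         : : "r" (p), "r" (v) : "cc", "memory");
    }
    #endif
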
diff --git a/arch/arm/probes/kprobes/test-core.h b/arch/arm/probes/kprobes/test-core.h
index 19a5b2add41e..f1d5583e7bbb 100644
--- a/arch/arm/probes/kprobes/test-core.h
+++ b/arch/arm/probes/kprobes/test-core.h
@@ -108,6 +108,7 @@ struct test_arg_end {
#define TESTCASE_START(title) \
__asm__ __volatile__ ( \
+ ".syntax unified \n\t" \
"bl __kprobes_test_case_start \n\t" \
".pushsection .rodata \n\t" \
"10: \n\t" \
diff --git a/arch/arm/probes/uprobes/core.c b/arch/arm/probes/uprobes/core.c
index c4b49b322e8a..f5f790c6e5f8 100644
--- a/arch/arm/probes/uprobes/core.c
+++ b/arch/arm/probes/uprobes/core.c
@@ -204,7 +204,7 @@ unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
static struct undef_hook uprobes_arm_break_hook = {
.instr_mask = 0x0fffffff,
.instr_val = (UPROBE_SWBP_ARM_INSN & 0x0fffffff),
- .cpsr_mask = MODE_MASK,
+ .cpsr_mask = (PSR_T_BIT | MODE_MASK),
.cpsr_val = USR_MODE,
.fn = uprobe_trap_handler,
};
@@ -212,7 +212,7 @@ static struct undef_hook uprobes_arm_break_hook = {
static struct undef_hook uprobes_arm_ss_hook = {
.instr_mask = 0x0fffffff,
.instr_val = (UPROBE_SS_ARM_INSN & 0x0fffffff),
- .cpsr_mask = MODE_MASK,
+ .cpsr_mask = (PSR_T_BIT | MODE_MASK),
.cpsr_val = USR_MODE,
.fn = uprobe_trap_handler,
};
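
Adding PSR_T_BIT to cpsr_mask makes these hooks match only ARM-state user mode, since cpsr_val leaves the T bit clear. A standalone sketch of the mask/value test the undef-hook dispatch applies (constants copied from the ARM UAPI headers):

    #include <stdint.h>
    #include <stdio.h>

    #define PSR_T_BIT  0x00000020u
    #define MODE_MASK  0x0000001fu
    #define USR_MODE   0x00000010u

    int main(void)
    {
            uint32_t mask = PSR_T_BIT | MODE_MASK, val = USR_MODE;
            uint32_t arm_cpsr   = USR_MODE;              /* ARM-state user mode */
            uint32_t thumb_cpsr = USR_MODE | PSR_T_BIT;  /* Thumb-state user mode */
            printf("ARM matches:   %d\n", (arm_cpsr & mask) == val);
            printf("Thumb matches: %d\n", (thumb_cpsr & mask) == val);
            return 0;
    }
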
diff --git a/arch/arm/tools/Makefile b/arch/arm/tools/Makefile
index 3654f979851b..87de1f63f649 100644
--- a/arch/arm/tools/Makefile
+++ b/arch/arm/tools/Makefile
@@ -8,16 +8,15 @@
gen := arch/$(ARCH)/include/generated
kapi := $(gen)/asm
uapi := $(gen)/uapi/asm
-syshdr := $(srctree)/$(src)/syscallhdr.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
sysnr := $(srctree)/$(src)/syscallnr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
syscall := $(src)/syscall.tbl
gen-y := $(gen)/calls-oabi.S
gen-y += $(gen)/calls-eabi.S
kapi-hdrs-y := $(kapi)/unistd-nr.h
kapi-hdrs-y += $(kapi)/mach-types.h
-uapi-hdrs-y := $(uapi)/unistd-common.h
uapi-hdrs-y += $(uapi)/unistd-oabi.h
uapi-hdrs-y += $(uapi)/unistd-eabi.h
@@ -41,28 +40,21 @@ $(kapi)/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE
$(call if_changed,gen_mach)
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abi_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '__NR_SYSCALL_BASE'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis $(abis) \
+ --offset __NR_SYSCALL_BASE $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
- '$(systbl_abi_$(basetarget))'
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@
quiet_cmd_sysnr = SYSNR $@
cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \
'$(syshdr_abi_$(basetarget))'
-syshdr_abi_unistd-common := common
-$(uapi)/unistd-common.h: $(syscall) $(syshdr) FORCE
- $(call if_changed,syshdr)
-
-syshdr_abi_unistd-oabi := oabi
+$(uapi)/unistd-oabi.h: abis := common,oabi
$(uapi)/unistd-oabi.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abi_unistd-eabi := eabi
+$(uapi)/unistd-eabi.h: abis := common,eabi
$(uapi)/unistd-eabi.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
@@ -70,10 +62,10 @@ sysnr_abi_unistd-nr := common,oabi,eabi,compat
$(kapi)/unistd-nr.h: $(syscall) $(sysnr) FORCE
$(call if_changed,sysnr)
-systbl_abi_calls-oabi := common,oabi
+$(gen)/calls-oabi.S: abis := common,oabi
$(gen)/calls-oabi.S: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
-systbl_abi_calls-eabi := common,eabi
+$(gen)/calls-eabi.S: abis := common,eabi
$(gen)/calls-eabi.S: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index dcc1191291a2..c5df1179fc5d 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -456,3 +456,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/arm/tools/syscallhdr.sh b/arch/arm/tools/syscallhdr.sh
deleted file mode 100644
index 6b2f25cdd721..000000000000
--- a/arch/arm/tools/syscallhdr.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_ASM_ARM_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-if echo $out | grep -q uapi; then
- fileguard="_UAPI$fileguard"
-fi
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- echo "#ifndef ${fileguard}"
- echo "#define ${fileguard} 1"
- echo ""
-
- while read nr abi name entry ; do
- if [ -z "$offset" ]; then
- echo "#define __NR_${prefix}${name} $nr"
- else
- echo "#define __NR_${prefix}${name} ($offset + $nr)"
- fi
- done
-
- echo ""
- echo "#endif /* ${fileguard} */"
-) > "$out"
diff --git a/arch/arm/tools/syscalltbl.sh b/arch/arm/tools/syscalltbl.sh
deleted file mode 100644
index ae7e93cfbfd3..000000000000
--- a/arch/arm/tools/syscalltbl.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- while read nr abi name entry compat; do
- if [ "$abi" = "eabi" -a -n "$compat" ]; then
- echo "$in: error: a compat entry for an EABI syscall ($name) makes no sense" >&2
- exit 1
- fi
-
- if [ -n "$entry" ]; then
- if [ -z "$compat" ]; then
- echo "NATIVE($nr, $entry)"
- else
- echo "COMPAT($nr, $entry, $compat)"
- fi
- fi
- done
-) > "$out"
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 467fa225c3d0..a7e54a087b80 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -138,9 +138,15 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
static int __init xen_mm_init(void)
{
struct gnttab_cache_flush cflush;
- if (!xen_initial_domain())
+ int rc;
+
+ if (!xen_swiotlb_detect())
return 0;
- xen_swiotlb_init(1, false);
+
+ rc = xen_swiotlb_init();
+ /* we can work with the default swiotlb */
+ if (rc < 0 && rc != -EEXIST)
+ return rc;
cflush.op = 0;
cflush.a.dev_bus_addr = 0;
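
The new return-code handling treats -EEXIST as "a swiotlb is already set up" and continues. A minimal sketch of that tolerated-error pattern; setup_swiotlb here is a stand-in, not the kernel function:

    #include <errno.h>
    #include <stdio.h>

    static int setup_swiotlb(void) { return -EEXIST; }  /* stand-in result */

    int main(void)
    {
            int rc = setup_swiotlb();
            if (rc < 0 && rc != -EEXIST)
                    return -rc;             /* a real failure */
            printf("continuing; rc=%d is acceptable\n", rc);
            return 0;
    }
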
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c
index acb464547a54..84a1cea1f43b 100644
--- a/arch/arm/xen/p2m.c
+++ b/arch/arm/xen/p2m.c
@@ -11,6 +11,7 @@
#include <xen/xen.h>
#include <xen/interface/memory.h>
+#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>
@@ -109,7 +110,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
map_ops[i].status = GNTST_general_error;
unmap.host_addr = map_ops[i].host_addr,
unmap.handle = map_ops[i].handle;
- map_ops[i].handle = ~0;
+ map_ops[i].handle = INVALID_GRANT_HANDLE;
if (map_ops[i].flags & GNTMAP_device_map)
unmap.dev_bus_addr = map_ops[i].dev_bus_addr;
else
@@ -130,7 +131,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
return 0;
}
-EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_unmap_grant_ref *kunmap_ops,
@@ -145,7 +145,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
return 0;
}
-EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
bool __set_phys_to_machine_multi(unsigned long pfn,
unsigned long mfn, unsigned long nr_pages)
diff --git a/arch/arm64/Kbuild b/arch/arm64/Kbuild
index d6465823b281..7b393cfec071 100644
--- a/arch/arm64/Kbuild
+++ b/arch/arm64/Kbuild
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += kernel/ mm/
-obj-$(CONFIG_NET) += net/
+obj-y += kernel/ mm/ net/
obj-$(CONFIG_KVM) += kvm/
obj-$(CONFIG_XEN) += xen/
obj-$(CONFIG_CRYPTO) += crypto/
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1f212b47a48a..e07e7de9ac49 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -11,6 +11,12 @@ config ARM64
select ACPI_PPTT if ACPI
select ARCH_HAS_DEBUG_WX
select ARCH_BINFMT_ELF_STATE
+ select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
+ select ARCH_ENABLE_MEMORY_HOTPLUG
+ select ARCH_ENABLE_MEMORY_HOTREMOVE
+ select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
+ select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
+ select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DMA_PREP_COHERENT
@@ -36,6 +42,7 @@ config ARM64
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_ELF_PROT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_INLINE_READ_LOCK if !PREEMPTION
@@ -67,14 +74,17 @@ config ARM64
select ARCH_KEEP_MEMBLOCK
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_GNU_PROPERTY
+ select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_USE_SYM_ANNOTATIONS
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_MEMORY_FAILURE
select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK
select ARCH_SUPPORTS_LTO_CLANG if CPU_LITTLE_ENDIAN
select ARCH_SUPPORTS_LTO_CLANG_THIN
+ select ARCH_SUPPORTS_CFI_CLANG
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
select ARCH_SUPPORTS_NUMA_BALANCING
@@ -84,6 +94,7 @@ config ARM64
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if ARM64_4K_PAGES || (ARM64_16K_PAGES && !ARM64_VA_BITS_36)
select ARCH_WANT_LD_ORPHAN_WARN
+ select ARCH_WANTS_NO_INSTR
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARM_AMBA
select ARM_ARCH_TIMER
@@ -108,9 +119,9 @@ config ARM64
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
+ select GENERIC_FIND_FIRST_BIT
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_IPI
- select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
@@ -138,14 +149,15 @@ config ARM64
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+ select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
- select HAVE_ARCH_PFN_VALID
select HAVE_ARCH_PREL32_RELOCATIONS
+ select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_STACKLEAK
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
@@ -159,7 +171,6 @@ config ARM64
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_CONTEXT_TRACKING
- select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
@@ -194,6 +205,7 @@ config ARM64
select IOMMU_DMA if IOMMU_SUPPORT
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
+ select KASAN_VMALLOC if KASAN_GENERIC
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE
select NEED_SG_DMA_LENGTH
@@ -208,6 +220,7 @@ config ARM64
select SWIOTLB
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
+ select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD
help
ARM 64-bit (AArch64) Linux support.
@@ -295,18 +308,7 @@ config GENERIC_CSUM
config GENERIC_CALIBRATE_DELAY
def_bool y
-config ZONE_DMA
- bool "Support DMA zone" if EXPERT
- default y
-
-config ZONE_DMA32
- bool "Support DMA32 zone" if EXPERT
- default y
-
-config ARCH_ENABLE_MEMORY_HOTPLUG
- def_bool y
-
-config ARCH_ENABLE_MEMORY_HOTREMOVE
+config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
def_bool y
config SMP
@@ -521,6 +523,9 @@ config ARM64_ERRATUM_843419
If unsure, say Y.
+config ARM64_LD_HAS_FIX_ERRATUM_843419
+ def_bool $(ld-option,--fix-cortex-a53-843419)
+
config ARM64_ERRATUM_1024718
bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
default y
@@ -810,6 +815,16 @@ config QCOM_FALKOR_ERRATUM_E1041
If unsure, say Y.
+config NVIDIA_CARMEL_CNP_ERRATUM
+ bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores"
+ default y
+ help
+ If CNP is enabled on Carmel cores, non-shareable TLBIs on a core will not
+ invalidate shared TLB entries installed by a different core, as they would
+ on standard ARM cores.
+
+ If unsure, say Y.
+
config SOCIONEXT_SYNQUACER_PREITS
bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
default y
@@ -1013,7 +1028,7 @@ config NODES_SHIFT
int "Maximum NUMA Nodes (as a power of 2)"
range 1 10
default "4"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
@@ -1030,39 +1045,20 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
def_bool y
depends on NUMA
-config HOLES_IN_ZONE
- def_bool y
-
source "kernel/Kconfig.hz"
config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_VMEMMAP_ENABLE
-
-config ARCH_SPARSEMEM_DEFAULT
- def_bool ARCH_SPARSEMEM_ENABLE
-
-config ARCH_SELECT_MEMORY_MODEL
- def_bool ARCH_SPARSEMEM_ENABLE
-
-config ARCH_FLATMEM_ENABLE
- def_bool !NUMA
+ select SPARSEMEM_VMEMMAP
config HW_PERF_EVENTS
def_bool y
depends on ARM_PMU
-config SYS_SUPPORTS_HUGETLBFS
+config ARCH_HAS_FILTER_PGPROT
def_bool y
-config ARCH_WANT_HUGE_PMD_SHARE
-
-config ARCH_HAS_CACHE_LINE_SIZE
- def_bool y
-
-config ARCH_ENABLE_SPLIT_PMD_PTLOCK
- def_bool y if PGTABLE_LEVELS > 2
-
# Supported by clang >= 7.0
config CC_HAVE_SHADOW_CALL_STACK
def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)
@@ -1098,6 +1094,7 @@ config KEXEC
config KEXEC_FILE
bool "kexec file based system call"
select KEXEC_CORE
+ select HAVE_IMA_KEXEC if IMA
help
This is the new version of the kexec system call. This system call is
file based and takes file descriptors as system call argument
@@ -1157,8 +1154,8 @@ config XEN
config FORCE_MAX_ZONEORDER
int
- default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
- default "12" if (ARM64_16K_PAGES && TRANSPARENT_HUGEPAGE)
+ default "14" if ARM64_64K_PAGES
+ default "12" if ARM64_16K_PAGES
default "11"
help
The kernel memory allocator divides physically contiguous memory
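
The simplified defaults follow from buddy-allocator arithmetic: the largest contiguous allocation is PAGE_SIZE << (FORCE_MAX_ZONEORDER - 1), and the chosen orders keep it large enough for huge pages at each granule size. Checking the three defaults:

    #include <stdio.h>

    int main(void)
    {
            /* {page size in bytes, FORCE_MAX_ZONEORDER} for each default */
            unsigned long cases[][2] = { {64 << 10, 14}, {16 << 10, 12}, {4 << 10, 11} };
            for (int i = 0; i < 3; i++)
                    printf("%3luK pages, order %2lu -> max %4lu MiB\n",
                           cases[i][0] >> 10, cases[i][1],
                           (cases[i][0] << (cases[i][1] - 1)) >> 20);
            return 0;   /* prints 512 MiB, 32 MiB and 4 MiB respectively */
    }
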
@@ -1398,10 +1395,13 @@ config ARM64_PAN
config AS_HAS_LDAPR
def_bool $(as-instr,.arch_extension rcpc)
+config AS_HAS_LSE_ATOMICS
+ def_bool $(as-instr,.arch_extension lse)
+
config ARM64_LSE_ATOMICS
bool
default ARM64_USE_LSE_ATOMICS
- depends on $(as-instr,.arch_extension lse)
+ depends on AS_HAS_LSE_ATOMICS
config ARM64_USE_LSE_ATOMICS
bool "Atomic instructions"
@@ -1418,19 +1418,6 @@ config ARM64_USE_LSE_ATOMICS
built with binutils >= 2.25 in order for the new instructions
to be used.
-config ARM64_VHE
- bool "Enable support for Virtualization Host Extensions (VHE)"
- default y
- help
- Virtualization Host Extensions (VHE) allow the kernel to run
- directly at EL2 (instead of EL1) on processors that support
- it. This leads to better performance for KVM, as they reduce
- the cost of the world switch.
-
- Selecting this option allows the VHE feature to be detected
- at runtime, and does not affect processors that do not
- implement this feature.
-
endmenu
menu "ARMv8.2 architectural features"
@@ -1484,12 +1471,6 @@ menu "ARMv8.3 architectural features"
config ARM64_PTR_AUTH
bool "Enable support for pointer authentication"
default y
- depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
- # Modern compilers insert a .note.gnu.property section note for PAC
- # which is only understood by binutils starting with version 2.33.1.
- depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
- depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
- depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
help
Pointer authentication (part of the ARMv8.3 Extensions) provides
instructions for signing and authenticating pointers against secret
@@ -1501,13 +1482,6 @@ config ARM64_PTR_AUTH
for each process at exec() time, with these keys being
context-switched along with the process.
- If the compiler supports the -mbranch-protection or
- -msign-return-address flag (e.g. GCC 7 or later), then this option
- will also cause the kernel itself to be compiled with return address
- protection. In this case, and if the target hardware is known to
- support pointer authentication, then CONFIG_STACKPROTECTOR can be
- disabled with minimal loss of protection.
-
The feature is detected at runtime. If the feature is not present in
hardware it will not be advertised to userspace/KVM guest nor will it
be enabled.
@@ -1518,6 +1492,24 @@ config ARM64_PTR_AUTH
but with the feature disabled. On such a system, this option should
not be selected.
+config ARM64_PTR_AUTH_KERNEL
+ bool "Use pointer authentication for kernel"
+ default y
+ depends on ARM64_PTR_AUTH
+ depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
+ # Modern compilers insert a .note.gnu.property section note for PAC
+ # which is only understood by binutils starting with version 2.33.1.
+ depends on LD_IS_LLD || LD_VERSION >= 23301 || (CC_IS_GCC && GCC_VERSION < 90100)
+ depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
+ depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
+ help
+ If the compiler supports the -mbranch-protection or
+ -msign-return-address flag (e.g. GCC 7 or later), then this option
+ will cause the kernel itself to be compiled with return address
+ protection. In this case, and if the target hardware is known to
+ support pointer authentication, then CONFIG_STACKPROTECTOR can be
+ disabled with minimal loss of protection.
+
This feature works with FUNCTION_GRAPH_TRACER option only if
DYNAMIC_FTRACE_WITH_REGS is enabled.
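
A userspace analogue of what the new ARM64_PTR_AUTH_KERNEL option enables for the kernel image: compiling the sketch below on an AArch64 toolchain with -msign-return-address=all (GCC 7+) or -mbranch-protection=pac-ret (newer GCC and clang) brackets the non-leaf function with PAC instructions:

    #include <stdio.h>

    static int add_one(int x) { return x + 1; }

    int caller(int x)
    {
            /* non-leaf function: the compiler emits paciasp on entry and
               autiasp (or retaa) on return, signing the saved link register
               against SP and a per-process secret key */
            return add_one(x) * 2;
    }

    int main(void)
    {
            printf("%d\n", caller(20));
            return 0;
    }
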
@@ -1609,7 +1601,7 @@ config ARM64_BTI_KERNEL
bool "Use Branch Target Identification for kernel"
default y
depends on ARM64_BTI
- depends on ARM64_PTR_AUTH
+ depends on ARM64_PTR_AUTH_KERNEL
depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
depends on !CC_IS_GCC || GCC_VERSION >= 100100
@@ -1658,6 +1650,7 @@ config ARM64_MTE
default y
depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
depends on AS_HAS_ARMV8_5
+ depends on AS_HAS_LSE_ATOMICS
# Required for tag checking in the uaccess routines
depends on ARM64_PAN
select ARCH_USES_HIGH_VMA_FLAGS
@@ -1683,10 +1676,23 @@ config ARM64_MTE
endmenu
+menu "ARMv8.7 architectural features"
+
+config ARM64_EPAN
+ bool "Enable support for Enhanced Privileged Access Never (EPAN)"
+ default y
+ depends on ARM64_PAN
+ help
+ Enhanced Privileged Access Never (EPAN) allows Privileged
+ Access Never to be used with Execute-only mappings.
+
+ The feature is detected at runtime, and will remain disabled
+ if the cpu does not implement the feature.
+endmenu
+
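
Execute-only here means a user mapping with PROT_EXEC but neither PROT_READ nor PROT_WRITE; EPAN extends PAN so that privileged loads cannot read such pages either. A userspace sketch that creates one:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            /* execute-only anonymous page: PROT_EXEC alone */
            void *p = mmap(NULL, 4096, PROT_EXEC,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }
            printf("execute-only page at %p\n", p);
            munmap(p, 4096);
            return 0;
    }
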
config ARM64_SVE
bool "ARM Scalable Vector Extension support"
default y
- depends on !KVM || ARM64_VHE
help
The Scalable Vector Extension (SVE) is an extension to the AArch64
execution state which complements and extends the SIMD functionality
@@ -1715,12 +1721,6 @@ config ARM64_SVE
booting the kernel. If unsure and you are not observing these
symptoms, you should assume that it is safe to say Y.
- CPUs that support SVE are architecturally required to support the
- Virtualization Host Extensions (VHE), so the kernel makes no
- provision for supporting SVE alongside KVM without VHE enabled.
- Thus, you will need to enable CONFIG_ARM64_VHE if you want to support
- KVM in the same kernel image.
-
config ARM64_MODULE_PLTS
bool "Use PLTs to allow module memory to spill over into vmalloc area"
depends on MODULES
@@ -1855,12 +1855,6 @@ config CMDLINE_FROM_BOOTLOADER
the boot loader doesn't provide any, the default kernel command
string provided in CMDLINE will be used.
-config CMDLINE_EXTEND
- bool "Extend bootloader kernel arguments"
- help
- The command-line arguments provided by the boot loader will be
- appended to the default kernel command string.
-
config CMDLINE_FORCE
bool "Always use the default kernel command string"
help
@@ -1911,14 +1905,6 @@ config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
-config ARCH_ENABLE_HUGEPAGE_MIGRATION
- def_bool y
- depends on HUGETLB_PAGE && MIGRATION
-
-config ARCH_ENABLE_THP_MIGRATION
- def_bool y
- depends on TRANSPARENT_HUGEPAGE
-
menu "Power management options"
source "kernel/power/Kconfig"
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index cdfd5fed457f..7336c1fd0dda 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -8,16 +8,6 @@ config ARCH_ACTIONS
help
This enables support for the Actions Semiconductor S900 SoC family.
-config ARCH_AGILEX
- bool "Intel's Agilex SoCFPGA Family"
- help
- This enables support for Intel's Agilex SoCFPGA Family.
-
-config ARCH_N5X
- bool "Intel's eASIC N5X SoCFPGA Family"
- help
- This enables support for Intel's eASIC N5X SoCFPGA Family.
-
config ARCH_SUNXI
bool "Allwinner sunxi 64-bit SoC Family"
select ARCH_HAS_RESET_CONTROLLER
@@ -36,6 +26,13 @@ config ARCH_ALPINE
This enables support for the Annapurna Labs Alpine
SoC family.
+config ARCH_APPLE
+ bool "Apple Silicon SoC family"
+ select APPLE_AIC
+ help
+ This enables support for Apple's in-house ARM SoC family, starting
+ with the Apple M1.
+
config ARCH_BCM2835
bool "Broadcom BCM2835 family"
select TIMER_OF
@@ -168,6 +165,7 @@ config ARCH_MEDIATEK
config ARCH_MESON
bool "Amlogic Platforms"
+ select COMMON_CLK
select MESON_IRQ_GPIO
help
This enables support for the arm64 based Amlogic SoCs
@@ -235,9 +233,7 @@ config ARCH_RENESAS
config ARCH_ROCKCHIP
bool "Rockchip Platforms"
select ARCH_HAS_RESET_CONTROLLER
- select GPIOLIB
select PINCTRL
- select PINCTRL_ROCKCHIP
select PM
select ROCKCHIP_TIMER
help
@@ -254,10 +250,11 @@ config ARCH_SEATTLE
help
This enables support for the AMD Seattle SoC family.
-config ARCH_STRATIX10
- bool "Altera's Stratix 10 SoCFPGA Family"
+config ARCH_INTEL_SOCFPGA
+ bool "Intel's SoCFPGA ARMv8 Families"
help
- This enables support for Altera's Stratix 10 SoCFPGA Family.
+ This enables support for Intel's SoCFPGA ARMv8 families:
+ Stratix 10 (formerly Altera), Agilex and eASIC N5X.
config ARCH_SYNQUACER
bool "Socionext SynQuacer SoC Family"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 5b84aec31ed3..3b5b1c480449 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -21,7 +21,7 @@ LDFLAGS_vmlinux += -shared -Bsymbolic -z notext \
endif
ifeq ($(CONFIG_ARM64_ERRATUM_843419),y)
- ifeq ($(call ld-option, --fix-cortex-a53-843419),)
+ ifneq ($(CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419),y)
$(warning ld does not support --fix-cortex-a53-843419; kernel may be susceptible to erratum)
else
LDFLAGS_vmlinux += --fix-cortex-a53-843419
@@ -70,7 +70,7 @@ endif
# off, this will be overridden if we are using branch protection.
branch-prot-flags-y += $(call cc-option,-mbranch-protection=none)
-ifeq ($(CONFIG_ARM64_PTR_AUTH),y)
+ifeq ($(CONFIG_ARM64_PTR_AUTH_KERNEL),y)
branch-prot-flags-$(CONFIG_CC_HAS_SIGN_RETURN_ADDRESS) := -msign-return-address=all
# We enable additional protection for leaf functions as there is some
# narrow potential for ROP protection benefits and no substantial
@@ -175,6 +175,9 @@ vdso_install:
$(if $(CONFIG_COMPAT_VDSO), \
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
+archprepare:
+ $(Q)$(MAKE) $(build)=arch/arm64/tools kapi
+
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index f1173cd93594..639e01a4d855 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -6,6 +6,7 @@ subdir-y += amazon
subdir-y += amd
subdir-y += amlogic
subdir-y += apm
+subdir-y += apple
subdir-y += arm
subdir-y += bitmain
subdir-y += broadcom
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts
index c7bd73f35ed8..f17cc89f472d 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-amarula-relic.dts
@@ -173,7 +173,7 @@
compatible = "x-powers,axp803";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
index f7fe9fa50cb3..997a19372683 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-bananapi-m64.dts
@@ -191,7 +191,7 @@
compatible = "x-powers,axp803";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
index 09b3c7fb82c0..e47ff06a6fa9 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-nanopi-a64.dts
@@ -152,7 +152,7 @@
compatible = "x-powers,axp803";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
index f3f8e177ab61..ec7e2c0e82c1 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-olinuxino.dts
@@ -185,7 +185,7 @@
compatible = "x-powers,axp803";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
index 70e31743f0ba..097a5511523a 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts
@@ -192,7 +192,7 @@
compatible = "x-powers,axp803";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
index 437ffe3628a5..596a25907432 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-lts.dts
@@ -19,3 +19,7 @@
};
};
};
+
+&mmc0 {
+ broken-cd; /* card detect is broken on *some* boards */
+};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
index 329cf276561e..2accb5ddf783 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts
@@ -139,7 +139,7 @@
compatible = "x-powers,axp803";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
index 7ae16541d14f..34e67f5f8297 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
@@ -245,7 +245,7 @@
compatible = "x-powers,axp803";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
index 9f69d489a81d..79adea3f8cc1 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
@@ -296,7 +296,7 @@
compatible = "x-powers,axp803";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
index 422a8507f674..7ef96f9ff7ae 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinetab.dts
@@ -266,7 +266,7 @@
compatible = "x-powers,axp803";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
x-powers,drive-vbus-en;
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
index 3402cec87035..6d78a1c98f10 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
@@ -34,7 +34,7 @@
vmmc-supply = <&reg_dcdc1>;
disable-wp;
bus-width = <4>;
- cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; /* PF6 push-pull switch */
status = "okay";
};
@@ -45,7 +45,7 @@
compatible = "x-powers,axp803";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
index f0a16f355e27..45e1abdf70a0 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-teres-i.dts
@@ -205,7 +205,7 @@
compatible = "x-powers,axp803";
reg = <0x3a3>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
wakeup-source;
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 57786fc120c3..5b30e6c1fa05 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -648,6 +648,7 @@
pio: pinctrl@1c20800 {
compatible = "allwinner,sun50i-a64-pinctrl";
reg = <0x01c20800 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
@@ -818,6 +819,7 @@
compatible = "allwinner,sun50i-a64-lradc",
"allwinner,sun8i-a83t-r-lradc";
reg = <0x01c21800 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
@@ -1208,6 +1210,7 @@
compatible = "allwinner,sun50i-a64-rtc",
"allwinner,sun8i-h3-rtc";
reg = <0x01f00000 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
clock-output-names = "osc32k", "osc32k-out", "iosc";
@@ -1219,7 +1222,7 @@
compatible = "allwinner,sun50i-a64-r-intc",
"allwinner,sun6i-a31-r-intc";
interrupt-controller;
- #interrupt-cells = <2>;
+ #interrupt-cells = <3>;
reg = <0x01f00c00 0x400>;
interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
};
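
Note on the hunk above: the R_INTC binding moves from two interrupt cells to
three, mirroring the GIC specifier, which is why every AXP803 PMIC node in the
board files earlier in this series changes from <0 IRQ_TYPE_LEVEL_LOW> to
<GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>. A minimal consumer sketch under the new
binding (node shape illustrative; GIC_SPI 32 is the A64 NMI line used above):

        pmic@3a3 {
                compatible = "x-powers,axp803";
                reg = <0x3a3>;
                interrupt-parent = <&r_intc>;
                /* was: interrupts = <0 IRQ_TYPE_LEVEL_LOW>; */
                interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_LOW>;
        };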
@@ -1279,6 +1282,7 @@
r_pio: pinctrl@1f02c00 {
compatible = "allwinner,sun50i-a64-r-pinctrl";
reg = <0x01f02c00 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&r_ccu CLK_APB0_PIO>, <&osc24M>, <&osc32k>;
clock-names = "apb", "hosc", "losc";
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
index 4f4755152fce..6249e9e02928 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts
@@ -150,14 +150,30 @@
vcc-pg-supply = <&reg_aldo1>;
};
-&r_i2c {
+&r_ir {
+ linux,rc-map-name = "rc-beelink-gs1";
+ status = "okay";
+};
+
+&r_pio {
+ /*
+ * FIXME: We can't add that supply for now since it would
+ * create a circular dependency between pinctrl, the regulator
+ * and the RSB Bus.
+ *
+ * vcc-pl-supply = <&reg_aldo1>;
+ */
+ vcc-pm-supply = <&reg_aldo1>;
+};
+
+&r_rsb {
status = "okay";
- axp805: pmic@36 {
+ axp805: pmic@745 {
compatible = "x-powers,axp805", "x-powers,axp806";
- reg = <0x36>;
+ reg = <0x745>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
#interrupt-cells = <1>;
x-powers,self-working-mode;
@@ -273,26 +289,6 @@
};
};
-&r_ir {
- linux,rc-map-name = "rc-beelink-gs1";
- status = "okay";
-};
-
-&r_pio {
- /*
- * PL0 and PL1 are used for PMIC I2C
- * don't enable the pl-supply else
- * it will fail at boot
- *
- * vcc-pl-supply = <&reg_aldo1>;
- */
- vcc-pm-supply = <&reg_aldo1>;
-};
-
-&rtc {
- clocks = <&ext_osc32k>;
-};
-
&spdif {
status = "okay";
};
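
The Beelink GS1 changes above also move the AXP805 PMIC from the r_i2c bus to
r_rsb, so its unit address changes from the 7-bit I2C address 0x36 to the
16-bit RSB runtime address 0x745; these are different address spaces, not a
renumbering. The RSB child-node shape, condensed from the hunk above:

        &r_rsb {
                status = "okay";

                pmic@745 {
                        compatible = "x-powers,axp805", "x-powers,axp806";
                        reg = <0x745>;  /* RSB runtime address, not I2C */
                };
        };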
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
index 7e83f6146f8a..c45d7b7fb39a 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts
@@ -175,14 +175,18 @@
vcc-pg-supply = <&reg_vcc_wifi_io>;
};
-&r_i2c {
+&r_ir {
+ status = "okay";
+};
+
+&r_rsb {
status = "okay";
- axp805: pmic@36 {
+ axp805: pmic@745 {
compatible = "x-powers,axp805", "x-powers,axp806";
- reg = <0x36>;
+ reg = <0x745>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
#interrupt-cells = <1>;
x-powers,self-working-mode;
@@ -291,10 +295,6 @@
};
};
-&r_ir {
- status = "okay";
-};
-
&rtc {
clocks = <&ext_osc32k>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
index da0875bd38d4..92745128fcfe 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi
@@ -112,14 +112,22 @@
vcc-pg-supply = <&reg_aldo1>;
};
-&r_i2c {
+&r_ir {
+ status = "okay";
+};
+
+&r_pio {
+ vcc-pm-supply = <&reg_bldo3>;
+};
+
+&r_rsb {
status = "okay";
- axp805: pmic@36 {
+ axp805: pmic@745 {
compatible = "x-powers,axp805", "x-powers,axp806";
- reg = <0x36>;
+ reg = <0x745>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
#interrupt-cells = <1>;
x-powers,self-working-mode;
@@ -232,14 +240,6 @@
};
};
-&r_ir {
- status = "okay";
-};
-
-&r_pio {
- vcc-pm-supply = <&reg_bldo3>;
-};
-
&rtc {
clocks = <&ext_osc32k>;
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
index b868ad17af8f..1ffd68f43f87 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
@@ -168,7 +168,7 @@
compatible = "x-powers,axp805", "x-powers,axp806";
reg = <0x36>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
#interrupt-cells = <1>;
x-powers,self-working-mode;
@@ -281,7 +281,7 @@
compatible = "nxp,pcf8563";
reg = <0x51>;
interrupt-parent = <&r_intc>;
- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
#clock-cells = <0>;
};
};
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
index 49e979794094..50815867ce7b 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
@@ -294,6 +294,7 @@
pio: pinctrl@300b000 {
compatible = "allwinner,sun50i-h6-pinctrl";
reg = <0x0300b000 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
@@ -902,6 +903,7 @@
rtc: rtc@7000000 {
compatible = "allwinner,sun50i-h6-rtc";
reg = <0x07000000 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
clock-output-names = "osc32k", "osc32k-out", "iosc";
@@ -927,10 +929,9 @@
};
r_intc: interrupt-controller@7021000 {
- compatible = "allwinner,sun50i-h6-r-intc",
- "allwinner,sun6i-a31-r-intc";
+ compatible = "allwinner,sun50i-h6-r-intc";
interrupt-controller;
- #interrupt-cells = <2>;
+ #interrupt-cells = <3>;
reg = <0x07021000 0x400>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
};
@@ -938,6 +939,7 @@
r_pio: pinctrl@7022000 {
compatible = "allwinner,sun50i-h6-r-pinctrl";
reg = <0x07022000 0x400>;
+ interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&r_ccu CLK_R_APB1>, <&osc24M>, <&rtc 0>;
@@ -995,9 +997,9 @@
compatible = "allwinner,sun8i-a23-rsb";
reg = <0x07083000 0x400>;
interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&r_ccu 13>;
+ clocks = <&r_ccu CLK_R_APB2_RSB>;
clock-frequency = <3000000>;
- resets = <&r_ccu 7>;
+ resets = <&r_ccu RST_R_APB2_RSB>;
pinctrl-names = "default";
pinctrl-0 = <&r_rsb_pins>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/altera/Makefile b/arch/arm64/boot/dts/altera/Makefile
index 10119c7ab437..4db83fbeb115 100644
--- a/arch/arm64/boot/dts/altera/Makefile
+++ b/arch/arm64/boot/dts/altera/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-dtb-$(CONFIG_ARCH_STRATIX10) += socfpga_stratix10_socdk.dtb \
+dtb-$(CONFIG_ARCH_INTEL_SOCFPGA) += socfpga_stratix10_socdk.dtb \
socfpga_stratix10_socdk_nand.dtb
diff --git a/arch/arm64/boot/dts/amlogic/Makefile b/arch/arm64/boot/dts/amlogic/Makefile
index 78a569d7fa20..a58ccecfcb55 100644
--- a/arch/arm64/boot/dts/amlogic/Makefile
+++ b/arch/arm64/boot/dts/amlogic/Makefile
@@ -29,6 +29,7 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-libretech-cc.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-libretech-cc-v2.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-nexbox-a95x.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905x-p212.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-mecool-kii-pro.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p230.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-p231.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-phicomm-n1.dtb
@@ -38,6 +39,8 @@ dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905w-p281.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905w-tx3-mini.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxl-s905d-libretech-pc.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-khadas-vim2.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxm-mecool-kiii-pro.dtb
+dtb-$(CONFIG_ARCH_MESON) += meson-gxm-minix-neo-u9h.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-nexbox-a1.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q200.dtb
dtb-$(CONFIG_ARCH_MESON) += meson-gxm-q201.dtb
diff --git a/arch/arm64/boot/dts/amlogic/meson-a1.dtsi b/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
index 755b4ad15184..b4000cf65a9a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-a1.dtsi
@@ -88,13 +88,13 @@
#reset-cells = <1>;
};
- periphs_pinctrl: pinctrl@0400 {
+ periphs_pinctrl: pinctrl@400 {
compatible = "amlogic,meson-a1-periphs-pinctrl";
#address-cells = <2>;
#size-cells = <2>;
ranges;
- gpio: bank@0400 {
+ gpio: bank@400 {
reg = <0x0 0x0400 0x0 0x003c>,
<0x0 0x0480 0x0 0x0118>;
reg-names = "mux", "gpio";
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index d945c84ab697..895c43c7af9f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
@@ -1731,7 +1731,6 @@
interrupts = <GIC_SPI 150 IRQ_TYPE_EDGE_RISING>;
clocks = <&clkc CLKID_VAPB>;
resets = <&reset RESET_GE2D>;
- reset-names = "core";
};
gic: interrupt-controller@ffc01000 {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index b858c5e43cc8..793d48f72390 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -17,6 +17,12 @@
#address-cells = <2>;
#size-cells = <2>;
+ aliases {
+ mmc0 = &sd_emmc_b; /* SD card */
+ mmc1 = &sd_emmc_c; /* eMMC */
+ mmc2 = &sd_emmc_a; /* SDIO */
+ };
+
chosen {
#address-cells = <2>;
#size-cells = <2>;
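
The aliases added above pin each MMC controller to a fixed host index so that
mmc0/mmc1/mmc2 (and the derived mmcblk names) no longer depend on probe order;
this relies on the MMC core honoring mmcN aliases when assigning host indexes
(an assumption about core behavior in this kernel). The resulting mapping:

        aliases {
                mmc0 = &sd_emmc_b;      /* SD card -> mmc0 / mmcblk0 */
                mmc1 = &sd_emmc_c;      /* eMMC    -> mmc1 / mmcblk1 */
                mmc2 = &sd_emmc_a;      /* SDIO    -> mmc2 */
        };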
@@ -122,9 +128,9 @@
pcie: pcie@fc000000 {
compatible = "amlogic,g12a-pcie", "snps,dw-pcie";
- reg = <0x0 0xfc000000 0x0 0x400000
- 0x0 0xff648000 0x0 0x2000
- 0x0 0xfc400000 0x0 0x200000>;
+ reg = <0x0 0xfc000000 0x0 0x400000>,
+ <0x0 0xff648000 0x0 0x2000>,
+ <0x0 0xfc400000 0x0 0x200000>;
reg-names = "elbi", "cfg", "config";
interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
#interrupt-cells = <1>;
@@ -134,8 +140,8 @@
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
- ranges = <0x81000000 0 0 0x0 0xfc600000 0 0x00100000
- 0x82000000 0 0xfc700000 0x0 0xfc700000 0 0x1900000>;
+ ranges = <0x81000000 0 0 0x0 0xfc600000 0 0x00100000>,
+ <0x82000000 0 0xfc700000 0x0 0xfc700000 0 0x1900000>;
clocks = <&clkc CLKID_PCIE_PHY
&clkc CLKID_PCIE_COMB
@@ -2003,7 +2009,7 @@
};
};
- vrtc: rtc@0a8 {
+ vrtc: rtc@a8 {
compatible = "amlogic,meson-vrtc";
reg = <0x0 0x000a8 0x0 0x4>;
};
@@ -2181,7 +2187,7 @@
amlogic,channel-interrupts = <64 65 66 67 68 69 70 71>;
};
- watchdog: wdt@f0d0 {
+ watchdog: watchdog@f0d0 {
compatible = "amlogic,meson-gxbb-wdt";
reg = <0x0 0xf0d0 0x0 0x10>;
clocks = <&xtal>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts
index 211191f66344..6c7bfacbad78 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gsking-x.dts
@@ -12,7 +12,7 @@
#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
/ {
- compatible = "azw,gsking-x", "amlogic,g12b";
+ compatible = "azw,gsking-x", "amlogic,s922x", "amlogic,g12b";
model = "Beelink GS-King X";
aliases {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts
index 0e331aa5a2d7..707daf92787b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking-pro.dts
@@ -11,7 +11,7 @@
#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
/ {
- compatible = "azw,gtking", "amlogic,g12b";
+ compatible = "azw,gtking", "amlogic,s922x", "amlogic,g12b";
model = "Beelink GT-King Pro";
aliases {
@@ -35,7 +35,7 @@
leds {
compatible = "gpio-leds";
- white {
+ led-white {
label = "power:white";
gpios = <&gpio_ao GPIOAO_11 GPIO_ACTIVE_HIGH>;
default-state = "on";
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts
index a7db84a500bb..5d96c1449050 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-gtking.dts
@@ -11,7 +11,7 @@
#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
/ {
- compatible = "azw,gtking", "amlogic,g12b";
+ compatible = "azw,gtking", "amlogic,s922x", "amlogic,g12b";
model = "Beelink GT-King";
aliases {
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
index 58ce569b2ace..344573e157a7 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
@@ -42,7 +42,7 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-blue {
label = "n2:blue";
gpios = <&gpio_ao GPIOAO_11 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
@@ -410,7 +410,7 @@
&ext_mdio {
external_phy: ethernet-phy@0 {
- /* Realtek RTL8211F (0x001cc916) */
+ /* Realtek RTL8211F (0x001cc916) */
reg = <0>;
max-speed = <1000>;
@@ -446,13 +446,58 @@
};
&gpio {
+ gpio-line-names =
+ /* GPIOZ */
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ /* GPIOH */
+ "", "", "", "", "", "", "", "",
+ "",
+ /* BOOT */
+ "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "",
+ /* GPIOC */
+ "", "", "", "", "", "", "", "",
+ /* GPIOA */
+ "PIN_44", /* GPIOA_0 */
+ "PIN_46", /* GPIOA_1 */
+ "PIN_45", /* GPIOA_2 */
+ "PIN_47", /* GPIOA_3 */
+ "PIN_26", /* GPIOA_4 */
+ "", "", "", "", "", "",
+ "PIN_42", /* GPIOA_11 */
+ "PIN_32", /* GPIOA_12 */
+ "PIN_7", /* GPIOA_13 */
+ "PIN_27", /* GPIOA_14 */
+ "PIN_28", /* GPIOA_15 */
+ /* GPIOX */
+ "PIN_16", /* GPIOX_0 */
+ "PIN_18", /* GPIOX_1 */
+ "PIN_22", /* GPIOX_2 */
+ "PIN_11", /* GPIOX_3 */
+ "PIN_13", /* GPIOX_4 */
+ "PIN_33", /* GPIOX_5 */
+ "PIN_35", /* GPIOX_6 */
+ "PIN_15", /* GPIOX_7 */
+ "PIN_19", /* GPIOX_8 */
+ "PIN_21", /* GPIOX_9 */
+ "PIN_24", /* GPIOX_10 */
+ "PIN_23", /* GPIOX_11 */
+ "PIN_8", /* GPIOX_12 */
+ "PIN_10", /* GPIOX_13 */
+ "PIN_29", /* GPIOX_14 */
+ "PIN_31", /* GPIOX_15 */
+ "PIN_12", /* GPIOX_16 */
+ "PIN_3", /* GPIOX_17 */
+ "PIN_5", /* GPIOX_18 */
+ "PIN_36"; /* GPIOX_19 */
/*
* WARNING: The USB Hub on the Odroid-N2 needs a reset signal
* to be turned high in order to be detected by the USB Controller.
* This signal should be handled by a USB-specific power sequence
* in order to reset the Hub when USB bus is powered down.
*/
- usb-hub {
+ hog-0 {
gpio-hog;
gpios = <GPIOH_4 GPIO_ACTIVE_HIGH>;
output-high;
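
The hunk above only renames the hog node: hog-0 matches the node-name pattern
the GPIO hog schema expects (an assumption about the dtschema rule), while the
hog itself is unchanged and still claims the line at boot and drives it high
to take the hub out of reset. The generic hog shape, with an optional
line-name added purely for illustration:

        hog-0 {
                gpio-hog;                           /* claim the line at boot */
                gpios = <GPIOH_4 GPIO_ACTIVE_HIGH>;
                output-high;                        /* de-assert the hub reset */
                line-name = "usb-hub-reset";        /* illustrative, not in the patch */
        };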
@@ -508,6 +553,11 @@
status = "okay";
};
+&saradc {
+ status = "okay";
+ vref-supply = <&vddao_1v8>;
+};
+
/* SD card */
&sd_emmc_b {
status = "okay";
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 0edd137151f8..3d00404aae0f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -20,6 +20,12 @@
#address-cells = <2>;
#size-cells = <2>;
+ aliases {
+ mmc0 = &sd_emmc_b; /* SD card */
+ mmc1 = &sd_emmc_c; /* eMMC */
+ mmc2 = &sd_emmc_a; /* SDIO */
+ };
+
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index bfaf7f41a2d6..201596247fd9 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -257,7 +257,7 @@
* This signal should be handled by a USB-specific power sequence
* in order to reset the Hub when USB bus is powered down.
*/
- usb-hub {
+ hog-0 {
gpio-hog;
gpios = <GPIOAO_4 GPIO_ACTIVE_HIGH>;
output-high;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-mecool-kii-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-mecool-kii-pro.dts
new file mode 100644
index 000000000000..c529b6c860a4
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-mecool-kii-pro.dts
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/*
+ * Author: Christian Hewitt <christianshewitt@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-gxl-s905d.dtsi"
+#include "meson-gx-p23x-q20x.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ compatible = "videostrong,gxl-kii-pro", "amlogic,s905d", "amlogic,meson-gxl";
+ model = "MeCool KII Pro";
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1710000>;
+
+ button-function {
+ label = "Update";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <10000>;
+ };
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <100>;
+
+ button@0 {
+ label = "power";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-blue {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_POWER;
+ gpios = <&gpio GPIODV_24 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ panic-indicator;
+ };
+ };
+};
+
+&ethmac {
+ phy-mode = "rmii";
+ phy-handle = <&internal_phy>;
+};
+
+&ir {
+ linux,rc-map-name = "rc-mecool-kii-pro";
+};
+
+&sd_emmc_a {
+ brcmf: wifi@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ };
+};
+
+&uart_A {
+ status = "okay";
+ pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
+ pinctrl-names = "default";
+ uart-has-rtscts;
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+ max-speed = <2000000>;
+ clocks = <&wifi32k>;
+ clock-names = "lpo";
+ };
+};
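
For reference, the adc-keys node in the new board file maps a resistor-ladder
voltage to key events: the driver matches the measured voltage against each
button's press-threshold-microvolt (roughly, the closest threshold wins), and
a reading at or above keyup-threshold-microvolt means no key is held. A
two-button sketch; the second button and its values are hypothetical:

        adc-keys {
                compatible = "adc-keys";
                io-channels = <&saradc 0>;
                io-channel-names = "buttons";
                keyup-threshold-microvolt = <1710000>;

                button-function {
                        label = "Update";
                        linux,code = <KEY_VENDOR>;
                        press-threshold-microvolt = <10000>;
                };

                /* hypothetical second button further up the ladder */
                button-extra {
                        label = "Extra";
                        linux,code = <KEY_MENU>;
                        press-threshold-microvolt = <300000>;
                };
        };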
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts
index ad6d72254150..b331a013572f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-sml5442tw.dts
@@ -16,28 +16,28 @@
leds {
compatible = "gpio-leds";
- yellow {
+ led-yellow {
color = <LED_COLOR_ID_YELLOW>;
function = LED_FUNCTION_STATUS;
gpios = <&gpio_ao GPIOAO_6 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- blue {
+ led-blue {
color = <LED_COLOR_ID_BLUE>;
function = LED_FUNCTION_STATUS;
gpios = <&gpio GPIODV_28 GPIO_ACTIVE_HIGH>;
default-state = "off";
};
- green {
+ led-green {
color = <LED_COLOR_ID_GREEN>;
function = LED_FUNCTION_STATUS;
gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>;
default-state = "on";
};
- red {
+ led-red {
color = <LED_COLOR_ID_RED>;
function = LED_FUNCTION_STATUS;
gpios = <&gpio GPIODV_27 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-mecool-kiii-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-mecool-kiii-pro.dts
new file mode 100644
index 000000000000..ebebf344b715
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-mecool-kiii-pro.dts
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+/*
+ * Author: Christian Hewitt <christianshewitt@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-gxm.dtsi"
+#include "meson-gx-p23x-q20x.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ compatible = "videostrong,gxm-kiii-pro", "amlogic,s912", "amlogic,meson-gxm";
+ model = "MeCool KIII Pro";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0xc0000000>;
+ };
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1710000>;
+
+ button-function {
+ label = "Update";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <10000>;
+ };
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <100>;
+
+ button@0 {
+ label = "power";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-blue {
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_POWER;
+ gpios = <&gpio GPIODV_24 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ panic-indicator;
+ };
+ };
+};
+
+&ethmac {
+ pinctrl-0 = <&eth_pins>;
+ pinctrl-names = "default";
+
+ phy-handle = <&external_phy>;
+
+ amlogic,tx-delay-ns = <2>;
+
+ phy-mode = "rgmii";
+};
+
+&external_mdio {
+ external_phy: ethernet-phy@0 {
+ /* Realtek RTL8211F (0x001cc916) */
+ reg = <0>;
+ max-speed = <1000>;
+
+ reset-assert-us = <10000>;
+ reset-deassert-us = <80000>;
+ reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>;
+
+ interrupt-parent = <&gpio_intc>;
+ /* MAC_INTR on GPIOZ_15 */
+ interrupts = <25 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&ir {
+ linux,rc-map-name = "rc-mecool-kiii-pro";
+};
+
+&sd_emmc_a {
+ brcmf: wifi@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ };
+};
+
+&uart_A {
+ status = "okay";
+ pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
+ pinctrl-names = "default";
+ uart-has-rtscts;
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+ max-speed = <2000000>;
+ clocks = <&wifi32k>;
+ clock-names = "lpo";
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-minix-neo-u9h.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-minix-neo-u9h.dts
new file mode 100644
index 000000000000..ea9f234d1fc7
--- /dev/null
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-minix-neo-u9h.dts
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) Christian Hewitt <christianshewitt@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "meson-gxm.dtsi"
+#include "meson-gx-p23x-q20x.dtsi"
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+/ {
+ compatible = "minix,neo-u9h", "amlogic,s912", "amlogic,meson-gxm";
+ model = "Minix Neo U9-H";
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-white {
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_POWER;
+ gpios = <&gpio_ao GPIOAO_9 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ panic-indicator;
+ };
+ };
+
+ adc-keys {
+ compatible = "adc-keys";
+ io-channels = <&saradc 0>;
+ io-channel-names = "buttons";
+ keyup-threshold-microvolt = <1710000>;
+
+ button-function {
+ label = "update";
+ linux,code = <KEY_VENDOR>;
+ press-threshold-microvolt = <10000>;
+ };
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <100>;
+
+ button@0 {
+ label = "power";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&ethmac {
+ pinctrl-0 = <&eth_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&external_phy>;
+ amlogic,tx-delay-ns = <2>;
+ phy-mode = "rgmii";
+};
+
+&external_mdio {
+ external_phy: ethernet-phy@0 {
+ /* Realtek RTL8211F (0x001cc916) */
+ reg = <0>;
+ max-speed = <1000>;
+
+ reset-assert-us = <10000>;
+ reset-deassert-us = <80000>;
+ reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>;
+
+ interrupt-parent = <&gpio_intc>;
+ /* MAC_INTR on GPIOZ_15 */
+ interrupts = <25 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
+&ir {
+ linux,rc-map-name = "rc-minix-neo";
+};
+
+&i2c_B {
+ status = "okay";
+ pinctrl-0 = <&i2c_b_pins>;
+ pinctrl-names = "default";
+
+ rtc: rtc@51 {
+ status = "okay";
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "xin32k";
+ wakeup-source;
+ };
+};
+
+&sd_emmc_a {
+ brcmf: wifi@1 {
+ reg = <1>;
+ compatible = "brcm,bcm4329-fmac";
+ };
+};
+
+&uart_A {
+ status = "okay";
+ pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
+ pinctrl-names = "default";
+ uart-has-rtscts;
+
+ bluetooth {
+ compatible = "brcm,bcm43438-bt";
+ shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+ max-speed = <2000000>;
+ clocks = <&wifi32k>;
+ clock-names = "lpo";
+ };
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-wetek-core2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-wetek-core2.dts
index ec794c134c15..1e7f77f9b533 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-wetek-core2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-wetek-core2.dts
@@ -22,7 +22,7 @@
leds {
compatible = "gpio-leds";
- blue {
+ led-blue {
color = <LED_COLOR_ID_BLUE>;
function = LED_FUNCTION_STATUS;
gpios = <&gpio GPIODV_24 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
index 877e3b989203..66d67524b031 100644
--- a/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-khadas-vim3.dtsi
@@ -43,14 +43,14 @@
leds {
compatible = "gpio-leds";
- white {
+ led-white {
color = <LED_COLOR_ID_WHITE>;
function = LED_FUNCTION_STATUS;
gpios = <&gpio_ao GPIOAO_4 GPIO_ACTIVE_HIGH>;
linux,default-trigger = "heartbeat";
};
- red {
+ led-red {
color = <LED_COLOR_ID_RED>;
function = LED_FUNCTION_STATUS;
gpios = <&gpio_expander 5 GPIO_ACTIVE_HIGH>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
index b2a4e823c1d8..8c327c03d845 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid-c4.dts
@@ -47,7 +47,7 @@
* This signal should be handled by a USB-specific power sequence
* in order to reset the Hub when USB bus is powered down.
*/
- usb-hub {
+ hog-0 {
gpio-hog;
gpios = <GPIOH_4 GPIO_ACTIVE_HIGH>;
output-high;
diff --git a/arch/arm64/boot/dts/apple/Makefile b/arch/arm64/boot/dts/apple/Makefile
new file mode 100644
index 000000000000..cbbd701ebf05
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_ARCH_APPLE) += t8103-j274.dtb
diff --git a/arch/arm64/boot/dts/apple/t8103-j274.dts b/arch/arm64/boot/dts/apple/t8103-j274.dts
new file mode 100644
index 000000000000..e0f6775b9878
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8103-j274.dts
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple Mac mini (M1, 2020)
+ *
+ * target-type: J274
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+/dts-v1/;
+
+#include "t8103.dtsi"
+
+/ {
+ compatible = "apple,j274", "apple,t8103", "apple,arm-platform";
+ model = "Apple Mac mini (M1, 2020)";
+
+ aliases {
+ serial0 = &serial0;
+ };
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ stdout-path = "serial0";
+
+ framebuffer0: framebuffer@0 {
+ compatible = "apple,simple-framebuffer", "simple-framebuffer";
+ reg = <0 0 0 0>; /* To be filled by loader */
+ /* Format properties will be added by loader */
+ status = "disabled";
+ };
+ };
+
+ memory@800000000 {
+ device_type = "memory";
+ reg = <0x8 0 0x2 0>; /* To be filled by loader */
+ };
+};
+
+&serial0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/apple/t8103.dtsi b/arch/arm64/boot/dts/apple/t8103.dtsi
new file mode 100644
index 000000000000..a1e22a2ea2e5
--- /dev/null
+++ b/arch/arm64/boot/dts/apple/t8103.dtsi
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0+ OR MIT
+/*
+ * Apple T8103 "M1" SoC
+ *
+ * Other names: H13G, "Tonga"
+ *
+ * Copyright The Asahi Linux Contributors
+ */
+
+#include <dt-bindings/interrupt-controller/apple-aic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ compatible = "apple,t8103", "apple,arm-platform";
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "apple,icestorm";
+ device_type = "cpu";
+ reg = <0x0 0x0>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0>; /* To be filled by loader */
+ };
+
+ cpu1: cpu@1 {
+ compatible = "apple,icestorm";
+ device_type = "cpu";
+ reg = <0x0 0x1>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0>; /* To be filled by loader */
+ };
+
+ cpu2: cpu@2 {
+ compatible = "apple,icestorm";
+ device_type = "cpu";
+ reg = <0x0 0x2>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0>; /* To be filled by loader */
+ };
+
+ cpu3: cpu@3 {
+ compatible = "apple,icestorm";
+ device_type = "cpu";
+ reg = <0x0 0x3>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0>; /* To be filled by loader */
+ };
+
+ cpu4: cpu@10100 {
+ compatible = "apple,firestorm";
+ device_type = "cpu";
+ reg = <0x0 0x10100>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0>; /* To be filled by loader */
+ };
+
+ cpu5: cpu@10101 {
+ compatible = "apple,firestorm";
+ device_type = "cpu";
+ reg = <0x0 0x10101>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0>; /* To be filled by loader */
+ };
+
+ cpu6: cpu@10102 {
+ compatible = "apple,firestorm";
+ device_type = "cpu";
+ reg = <0x0 0x10102>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0>; /* To be filled by loader */
+ };
+
+ cpu7: cpu@10103 {
+ compatible = "apple,firestorm";
+ device_type = "cpu";
+ reg = <0x0 0x10103>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0>; /* To be filled by loader */
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&aic>;
+ interrupt-names = "phys", "virt", "hyp-phys", "hyp-virt";
+ interrupts = <AIC_FIQ AIC_TMR_GUEST_PHYS IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_FIQ AIC_TMR_HV_PHYS IRQ_TYPE_LEVEL_HIGH>,
+ <AIC_FIQ AIC_TMR_HV_VIRT IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ clk24: clock-24m {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "clk24";
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ ranges;
+ nonposted-mmio;
+
+ serial0: serial@235200000 {
+ compatible = "apple,s5l-uart";
+ reg = <0x2 0x35200000 0x0 0x1000>;
+ reg-io-width = <4>;
+ interrupt-parent = <&aic>;
+ interrupts = <AIC_IRQ 605 IRQ_TYPE_LEVEL_HIGH>;
+ /*
+ * TODO: figure out the clocking properly, there may
+ * be a third selectable clock.
+ */
+ clocks = <&clk24>, <&clk24>;
+ clock-names = "uart", "clk_uart_baud0";
+ status = "disabled";
+ };
+
+ aic: interrupt-controller@23b100000 {
+ compatible = "apple,t8103-aic", "apple,aic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x2 0x3b100000 0x0 0x8000>;
+ };
+ };
+};
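
The AIC in the new t8103.dtsi uses a three-cell interrupt specifier, as the
timer and serial nodes above show: the first cell selects the interrupt kind
(AIC_IRQ or AIC_FIQ), the second the number, and the third the trigger flags.
A consumer sketch (node name, address and interrupt number are illustrative):

        device@200000000 {
                interrupt-parent = <&aic>;
                interrupts = <AIC_IRQ 123 IRQ_TYPE_LEVEL_HIGH>;
        };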
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index f6c55877fbd9..1cc7fdcec51b 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -544,6 +544,10 @@
ranges = <0x01000000 0x00 0x00000000 0x00 0x5f800000 0x0 0x00800000>,
<0x02000000 0x00 0x50000000 0x00 0x50000000 0x0 0x08000000>,
<0x42000000 0x40 0x00000000 0x40 0x00000000 0x1 0x00000000>;
+ /* Standard AXI Translation entries as programmed by EDK2 */
+ dma-ranges = <0x02000000 0x0 0x2c1c0000 0x0 0x2c1c0000 0x0 0x00040000>,
+ <0x02000000 0x0 0x80000000 0x0 0x80000000 0x0 0x80000000>,
+ <0x43000000 0x8 0x00000000 0x8 0x00000000 0x2 0x00000000>;
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &gic 0 GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
@@ -640,7 +644,6 @@
#iommu-cells = <1>;
#global-interrupts = <1>;
dma-coherent;
- status = "disabled";
};
smmu_hdlcd1: iommu@7fb10000 {
diff --git a/arch/arm64/boot/dts/arm/juno-r1.dts b/arch/arm64/boot/dts/arm/juno-r1.dts
index 5f290090b0cf..0e24e29eb9b1 100644
--- a/arch/arm64/boot/dts/arm/juno-r1.dts
+++ b/arch/arm64/boot/dts/arm/juno-r1.dts
@@ -230,6 +230,10 @@
status = "okay";
};
+&smmu_pcie {
+ status = "okay";
+};
+
&etm0 {
cpu = <&A57_0>;
};
diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts
index 305300dd521c..e609420ce3e4 100644
--- a/arch/arm64/boot/dts/arm/juno-r2.dts
+++ b/arch/arm64/boot/dts/arm/juno-r2.dts
@@ -236,6 +236,10 @@
status = "okay";
};
+&smmu_pcie {
+ status = "okay";
+};
+
&etm0 {
cpu = <&A72_0>;
};
diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/Makefile b/arch/arm64/boot/dts/broadcom/bcm4908/Makefile
index ebebc0cd421f..cc75854519ac 100644
--- a/arch/arm64/boot/dts/broadcom/bcm4908/Makefile
+++ b/arch/arm64/boot/dts/broadcom/bcm4908/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_ARCH_BCM4908) += bcm4906-netgear-r8000p.dtb
+dtb-$(CONFIG_ARCH_BCM4908) += bcm4906-tplink-archer-c2300-v1.dtb
dtb-$(CONFIG_ARCH_BCM4908) += bcm4908-asus-gt-ac5300.dtb
diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts
index ee3ed612274c..2dd028438c22 100644
--- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts
+++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-netgear-r8000p.dts
@@ -18,11 +18,106 @@
leds {
compatible = "gpio-leds";
- wps {
+ led-power-white {
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_WHITE>;
+ gpios = <&gpio0 8 GPIO_ACTIVE_LOW>;
+ };
+
+ led-power-amber {
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_AMBER>;
+ gpios = <&gpio0 9 GPIO_ACTIVE_LOW>;
+ };
+
+ led-wps {
function = LED_FUNCTION_WPS;
color = <LED_COLOR_ID_WHITE>;
gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
};
+
+ led-2ghz {
+ function = "2ghz";
+ color = <LED_COLOR_ID_WHITE>;
+ gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
+ };
+
+ led-5ghz-1 {
+ function = "5ghz-1";
+ color = <LED_COLOR_ID_WHITE>;
+ gpios = <&gpio0 15 GPIO_ACTIVE_LOW>;
+ };
+
+ led-5ghz-2 {
+ function = "5ghz-2";
+ color = <LED_COLOR_ID_WHITE>;
+ gpios = <&gpio0 16 GPIO_ACTIVE_LOW>;
+ };
+
+ led-usb2 {
+ function = "usb2";
+ color = <LED_COLOR_ID_WHITE>;
+ gpios = <&gpio0 17 GPIO_ACTIVE_LOW>;
+ };
+
+ led-usb3 {
+ function = "usb3";
+ color = <LED_COLOR_ID_WHITE>;
+ gpios = <&gpio0 18 GPIO_ACTIVE_LOW>;
+ };
+
+ led-wifi {
+ function = "wifi";
+ color = <LED_COLOR_ID_WHITE>;
+ gpios = <&gpio0 56 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&enet {
+ nvmem-cells = <&base_mac_addr>;
+ nvmem-cell-names = "mac-address";
+};
+
+&usb_phy {
+ brcm,ioc = <1>;
+ status = "okay";
+};
+
+&ehci {
+ status = "okay";
+};
+
+&ohci {
+ status = "okay";
+};
+
+&xhci {
+ status = "okay";
+};
+
+&ports {
+ port@0 {
+ label = "lan4";
+ };
+
+ port@1 {
+ label = "lan3";
+ };
+
+ port@2 {
+ label = "lan2";
+ };
+
+ port@3 {
+ label = "lan1";
+ };
+
+ port@7 {
+ reg = <7>;
+ phy-mode = "internal";
+ phy-handle = <&phy12>;
+ label = "wan";
};
};
@@ -40,11 +135,21 @@
#size-cells = <1>;
partition@0 {
+ compatible = "nvmem-cells";
label = "cferom";
reg = <0x0 0x100000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x0 0x100000>;
+
+ base_mac_addr: mac@106a0 {
+ reg = <0x106a0 0x6>;
+ };
};
partition@100000 {
+ compatible = "brcm,bcm4908-firmware";
label = "firmware";
reg = <0x100000 0x4400000>;
};
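
The R8000P hunks above pair an NVMEM provider with its consumer: the cferom
partition exposes a six-byte cell at offset 0x106a0, and &enet references it
by the standard "mac-address" cell name so the Ethernet driver can read its
base MAC from flash. The provider/consumer shape, condensed from the hunks:

        partition@0 {
                compatible = "nvmem-cells";
                label = "cferom";
                reg = <0x0 0x100000>;

                #address-cells = <1>;
                #size-cells = <1>;
                ranges = <0 0x0 0x100000>;

                base_mac_addr: mac@106a0 {
                        reg = <0x106a0 0x6>;    /* 6-byte MAC address cell */
                };
        };

        &enet {
                nvmem-cells = <&base_mac_addr>;
                nvmem-cell-names = "mac-address";
        };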
diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-tplink-archer-c2300-v1.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-tplink-archer-c2300-v1.dts
new file mode 100644
index 000000000000..b63eefab48bd
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906-tplink-archer-c2300-v1.dts
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0-or-later OR MIT
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+#include "bcm4906.dtsi"
+
+/ {
+ compatible = "tplink,archer-c2300-v1", "brcm,bcm4906", "brcm,bcm4908";
+ model = "TP-Link Archer C2300 V1";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00 0x00 0x00 0x20000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-power {
+ function = LED_FUNCTION_POWER;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
+ };
+
+ led-2ghz {
+ function = "2ghz";
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
+ };
+
+ led-5ghz {
+ function = "5ghz";
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
+ };
+
+ led-wan-amber {
+ function = LED_FUNCTION_WAN;
+ color = <LED_COLOR_ID_AMBER>;
+ gpios = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+ };
+
+ led-wan-blue {
+ function = LED_FUNCTION_WAN;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
+ };
+
+ led-lan {
+ function = LED_FUNCTION_LAN;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
+ };
+
+ led-wps {
+ function = LED_FUNCTION_WPS;
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpio0 14 GPIO_ACTIVE_LOW>;
+ };
+
+ led-usb2 {
+ function = "usb2";
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpio0 15 GPIO_ACTIVE_LOW>;
+ };
+
+ led-usb3 {
+ function = "usbd3";
+ color = <LED_COLOR_ID_BLUE>;
+ gpios = <&gpio0 17 GPIO_ACTIVE_LOW>;
+ };
+
+ led-brightness {
+ function = LED_FUNCTION_BACKLIGHT;
+ color = <LED_COLOR_ID_WHITE>;
+ gpios = <&gpio0 19 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ poll-interval = <100>;
+
+ brightness {
+ label = "LEDs";
+ linux,code = <KEY_BRIGHTNESS_ZERO>;
+ gpios = <&gpio0 18 GPIO_ACTIVE_LOW>;
+ };
+
+ wps {
+ label = "WPS";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&gpio0 21 GPIO_ACTIVE_LOW>;
+ };
+
+ wifi {
+ label = "WiFi";
+ linux,code = <KEY_RFKILL>;
+ gpios = <&gpio0 22 GPIO_ACTIVE_LOW>;
+ };
+
+ restart {
+ label = "Reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&gpio0 23 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&usb_phy {
+ brcm,ioc = <1>;
+ status = "okay";
+};
+
+&ehci {
+ status = "okay";
+};
+
+&ohci {
+ status = "okay";
+};
+
+&xhci {
+ status = "okay";
+};
+
+&ports {
+ port@0 {
+ label = "lan4";
+ };
+
+ port@1 {
+ label = "lan3";
+ };
+
+ port@2 {
+ label = "lan2";
+ };
+
+ port@3 {
+ label = "lan1";
+ };
+
+ port@7 {
+ reg = <7>;
+ phy-mode = "internal";
+ phy-handle = <&phy12>;
+ label = "wan";
+ };
+};
+
+&nandcs {
+ nand-ecc-strength = <4>;
+ nand-ecc-step-size = <512>;
+ nand-on-flash-bbt;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ partitions {
+ compatible = "brcm,bcm4908-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "cferom";
+ reg = <0x0 0x100000>;
+ };
+
+ partition@100000 {
+ compatible = "brcm,bcm4908-firmware";
+ reg = <0x100000 0x3900000>;
+ };
+
+ partition@5800000 {
+ compatible = "brcm,bcm4908-firmware";
+ reg = <0x3a00000 0x3900000>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
index 6e4ad66ff536..169fbb7cfd34 100644
--- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
+++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908-asus-gt-ac5300.dts
@@ -44,6 +44,28 @@
};
};
+&enet {
+ nvmem-cells = <&base_mac_addr>;
+ nvmem-cell-names = "mac-address";
+};
+
+&usb_phy {
+ brcm,ioc = <1>;
+ status = "okay";
+};
+
+&ehci {
+ status = "okay";
+};
+
+&ohci {
+ status = "okay";
+};
+
+&xhci {
+ status = "okay";
+};
+
&ports {
port@0 {
label = "lan2";
@@ -65,6 +87,7 @@
port@7 {
label = "sw";
reg = <7>;
+ phy-mode = "rgmii";
fixed-link {
speed = <1000>;
@@ -105,13 +128,32 @@
#size-cells = <0>;
partitions {
- compatible = "fixed-partitions";
+ compatible = "brcm,bcm4908-partitions";
#address-cells = <1>;
#size-cells = <1>;
partition@0 {
+ compatible = "nvmem-cells";
label = "cferom";
reg = <0x0 0x100000>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x0 0x100000>;
+
+ base_mac_addr: mac@106a0 {
+ reg = <0x106a0 0x6>;
+ };
+ };
+
+ partition@100000 {
+ compatible = "brcm,bcm4908-firmware";
+ reg = <0x100000 0x5700000>;
+ };
+
+ partition@5800000 {
+ compatible = "brcm,bcm4908-firmware";
+ reg = <0x5800000 0x5700000>;
};
};
};
diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
index 9354077f74cd..8060178b365d 100644
--- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi
@@ -2,6 +2,8 @@
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/soc/bcm-pmb.h>
/dts-v1/;
@@ -110,28 +112,52 @@
#size-cells = <1>;
ranges = <0x00 0x00 0x80000000 0x281000>;
- usb@c300 {
+ enet: ethernet@2000 {
+ compatible = "brcm,bcm4908-enet";
+ reg = <0x2000 0x1000>;
+
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "rx", "tx";
+ };
+
+ usb_phy: usb-phy@c200 {
+ compatible = "brcm,bcm4908-usb-phy";
+ reg = <0xc200 0x100>;
+ reg-names = "ctrl";
+ power-domains = <&pmb BCM_PMB_HOST_USB>;
+ dr_mode = "host";
+ brcm,has-xhci;
+ brcm,has-eohci;
+ #phy-cells = <1>;
+ status = "disabled";
+ };
+
+ ehci: usb@c300 {
compatible = "generic-ehci";
reg = <0xc300 0x100>;
interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb_phy PHY_TYPE_USB2>;
status = "disabled";
};
- usb@c400 {
+ ohci: usb@c400 {
compatible = "generic-ohci";
reg = <0xc400 0x100>;
interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb_phy PHY_TYPE_USB2>;
status = "disabled";
};
- usb@d000 {
+ xhci: usb@d000 {
compatible = "generic-xhci";
reg = <0xd000 0x8c8>;
interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&usb_phy PHY_TYPE_USB3>;
status = "disabled";
};
- ethernet-switch@80000 {
+ bus@80000 {
compatible = "simple-bus";
#size-cells = <1>;
#address-cells = <1>;
@@ -182,6 +208,17 @@
phy-mode = "internal";
phy-handle = <&phy11>;
};
+
+ port@8 {
+ reg = <8>;
+ phy-mode = "internal";
+ ethernet = <&enet>;
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
};
};
@@ -222,7 +259,7 @@
#address-cells = <1>;
#size-cells = <1>;
- power-controller@2800c0 {
+ pmb: power-controller@2800c0 {
compatible = "brcm,bcm4908-pmb";
reg = <0x2800c0 0x40>;
#power-domain-cells = <1>;
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
index 413cac63a1cb..773d9abe3a44 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
@@ -1002,7 +1002,7 @@
ppmu_event0_d1_general: ppmu-event0-d1-general {
event-name = "ppmu-event0-d1-general";
};
- };
+ };
};
&pinctrl_alive {
diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
index 6433f9ee35e1..18a912eee360 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
@@ -564,9 +564,9 @@
compatible = "samsung,exynos5433-slim-sss";
reg = <0x11140000 0x1000>;
interrupts = <GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH>;
- clock-names = "aclk", "pclk";
- clocks = <&cmu_imem CLK_ACLK_SLIMSSS>,
- <&cmu_imem CLK_PCLK_SLIMSSS>;
+ clock-names = "pclk", "aclk";
+ clocks = <&cmu_imem CLK_PCLK_SLIMSSS>,
+ <&cmu_imem CLK_ACLK_SLIMSSS>;
};
pd_gscl: power-domain@105c4000 {
diff --git a/arch/arm64/boot/dts/freescale/Makefile b/arch/arm64/boot/dts/freescale/Makefile
index 6438db3822f8..44890d56c194 100644
--- a/arch/arm64/boot/dts/freescale/Makefile
+++ b/arch/arm64/boot/dts/freescale/Makefile
@@ -33,6 +33,8 @@ dtb-$(CONFIG_ARCH_LAYERSCAPE) += fsl-lx2162a-qds.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-beacon-kit.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-ddr4-evk.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-icore-mx8mm-ctouch2.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mm-icore-mx8mm-edimm2.2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-kontron-n801x-s.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-nitrogen-r2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mm-var-som-symphony.dtb
@@ -47,6 +49,7 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mp-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mp-phyboard-pollux-rdk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-evk.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-hummingboard-pulse.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8mq-kontron-pitx-imx8m.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-librem5-devkit.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-librem5-r2.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-librem5-r3.dtb
@@ -57,6 +60,7 @@ dtb-$(CONFIG_ARCH_MXC) += imx8mq-pico-pi.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-thor96.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-zii-ultra-rmb3.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8mq-zii-ultra-zest.dtb
+dtb-$(CONFIG_ARCH_MXC) += imx8qm-mek.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8qxp-ai_ml.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8qxp-colibri-eval-v3.dtb
dtb-$(CONFIG_ARCH_MXC) += imx8qxp-mek.dtb
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
index 7de6b376d792..9058cfa4980f 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
@@ -198,6 +198,7 @@
ranges = <0x0 0x00 0x1700000 0x100000>;
reg = <0x00 0x1700000 0x0 0x100000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
sec_jr0: jr@10000 {
compatible = "fsl,sec-v5.4-job-ring",
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
index 6c309b97587d..e8d31279b7a3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
@@ -46,7 +46,8 @@
eee-broken-100tx;
qca,clk-out-frequency = <125000000>;
qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
- vddio-supply = <&vddh>;
+ qca,keep-pll-enabled;
+ vddio-supply = <&vddio>;
vddio: vddio-regulator {
regulator-name = "VDDIO";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
index df212ed5bb94..e65d1c477e2c 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
@@ -31,11 +31,10 @@
reg = <0x4>;
eee-broken-1000t;
eee-broken-100tx;
-
qca,clk-out-frequency = <125000000>;
qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
-
- vddio-supply = <&vddh>;
+ qca,keep-pll-enabled;
+ vddio-supply = <&vddio>;
vddio: vddio-regulator {
regulator-name = "VDDIO";
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
index 0516076087ae..a92ecb331cdc 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
@@ -25,6 +25,8 @@
spi1 = &dspi2;
mmc0 = &esdhc1;
mmc1 = &esdhc;
+ rtc0 = &rtc;
+ rtc1 = &ftm_alarm0;
};
buttons0 {
@@ -115,8 +117,6 @@
status = "okay";
flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
compatible = "jedec,spi-nor";
m25p,fast-read;
spi-max-frequency = <133000000>;
@@ -125,49 +125,37 @@
spi-rx-bus-width = <2>; /* 2 SPI Rx lines */
spi-tx-bus-width = <1>; /* 1 SPI Tx line */
- partition@0 {
- reg = <0x000000 0x010000>;
- label = "rcw";
- read-only;
- };
-
- partition@10000 {
- reg = <0x010000 0x0f0000>;
- label = "failsafe bootloader";
- read-only;
- };
-
- partition@100000 {
- reg = <0x100000 0x040000>;
- label = "failsafe DP firmware";
- read-only;
- };
-
- partition@140000 {
- reg = <0x140000 0x0a0000>;
- label = "failsafe trusted firmware";
- read-only;
- };
-
- partition@1e0000 {
- reg = <0x1e0000 0x020000>;
- label = "reserved";
- read-only;
- };
-
- partition@200000 {
- reg = <0x200000 0x010000>;
- label = "configuration store";
- };
-
- partition@210000 {
- reg = <0x210000 0x1d0000>;
- label = "bootloader";
- };
-
- partition@3e0000 {
- reg = <0x3e0000 0x020000>;
- label = "bootloader environment";
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ reg = <0x000000 0x010000>;
+ label = "rcw";
+ read-only;
+ };
+
+ partition@10000 {
+ reg = <0x010000 0x1d0000>;
+ label = "failsafe bootloader";
+ read-only;
+ };
+
+ partition@200000 {
+ reg = <0x200000 0x010000>;
+ label = "configuration store";
+ };
+
+ partition@210000 {
+ reg = <0x210000 0x1d0000>;
+ label = "bootloader";
+ };
+
+ partition@3e0000 {
+ reg = <0x3e0000 0x020000>;
+ label = "bootloader environment";
+ };
};
};
};
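
The sl28 rework above moves the SPI-NOR layout into a partitions container
with compatible = "fixed-partitions" (the current binding form) instead of
hanging partition nodes directly off the flash node, and merges the old
failsafe bootloader, DP firmware, trusted firmware and reserved slices into a
single read-only "failsafe bootloader" region. The container shape in brief:

        flash@0 {
                compatible = "jedec,spi-nor";

                partitions {
                        compatible = "fixed-partitions";
                        #address-cells = <1>;
                        #size-cells = <1>;

                        partition@0 {
                                label = "rcw";
                                reg = <0x000000 0x010000>;
                                read-only;
                        };
                };
        };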
@@ -191,7 +179,7 @@
&i2c0 {
status = "okay";
- rtc@32 {
+ rtc: rtc@32 {
compatible = "microcrystal,rv8803";
reg = <0x32>;
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
index fbcba9cb8503..bfd14b64567e 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds.dts
@@ -25,6 +25,7 @@
serial1 = &duart1;
mmc0 = &esdhc;
mmc1 = &esdhc1;
+ rtc1 = &ftm_alarm0;
};
chosen {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
index 41ae6e7675ba..9322c6ad8e4a 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
@@ -21,6 +21,7 @@
serial1 = &duart1;
mmc0 = &esdhc;
mmc1 = &esdhc1;
+ rtc1 = &ftm_alarm0;
};
chosen {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
index 262fbad8f0ec..a30249ebffa8 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
@@ -18,10 +18,6 @@
#address-cells = <2>;
#size-cells = <2>;
- aliases {
- rtc1 = &ftm_alarm0;
- };
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -201,8 +197,8 @@
ddr: memory-controller@1080000 {
compatible = "fsl,qoriq-memory-controller";
reg = <0x0 0x1080000 0x0 0x1000>;
- interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
- big-endian;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ little-endian;
};
dcfg: syscon@1e00000 {
@@ -1027,7 +1023,7 @@
status = "disabled";
fixed-link {
- speed = <1000>;
+ speed = <2500>;
full-duplex;
};
};
@@ -1114,6 +1110,18 @@
full-duplex;
};
};
+
+ rcec@1f,0 {
+ reg = <0x00f800 0 0 0 0>;
+ /* IEP INT_A */
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ /* Integrated Endpoint Register Block */
+ ierb@1f0800000 {
+ compatible = "fsl,ls1028a-enetc-ierb";
+ reg = <0x01 0xf0800000 0x0 0x10000>;
};
rcpm: power-controller@1e34040 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
index 5a8a1dc4262d..28c51e521cb2 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
@@ -348,6 +348,7 @@
ranges = <0x0 0x00 0x1700000 0x100000>;
reg = <0x00 0x1700000 0x0 0x100000>;
interrupts = <0 75 0x4>;
+ dma-coherent;
sec_jr0: jr@10000 {
compatible = "fsl,sec-v5.4-job-ring",
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
index 1d6dfd189c7f..39458305e333 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
@@ -354,6 +354,7 @@
ranges = <0x0 0x00 0x1700000 0x100000>;
reg = <0x00 0x1700000 0x0 0x100000>;
interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+ dma-coherent;
sec_jr0: jr@10000 {
compatible = "fsl,sec-v5.4-job-ring",
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
index 459dccad8326..afb455210bd0 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-cex7.dtsi
@@ -122,6 +122,30 @@
vcc-supply = <&sb_3v3>;
};
};
+
+ sfp0_i2c: i2c@4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <4>;
+ };
+
+ sfp1_i2c: i2c@5 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <5>;
+ };
+
+ sfp2_i2c: i2c@6 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <6>;
+ };
+
+ sfp3_i2c: i2c@7 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <7>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi b/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
index 2b63235ca627..17f8e733972a 100644
--- a/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-lx2160a-clearfog-itx.dtsi
@@ -30,6 +30,54 @@
gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
};
};
+
+ sfp0: sfp-0 {
+ compatible = "sff,sfp";
+ i2c-bus = <&sfp0_i2c>;
+ mod-def0-gpio = <&gpio2 0 GPIO_ACTIVE_LOW>;
+ maximum-power-milliwatt = <2000>;
+ };
+
+ sfp1: sfp-1 {
+ compatible = "sff,sfp";
+ i2c-bus = <&sfp1_i2c>;
+ mod-def0-gpio = <&gpio2 9 GPIO_ACTIVE_LOW>;
+ maximum-power-milliwatt = <2000>;
+ };
+
+ sfp2: sfp-2 {
+ compatible = "sff,sfp";
+ i2c-bus = <&sfp2_i2c>;
+ mod-def0-gpio = <&gpio2 10 GPIO_ACTIVE_LOW>;
+ maximum-power-milliwatt = <2000>;
+ };
+
+ sfp3: sfp-3 {
+ compatible = "sff,sfp";
+ i2c-bus = <&sfp3_i2c>;
+ mod-def0-gpio = <&gpio2 11 GPIO_ACTIVE_LOW>;
+ maximum-power-milliwatt = <2000>;
+ };
+};
+
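+/*
+ * Each dpmac below is wired to one of the SFP cages defined above and
+ * uses in-band-status, so link state is taken from the module itself.
+ */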
+&dpmac7 {
+ sfp = <&sfp0>;
+ managed = "in-band-status";
+};
+
+&dpmac8 {
+ sfp = <&sfp1>;
+ managed = "in-band-status";
+};
+
+&dpmac9 {
+ sfp = <&sfp2>;
+ managed = "in-band-status";
+};
+
+&dpmac10 {
+ sfp = <&sfp3>;
+ managed = "in-band-status";
};
&emdio2 {
@@ -44,6 +92,22 @@
status = "okay";
};
+&pcs_mdio7 {
+ status = "okay";
+};
+
+&pcs_mdio8 {
+ status = "okay";
+};
+
+&pcs_mdio9 {
+ status = "okay";
+};
+
+&pcs_mdio10 {
+ status = "okay";
+};
+
&sata0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-adma.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-adma.dtsi
new file mode 100644
index 000000000000..9386d1a59e82
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-adma.dtsi
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018-2020 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include "imx8-ss-audio.dtsi"
+#include "imx8-ss-dma.dtsi"
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi
new file mode 100644
index 000000000000..6c8d75ef9250
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-audio.dtsi
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018-2019 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include <dt-bindings/clock/imx8-lpcg.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+audio_subsys: bus@59000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x59000000 0x0 0x59000000 0x1000000>;
+
+ audio_ipg_clk: clock-audio-ipg {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <120000000>;
+ clock-output-names = "audio_ipg_clk";
+ };
+
+ dsp_lpcg: clock-controller@59580000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59580000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&audio_ipg_clk>,
+ <&audio_ipg_clk>,
+ <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>,
+ <IMX_LPCG_CLK_7>;
+ clock-output-names = "dsp_lpcg_adb_clk",
+ "dsp_lpcg_ipg_clk",
+ "dsp_lpcg_core_clk";
+ power-domains = <&pd IMX_SC_R_DSP>;
+ };
+
+ dsp_ram_lpcg: clock-controller@59590000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x59590000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&audio_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_4>;
+ clock-output-names = "dsp_ram_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_DSP_RAM>;
+ };
+
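+ /*
+ * The DSP signals the SCU firmware through MU13. With #mbox-cells = <2>,
+ * the first cell is assumed to pick the channel type (2 = TX doorbell,
+ * 3 = RX doorbell) and the second the doorbell index, matching the
+ * txdbN/rxdbN mbox-names below.
+ */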
+ dsp: dsp@596e8000 {
+ compatible = "fsl,imx8qxp-dsp";
+ reg = <0x596e8000 0x88000>;
+ clocks = <&dsp_lpcg IMX_LPCG_CLK_5>,
+ <&dsp_ram_lpcg IMX_LPCG_CLK_4>,
+ <&dsp_lpcg IMX_LPCG_CLK_7>;
+ clock-names = "ipg", "ocram", "core";
+ power-domains = <&pd IMX_SC_R_MU_13A>,
+ <&pd IMX_SC_R_MU_13B>,
+ <&pd IMX_SC_R_DSP>,
+ <&pd IMX_SC_R_DSP_RAM>;
+ mbox-names = "txdb0", "txdb1",
+ "rxdb0", "rxdb1";
+ mboxes = <&lsio_mu13 2 0>,
+ <&lsio_mu13 2 1>,
+ <&lsio_mu13 3 0>,
+ <&lsio_mu13 3 1>;
+ memory-region = <&dsp_reserved>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
new file mode 100644
index 000000000000..e1e81ca0ca69
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018-2019 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include <dt-bindings/clock/imx8-lpcg.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+conn_subsys: bus@5b000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x5b000000 0x0 0x5b000000 0x1000000>;
+
+ conn_axi_clk: clock-conn-axi {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <333333333>;
+ clock-output-names = "conn_axi_clk";
+ };
+
+ conn_ahb_clk: clock-conn-ahb {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <166666666>;
+ clock-output-names = "conn_ahb_clk";
+ };
+
+ conn_ipg_clk: clock-conn-ipg {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <83333333>;
+ clock-output-names = "conn_ipg_clk";
+ };
+
+ usdhc1: mmc@5b010000 {
+ interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b010000 0x10000>;
+ clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>,
+ <&sdhc0_lpcg IMX_LPCG_CLK_5>,
+ <&sdhc0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "per", "ahb";
+ power-domains = <&pd IMX_SC_R_SDHC_0>;
+ status = "disabled";
+ };
+
+ usdhc2: mmc@5b020000 {
+ interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b020000 0x10000>;
+ clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>,
+ <&sdhc1_lpcg IMX_LPCG_CLK_5>,
+ <&sdhc1_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "per", "ahb";
+ power-domains = <&pd IMX_SC_R_SDHC_1>;
+ fsl,tuning-start-tap = <20>;
+ fsl,tuning-step = <2>;
+ status = "disabled";
+ };
+
+ usdhc3: mmc@5b030000 {
+ interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0x5b030000 0x10000>;
+ clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>,
+ <&sdhc2_lpcg IMX_LPCG_CLK_5>,
+ <&sdhc2_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "per", "ahb";
+ power-domains = <&pd IMX_SC_R_SDHC_2>;
+ status = "disabled";
+ };
+
+ fec1: ethernet@5b040000 {
+ reg = <0x5b040000 0x10000>;
+ interrupts = <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&enet0_lpcg IMX_LPCG_CLK_4>,
+ <&enet0_lpcg IMX_LPCG_CLK_2>,
+ <&enet0_lpcg IMX_LPCG_CLK_1>,
+ <&enet0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "ahb", "enet_clk_ref", "ptp";
+ fsl,num-tx-queues = <3>;
+ fsl,num-rx-queues = <3>;
+ power-domains = <&pd IMX_SC_R_ENET_0>;
+ status = "disabled";
+ };
+
+ fec2: ethernet@5b050000 {
+ reg = <0x5b050000 0x10000>;
+ interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&enet1_lpcg IMX_LPCG_CLK_4>,
+ <&enet1_lpcg IMX_LPCG_CLK_2>,
+ <&enet1_lpcg IMX_LPCG_CLK_1>,
+ <&enet1_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "ahb", "enet_clk_ref", "ptp";
+ fsl,num-tx-queues = <3>;
+ fsl,num-rx-queues = <3>;
+ power-domains = <&pd IMX_SC_R_ENET_1>;
+ status = "disabled";
+ };
+
+ /* LPCG clocks */
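+ /*
+ * LPCG gate usage, as assumed from the fsl,imx8qxp-lpcg binding: a
+ * consumer references a gate as <&sdhc0_lpcg IMX_LPCG_CLK_n>, where n
+ * matches an entry in clock-indices and the same position in "clocks"
+ * names that gate's parent.
+ */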
+ sdhc0_lpcg: clock-controller@5b200000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5b200000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_SDHC_0 IMX_SC_PM_CLK_PER>,
+ <&conn_ipg_clk>, <&conn_axi_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>,
+ <IMX_LPCG_CLK_5>;
+ clock-output-names = "sdhc0_lpcg_per_clk",
+ "sdhc0_lpcg_ipg_clk",
+ "sdhc0_lpcg_ahb_clk";
+ power-domains = <&pd IMX_SC_R_SDHC_0>;
+ };
+
+ sdhc1_lpcg: clock-controller@5b210000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5b210000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_SDHC_1 IMX_SC_PM_CLK_PER>,
+ <&conn_ipg_clk>, <&conn_axi_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>,
+ <IMX_LPCG_CLK_5>;
+ clock-output-names = "sdhc1_lpcg_per_clk",
+ "sdhc1_lpcg_ipg_clk",
+ "sdhc1_lpcg_ahb_clk";
+ power-domains = <&pd IMX_SC_R_SDHC_1>;
+ };
+
+ sdhc2_lpcg: clock-controller@5b220000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5b220000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_SDHC_2 IMX_SC_PM_CLK_PER>,
+ <&conn_ipg_clk>, <&conn_axi_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>,
+ <IMX_LPCG_CLK_5>;
+ clock-output-names = "sdhc2_lpcg_per_clk",
+ "sdhc2_lpcg_ipg_clk",
+ "sdhc2_lpcg_ahb_clk";
+ power-domains = <&pd IMX_SC_R_SDHC_2>;
+ };
+
+ enet0_lpcg: clock-controller@5b230000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5b230000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_ENET_0 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_ENET_0 IMX_SC_PM_CLK_PER>,
+ <&conn_axi_clk>, <&conn_ipg_clk>, <&conn_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>,
+ <IMX_LPCG_CLK_2>, <IMX_LPCG_CLK_4>,
+ <IMX_LPCG_CLK_5>;
+ clock-output-names = "enet0_ipg_root_clk",
+ "enet0_tx_clk",
+ "enet0_ahb_clk",
+ "enet0_ipg_clk",
+ "enet0_ipg_s_clk";
+ power-domains = <&pd IMX_SC_R_ENET_0>;
+ };
+
+ enet1_lpcg: clock-controller@5b240000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5b240000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_ENET_1 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_ENET_1 IMX_SC_PM_CLK_PER>,
+ <&conn_axi_clk>, <&conn_ipg_clk>, <&conn_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>,
+ <IMX_LPCG_CLK_2>, <IMX_LPCG_CLK_4>,
+ <IMX_LPCG_CLK_5>;
+ clock-output-names = "enet1_ipg_root_clk",
+ "enet1_tx_clk",
+ "enet1_ahb_clk",
+ "enet1_ipg_clk",
+ "enet1_ipg_s_clk";
+ power-domains = <&pd IMX_SC_R_ENET_1>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-ddr.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-ddr.dtsi
new file mode 100644
index 000000000000..8b5cad4e2700
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-ddr.dtsi
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019-2020 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+ddr_subsys: bus@5c000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x5c000000 0x0 0x5c000000 0x1000000>;
+
+ ddr-pmu@5c020000 {
+ compatible = "fsl,imx8-ddr-pmu";
+ reg = <0x5c020000 0x10000>;
+ interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
new file mode 100644
index 000000000000..960a802b8b6e
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018-2019 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include <dt-bindings/clock/imx8-lpcg.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+dma_subsys: bus@5a000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x5a000000 0x0 0x5a000000 0x1000000>;
+
+ dma_ipg_clk: clock-dma-ipg {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <120000000>;
+ clock-output-names = "dma_ipg_clk";
+ };
+
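+ /*
+ * Each lpuart below takes its "ipg" and "baud" clocks from the matching
+ * uartN_lpcg gate defined further down in this file.
+ */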
+ lpuart0: serial@5a060000 {
+ reg = <0x5a060000 0x1000>;
+ interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&uart0_lpcg IMX_LPCG_CLK_4>,
+ <&uart0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "baud";
+ power-domains = <&pd IMX_SC_R_UART_0>;
+ status = "disabled";
+ };
+
+ lpuart1: serial@5a070000 {
+ reg = <0x5a070000 0x1000>;
+ interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&uart1_lpcg IMX_LPCG_CLK_4>,
+ <&uart1_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "baud";
+ power-domains = <&pd IMX_SC_R_UART_1>;
+ status = "disabled";
+ };
+
+ lpuart2: serial@5a080000 {
+ reg = <0x5a080000 0x1000>;
+ interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&uart2_lpcg IMX_LPCG_CLK_4>,
+ <&uart2_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "baud";
+ power-domains = <&pd IMX_SC_R_UART_2>;
+ status = "disabled";
+ };
+
+ lpuart3: serial@5a090000 {
+ reg = <0x5a090000 0x1000>;
+ interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&uart3_lpcg IMX_LPCG_CLK_4>,
+ <&uart3_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "ipg", "baud";
+ power-domains = <&pd IMX_SC_R_UART_3>;
+ status = "disabled";
+ };
+
+ uart0_lpcg: clock-controller@5a460000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5a460000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_UART_0 IMX_SC_PM_CLK_PER>,
+ <&dma_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "uart0_lpcg_baud_clk",
+ "uart0_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_UART_0>;
+ };
+
+ uart1_lpcg: clock-controller@5a470000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5a470000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_UART_1 IMX_SC_PM_CLK_PER>,
+ <&dma_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "uart1_lpcg_baud_clk",
+ "uart1_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_UART_1>;
+ };
+
+ uart2_lpcg: clock-controller@5a480000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5a480000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_UART_2 IMX_SC_PM_CLK_PER>,
+ <&dma_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "uart2_lpcg_baud_clk",
+ "uart2_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_UART_2>;
+ };
+
+ uart3_lpcg: clock-controller@5a490000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5a490000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_UART_3 IMX_SC_PM_CLK_PER>,
+ <&dma_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "uart3_lpcg_baud_clk",
+ "uart3_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_UART_3>;
+ };
+
+ i2c0: i2c@5a800000 {
+ reg = <0x5a800000 0x4000>;
+ interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&i2c0_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "per";
+ assigned-clocks = <&clk IMX_SC_R_I2C_0 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_I2C_0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@5a810000 {
+ reg = <0x5a810000 0x4000>;
+ interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&i2c1_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "per";
+ assigned-clocks = <&clk IMX_SC_R_I2C_1 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_I2C_1>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@5a820000 {
+ reg = <0x5a820000 0x4000>;
+ interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&i2c2_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "per";
+ assigned-clocks = <&clk IMX_SC_R_I2C_2 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_I2C_2>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@5a830000 {
+ reg = <0x5a830000 0x4000>;
+ interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&i2c3_lpcg IMX_LPCG_CLK_0>;
+ clock-names = "per";
+ assigned-clocks = <&clk IMX_SC_R_I2C_3 IMX_SC_PM_CLK_PER>;
+ assigned-clock-rates = <24000000>;
+ power-domains = <&pd IMX_SC_R_I2C_3>;
+ status = "disabled";
+ };
+
+ i2c0_lpcg: clock-controller@5ac00000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5ac00000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_I2C_0 IMX_SC_PM_CLK_PER>,
+ <&dma_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "i2c0_lpcg_clk",
+ "i2c0_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_I2C_0>;
+ };
+
+ i2c1_lpcg: clock-controller@5ac10000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5ac10000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_I2C_1 IMX_SC_PM_CLK_PER>,
+ <&dma_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "i2c1_lpcg_clk",
+ "i2c1_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_I2C_1>;
+ };
+
+ i2c2_lpcg: clock-controller@5ac20000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5ac20000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_I2C_2 IMX_SC_PM_CLK_PER>,
+ <&dma_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "i2c2_lpcg_clk",
+ "i2c2_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_I2C_2>;
+ };
+
+ i2c3_lpcg: clock-controller@5ac30000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5ac30000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_I2C_3 IMX_SC_PM_CLK_PER>,
+ <&dma_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "i2c3_lpcg_clk",
+ "i2c3_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_I2C_3>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
new file mode 100644
index 000000000000..ee4e585a9c39
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018-2020 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include <dt-bindings/clock/imx8-lpcg.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+lsio_subsys: bus@5d000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x5d000000 0x0 0x5d000000 0x1000000>;
+
+ lsio_mem_clk: clock-lsio-mem {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <200000000>;
+ clock-output-names = "lsio_mem_clk";
+ };
+
+ lsio_bus_clk: clock-lsio-bus {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ clock-output-names = "lsio_bus_clk";
+ };
+
+ lsio_gpio0: gpio@5d080000 {
+ reg = <0x5d080000 0x10000>;
+ interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_0>;
+ };
+
+ lsio_gpio1: gpio@5d090000 {
+ reg = <0x5d090000 0x10000>;
+ interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_1>;
+ };
+
+ lsio_gpio2: gpio@5d0a0000 {
+ reg = <0x5d0a0000 0x10000>;
+ interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_2>;
+ };
+
+ lsio_gpio3: gpio@5d0b0000 {
+ reg = <0x5d0b0000 0x10000>;
+ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_3>;
+ };
+
+ lsio_gpio4: gpio@5d0c0000 {
+ reg = <0x5d0c0000 0x10000>;
+ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_4>;
+ };
+
+ lsio_gpio5: gpio@5d0d0000 {
+ reg = <0x5d0d0000 0x10000>;
+ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_5>;
+ };
+
+ lsio_gpio6: gpio@5d0e0000 {
+ reg = <0x5d0e0000 0x10000>;
+ interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_6>;
+ };
+
+ lsio_gpio7: gpio@5d0f0000 {
+ reg = <0x5d0f0000 0x10000>;
+ interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ power-domains = <&pd IMX_SC_R_GPIO_7>;
+ };
+
+ lsio_mu0: mailbox@5d1b0000 {
+ reg = <0x5d1b0000 0x10000>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ lsio_mu1: mailbox@5d1c0000 {
+ reg = <0x5d1c0000 0x10000>;
+ interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ };
+
+ lsio_mu2: mailbox@5d1d0000 {
+ reg = <0x5d1d0000 0x10000>;
+ interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ lsio_mu3: mailbox@5d1e0000 {
+ reg = <0x5d1e0000 0x10000>;
+ interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ lsio_mu4: mailbox@5d1f0000 {
+ reg = <0x5d1f0000 0x10000>;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ status = "disabled";
+ };
+
+ lsio_mu13: mailbox@5d280000 {
+ reg = <0x5d280000 0x10000>;
+ interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <2>;
+ power-domains = <&pd IMX_SC_R_MU_13A>;
+ };
+
+ /* LPCG clocks */
+ pwm0_lpcg: clock-controller@5d400000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5d400000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>,
+ <&lsio_bus_clk>,
+ <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>,
+ <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>,
+ <IMX_LPCG_CLK_6>;
+ clock-output-names = "pwm0_lpcg_ipg_clk",
+ "pwm0_lpcg_ipg_hf_clk",
+ "pwm0_lpcg_ipg_s_clk",
+ "pwm0_lpcg_ipg_slv_clk",
+ "pwm0_lpcg_ipg_mstr_clk";
+ power-domains = <&pd IMX_SC_R_PWM_0>;
+ };
+
+ pwm1_lpcg: clock-controller@5d410000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5d410000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>,
+ <&lsio_bus_clk>,
+ <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>,
+ <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>,
+ <IMX_LPCG_CLK_6>;
+ clock-output-names = "pwm1_lpcg_ipg_clk",
+ "pwm1_lpcg_ipg_hf_clk",
+ "pwm1_lpcg_ipg_s_clk",
+ "pwm1_lpcg_ipg_slv_clk",
+ "pwm1_lpcg_ipg_mstr_clk";
+ power-domains = <&pd IMX_SC_R_PWM_1>;
+ };
+
+ pwm2_lpcg: clock-controller@5d420000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5d420000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>,
+ <&lsio_bus_clk>,
+ <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>,
+ <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>,
+ <IMX_LPCG_CLK_6>;
+ clock-output-names = "pwm2_lpcg_ipg_clk",
+ "pwm2_lpcg_ipg_hf_clk",
+ "pwm2_lpcg_ipg_s_clk",
+ "pwm2_lpcg_ipg_slv_clk",
+ "pwm2_lpcg_ipg_mstr_clk";
+ power-domains = <&pd IMX_SC_R_PWM_2>;
+ };
+
+ pwm3_lpcg: clock-controller@5d430000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5d430000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>,
+ <&lsio_bus_clk>,
+ <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>,
+ <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>,
+ <IMX_LPCG_CLK_6>;
+ clock-output-names = "pwm3_lpcg_ipg_clk",
+ "pwm3_lpcg_ipg_hf_clk",
+ "pwm3_lpcg_ipg_s_clk",
+ "pwm3_lpcg_ipg_slv_clk",
+ "pwm3_lpcg_ipg_mstr_clk";
+ power-domains = <&pd IMX_SC_R_PWM_3>;
+ };
+
+ pwm4_lpcg: clock-controller@5d440000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5d440000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_PWM_4 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_4 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_4 IMX_SC_PM_CLK_PER>,
+ <&lsio_bus_clk>,
+ <&clk IMX_SC_R_PWM_4 IMX_SC_PM_CLK_PER>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>,
+ <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>,
+ <IMX_LPCG_CLK_6>;
+ clock-output-names = "pwm4_lpcg_ipg_clk",
+ "pwm4_lpcg_ipg_hf_clk",
+ "pwm4_lpcg_ipg_s_clk",
+ "pwm4_lpcg_ipg_slv_clk",
+ "pwm4_lpcg_ipg_mstr_clk";
+ power-domains = <&pd IMX_SC_R_PWM_4>;
+ };
+
+ pwm5_lpcg: clock-controller@5d450000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5d450000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_PWM_5 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_5 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_5 IMX_SC_PM_CLK_PER>,
+ <&lsio_bus_clk>,
+ <&clk IMX_SC_R_PWM_5 IMX_SC_PM_CLK_PER>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>,
+ <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>,
+ <IMX_LPCG_CLK_6>;
+ clock-output-names = "pwm5_lpcg_ipg_clk",
+ "pwm5_lpcg_ipg_hf_clk",
+ "pwm5_lpcg_ipg_s_clk",
+ "pwm5_lpcg_ipg_slv_clk",
+ "pwm5_lpcg_ipg_mstr_clk";
+ power-domains = <&pd IMX_SC_R_PWM_5>;
+ };
+
+ pwm6_lpcg: clock-controller@5d460000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5d460000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_PWM_6 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_6 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_6 IMX_SC_PM_CLK_PER>,
+ <&lsio_bus_clk>,
+ <&clk IMX_SC_R_PWM_6 IMX_SC_PM_CLK_PER>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>,
+ <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>,
+ <IMX_LPCG_CLK_6>;
+ clock-output-names = "pwm6_lpcg_ipg_clk",
+ "pwm6_lpcg_ipg_hf_clk",
+ "pwm6_lpcg_ipg_s_clk",
+ "pwm6_lpcg_ipg_slv_clk",
+ "pwm6_lpcg_ipg_mstr_clk";
+ power-domains = <&pd IMX_SC_R_PWM_6>;
+ };
+
+ pwm7_lpcg: clock-controller@5d470000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5d470000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_PWM_7 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_7 IMX_SC_PM_CLK_PER>,
+ <&clk IMX_SC_R_PWM_7 IMX_SC_PM_CLK_PER>,
+ <&lsio_bus_clk>,
+ <&clk IMX_SC_R_PWM_7 IMX_SC_PM_CLK_PER>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_1>,
+ <IMX_LPCG_CLK_4>, <IMX_LPCG_CLK_5>,
+ <IMX_LPCG_CLK_6>;
+ clock-output-names = "pwm7_lpcg_ipg_clk",
+ "pwm7_lpcg_ipg_hf_clk",
+ "pwm7_lpcg_ipg_s_clk",
+ "pwm7_lpcg_ipg_slv_clk",
+ "pwm7_lpcg_ipg_mstr_clk";
+ power-domains = <&pd IMX_SC_R_PWM_7>;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm-ctouch2.dts b/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm-ctouch2.dts
new file mode 100644
index 000000000000..5389d6f2beba
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm-ctouch2.dts
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 NXP
+ * Copyright (c) 2019 Engicam srl
+ * Copyright (c) 2020 Amarula Solutions (India)
+ */
+
+/dts-v1/;
+#include "imx8mm.dtsi"
+#include "imx8mm-icore-mx8mm.dtsi"
+
+/ {
+ model = "Engicam i.Core MX8M Mini C.TOUCH 2.0";
+ compatible = "engicam,icore-mx8mm-ctouch2", "engicam,icore-mx8mm",
+ "fsl,imx8mm";
+
+ chosen {
+ stdout-path = &uart2;
+ };
+};
+
+&fec1 {
+ status = "okay";
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+};
+
+&i2c4 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C2_SCL_I2C2_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C2_SDA_I2C2_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140
+ MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_usdhc1_gpio: usdhc1gpiogrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO06_GPIO1_IO6 0x41
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x190
+ MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d0
+ MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d0
+ MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d0
+ MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d0
+ MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d0
+ >;
+ };
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+/* SD */
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>, <&pinctrl_usdhc1_gpio>;
+ cd-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
+ max-frequency = <50000000>;
+ bus-width = <4>;
+ no-1-8-v;
+ pm-ignore-notify;
+ keep-power-in-suspend;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm-edimm2.2.dts b/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm-edimm2.2.dts
new file mode 100644
index 000000000000..a4a2ada14835
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm-edimm2.2.dts
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2019 NXP
+ * Copyright (c) 2019 Engicam srl
+ * Copyright (c) 2020 Amarula Solutions (India)
+ */
+
+/dts-v1/;
+#include "imx8mm.dtsi"
+#include "imx8mm-icore-mx8mm.dtsi"
+
+/ {
+ model = "Engicam i.Core MX8M Mini EDIMM2.2 Starter Kit";
+ compatible = "engicam,icore-mx8mm-edimm2.2", "engicam,icore-mx8mm",
+ "fsl,imx8mm";
+
+ chosen {
+ stdout-path = &uart2;
+ };
+};
+
+&fec1 {
+ status = "okay";
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+};
+
+&i2c4 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C2_SCL_I2C2_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C2_SDA_I2C2_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140
+ MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_usdhc1_gpio: usdhc1gpiogrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO06_GPIO1_IO6 0x41
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x190
+ MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x1d0
+ MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x1d0
+ MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x1d0
+ MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x1d0
+ MX8MM_IOMUXC_SD1_DATA3_USDHC1_DATA3 0x1d0
+ >;
+ };
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ status = "okay";
+};
+
+/* SD */
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>, <&pinctrl_usdhc1_gpio>;
+ cd-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
+ max-frequency = <50000000>;
+ bus-width = <4>;
+ no-1-8-v;
+ pm-ignore-notify;
+ keep-power-in-suspend;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm.dtsi
new file mode 100644
index 000000000000..b40148d728ea
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mm-icore-mx8mm.dtsi
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2018 NXP
+ * Copyright (c) 2019 Engicam srl
+ * Copyright (c) 2020 Amarula Solutions (India)
+ */
+
+/ {
+ compatible = "engicam,icore-mx8mm", "fsl,imx8mm";
+};
+
+&A53_0 {
+ cpu-supply = <&reg_buck4>;
+};
+
+&A53_1 {
+ cpu-supply = <&reg_buck4>;
+};
+
+&A53_2 {
+ cpu-supply = <&reg_buck4>;
+};
+
+&A53_3 {
+ cpu-supply = <&reg_buck4>;
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy: ethernet-phy@3 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <3>;
+ reset-gpios = <&gpio3 7 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ pmic@8 {
+ compatible = "nxp,pf8121a";
+ reg = <0x08>;
+
+ regulators {
+ reg_ldo1: ldo1 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_ldo2: ldo2 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_ldo3: ldo3 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_ldo4: ldo4 {
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_buck1: buck1 {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_buck2: buck2 {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_buck3: buck3 {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_buck4: buck4 {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_buck5: buck5 {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_buck6: buck6 {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_buck7: buck7 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ reg_vsnvs: vsnvs {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+ };
+ };
+};
+
+&iomuxc {
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x3
+ MX8MM_IOMUXC_ENET_MDIO_ENET1_MDIO 0x3
+ MX8MM_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f
+ MX8MM_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f
+ MX8MM_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f
+ MX8MM_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f
+ MX8MM_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91
+ MX8MM_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91
+ MX8MM_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91
+ MX8MM_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91
+ MX8MM_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f
+ MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
+ MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
+ MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+ MX8MM_IOMUXC_NAND_DATA01_GPIO3_IO7 0x19
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C1_SCL_I2C1_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C1_SDA_I2C1_SDA 0x400001c3
+ >;
+ };
+
+ pinctrl_usdhc3: usdhc3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x190
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d0
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d0
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d0
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d0
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d0
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d0
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d0
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d0
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d0
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x190
+ >;
+ };
+
+ pinctrl_usdhc3_100mhz: usdhc3-100mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x194
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d4
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d4
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d4
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d4
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d4
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d4
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d4
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d4
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d4
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x194
+ >;
+ };
+
+ pinctrl_usdhc3_200mhz: usdhc3-200mhzgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_WE_B_USDHC3_CLK 0x196
+ MX8MM_IOMUXC_NAND_WP_B_USDHC3_CMD 0x1d6
+ MX8MM_IOMUXC_NAND_DATA04_USDHC3_DATA0 0x1d6
+ MX8MM_IOMUXC_NAND_DATA05_USDHC3_DATA1 0x1d6
+ MX8MM_IOMUXC_NAND_DATA06_USDHC3_DATA2 0x1d6
+ MX8MM_IOMUXC_NAND_DATA07_USDHC3_DATA3 0x1d6
+ MX8MM_IOMUXC_NAND_RE_B_USDHC3_DATA4 0x1d6
+ MX8MM_IOMUXC_NAND_CE2_B_USDHC3_DATA5 0x1d6
+ MX8MM_IOMUXC_NAND_CE3_B_USDHC3_DATA6 0x1d6
+ MX8MM_IOMUXC_NAND_CLE_USDHC3_DATA7 0x1d6
+ MX8MM_IOMUXC_NAND_CE1_B_USDHC3_STROBE 0x196
+ >;
+ };
+};
+
+/* eMMC */
+&usdhc3 {
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ bus-width = <8>;
+ non-removable;
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
index c0c384d76147..74c09891600f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
@@ -9,6 +9,53 @@
/ {
model = "Boundary Devices i.MX8MMini Nitrogen8MM Rev2";
compatible = "boundary,imx8mm-nitrogen8mm", "fsl,imx8mm";
+
+ reg_vref_1v8: regulator-vref-1v8 {
+ compatible = "regulator-fixed";
+ regulator-name = "vref-1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ reg_vref_3v3: regulator-vref-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "vref-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ reg_wlan_vmmc: regulator-wlan-vmmc {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_wlan_vmmc>;
+ regulator-name = "reg_wlan_vmmc";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 20 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+
+ sound-wm8960 {
+ audio-cpu = <&sai1>;
+ audio-codec = <&wm8960>;
+ audio-routing =
+ "Headphone Jack", "HP_L",
+ "Headphone Jack", "HP_R",
+ "Ext Spk", "SPK_LP",
+ "Ext Spk", "SPK_LN",
+ "Ext Spk", "SPK_RP",
+ "Ext Spk", "SPK_RN",
+ "RINPUT1", "Mic Jack",
+ "Mic Jack", "MICB";
+ compatible = "fsl,imx-audio-wm8960";
+ /* JD2: hp detect high for headphone */
+ hp-det-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
+ /* Jack is not stuffed */
+ mic-det-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>;
+ model = "wm8960-audio";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sound_wm8960>;
+ };
};
&A53_0 {
@@ -27,6 +74,17 @@
cpu-supply = <&reg_buck3>;
};
+/* J15 */
+&ecspi2 {
+ assigned-clocks = <&clk IMX8MM_CLK_ECSPI2>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_40M>;
+ assigned-clock-rates = <40000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi2>;
+ cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
+ status = "okay";
+};
+
&fec1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec1>;
@@ -47,6 +105,12 @@
};
};
+&flexspi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_flexspi>;
+ status = "okay";
+};
+
&i2c1 {
clock-frequency = <100000>;
pinctrl-names = "default";
@@ -156,7 +220,7 @@
#address-cells = <1>;
#size-cells = <0>;
- i2c3 {
+ i2c3@0 {
reg = <0>;
#address-cells = <1>;
#size-cells = <0>;
@@ -173,12 +237,88 @@
};
};
+&i2c4 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
+ status = "okay";
+
+ wm8960: codec@1a {
+ compatible = "wlf,wm8960";
+ reg = <0x1a>;
+ clocks = <&clk IMX8MM_CLK_SAI1_ROOT>;
+ clock-names = "mclk1";
+ wlf,shared-lrclk;
+ #sound-dai-cells = <0>;
+ };
+};
+
+&pwm1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm1>;
+ status = "okay";
+};
+
+&pwm2 {
+ assigned-clocks = <&clk IMX8MM_CLK_PWM2>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_40M>;
+ assigned-clock-rates = <40000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm2>;
+ status = "okay";
+};
+
+&pwm3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm3>;
+ status = "okay";
+};
+
+&pwm4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pwm4>;
+ status = "okay";
+};
+
+&sai1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai1>;
+ status = "okay";
+};
+
+&sai2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sai2>;
+ status = "okay";
+};
+
+/* BT */
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
/* console */
&uart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart2>;
- assigned-clocks = <&clk IMX8MM_CLK_UART2>;
- assigned-clock-parents = <&clk IMX8MM_CLK_24M>;
+ status = "okay";
+};
+
+/* J15 */
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ uart-has-rtscts;
+ status = "okay";
+};
+
+/* J9 */
+&uart4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart4>;
status = "okay";
};
@@ -191,6 +331,8 @@
pinctrl-0 = <&pinctrl_usdhc1>;
pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ vmmc-supply = <&reg_vref_3v3>;
+ vqmmc-supply = <&reg_vref_1v8>;
status = "okay";
};
@@ -206,6 +348,48 @@
status = "okay";
};
+/* wlan */
+&usdhc3 {
+ bus-width = <4>;
+ sdhci-caps-mask = <0x2 0x0>;
+ non-removable;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc3>;
+ pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+ vmmc-supply = <&reg_wlan_vmmc>;
+ vqmmc-supply = <&reg_vref_1v8>;
+ status = "okay";
+};
+
+/* USB OTG port */
+&usbotg1 {
+ dr_mode = "otg";
+ over-current-active-low;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg1>;
+ power-active-high;
+ status = "okay";
+};
+
+/* USB Host port */
+&usbotg2 {
+ dr_mode = "host";
+ over-current-active-low;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg2>;
+ power-active-high;
+ /*
+ * FIXME: having USB2 enabled hangs the boot just after:
+ * [ 1.655941] ci_hdrc ci_hdrc.1: EHCI Host Controller
+ * [ 1.660880] ci_hdrc ci_hdrc.1: new USB bus registered, assigned bus number 2
+ * [ 1.681505] ci_hdrc ci_hdrc.1: USB 2.0 started, EHCI 1.00
+ * [ 1.687730] hub 2-0:1.0: USB hub found
+ * [ 1.691528] hub 2-0:1.0: 1 port detected
+ */
+ status = "disabled";
+};
+
&wdog1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_wdog>;
@@ -217,6 +401,15 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_hog>;
+ pinctrl_ecspi2: ecspi2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x140
+ MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x19
+ MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x19
+ MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x19
+ >;
+ };
+
pinctrl_fec1: fec1grp {
fsl,pins = <
MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x3
@@ -237,6 +430,17 @@
>;
};
+ pinctrl_flexspi: flexspigrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_NAND_ALE_QSPI_A_SCLK 0x1c2
+ MX8MM_IOMUXC_NAND_CE0_B_QSPI_A_SS0_B 0x82
+ MX8MM_IOMUXC_NAND_DATA00_QSPI_A_DATA0 0x82
+ MX8MM_IOMUXC_NAND_DATA01_QSPI_A_DATA1 0x82
+ MX8MM_IOMUXC_NAND_DATA02_QSPI_A_DATA2 0x82
+ MX8MM_IOMUXC_NAND_DATA03_QSPI_A_DATA3 0x82
+ >;
+ };
+
pinctrl_hog: hoggrp {
fsl,pins = <
MX8MM_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x09
@@ -258,12 +462,86 @@
>;
};
+ pinctrl_i2c4: i2c4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_I2C4_SCL_I2C4_SCL 0x400001c3
+ MX8MM_IOMUXC_I2C4_SDA_I2C4_SDA 0x400001c3
+ >;
+ };
+
pinctrl_i2c3a_rv4162: i2c3a-rv4162grp {
fsl,pins = <
MX8MM_IOMUXC_SAI2_RXC_GPIO4_IO22 0x1c0
>;
};
+ pinctrl_pwm1: pwm1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SPDIF_EXT_CLK_PWM1_OUT 0x16
+ >;
+ };
+
+ pinctrl_pwm2: pwm2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SPDIF_RX_PWM2_OUT 0x16
+ >;
+ };
+
+ pinctrl_pwm3: pwm3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SPDIF_TX_PWM3_OUT 0x16
+ >;
+ };
+
+ pinctrl_pwm4: pwm4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI3_MCLK_PWM4_OUT 0x16
+ >;
+ };
+
+ pinctrl_reg_wlan_vmmc: reg-wlan-vmmcgrp {
+ fsl,pins = <
+ MX8MM_IOMUXC_SAI5_RXC_GPIO3_IO20 0x16
+ >;
+ };
+
+ pinctrl_sai1: sai1grp {
+ fsl,pins = <
+ /* wm8960 */
+ MX8MM_IOMUXC_SAI1_MCLK_SAI1_MCLK 0xd6
+ MX8MM_IOMUXC_SAI1_TXFS_SAI1_TX_SYNC 0xd6
+ MX8MM_IOMUXC_SAI1_TXC_SAI1_TX_BCLK 0xd6
+ MX8MM_IOMUXC_SAI1_TXD0_SAI1_TX_DATA0 0xd6
+ MX8MM_IOMUXC_SAI1_RXD0_SAI1_RX_DATA0 0xd6
+ >;
+ };
+
+ pinctrl_sai2: sai2grp {
+ fsl,pins = <
+ /* Bluetooth PCM */
+ MX8MM_IOMUXC_SAI2_TXFS_SAI2_TX_SYNC 0xd6
+ MX8MM_IOMUXC_SAI2_TXC_SAI2_TX_BCLK 0xd6
+ MX8MM_IOMUXC_SAI2_TXD0_SAI2_TX_DATA0 0xd6
+ MX8MM_IOMUXC_SAI2_RXD0_SAI2_RX_DATA0 0xd6
+ >;
+ };
+
+ pinctrl_sound_wm8960: sound-wm8960grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x80
+ MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x80
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x140
+ MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x140
+ MX8MM_IOMUXC_UART3_RXD_UART1_DCE_CTS_B 0x140
+ MX8MM_IOMUXC_UART3_TXD_UART1_DCE_RTS_B 0x140
+ >;
+ };
+
pinctrl_uart2: uart2grp {
fsl,pins = <
MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x140
@@ -271,6 +549,36 @@
>;
};
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_ECSPI1_SCLK_UART3_DCE_RX 0x140
+ MX8MM_IOMUXC_ECSPI1_MOSI_UART3_DCE_TX 0x140
+ MX8MM_IOMUXC_ECSPI1_SS0_UART3_DCE_RTS_B 0x140
+ MX8MM_IOMUXC_ECSPI1_MISO_UART3_DCE_CTS_B 0x140
+ >;
+ };
+
+ pinctrl_uart4: uart4grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_UART4_RXD_UART4_DCE_RX 0x140
+ MX8MM_IOMUXC_UART4_TXD_UART4_DCE_TX 0x140
+ >;
+ };
+
+ pinctrl_usbotg1: usbotg1grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO12_USB1_OTG_PWR 0x16
+ MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x156
+ >;
+ };
+
+ pinctrl_usbotg2: usbotg2grp {
+ fsl,pins = <
+ MX8MM_IOMUXC_GPIO1_IO14_USB2_OTG_PWR 0x16
+ MX8MM_IOMUXC_GPIO1_IO15_USB2_OTG_OC 0x15
+ >;
+ };
+
pinctrl_usdhc1: usdhc1grp {
fsl,pins = <
MX8MM_IOMUXC_SD1_CLK_USDHC1_CLK 0x190
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
index 5ccc4cc91959..a003e6af3353 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
@@ -124,7 +124,7 @@
#define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0
#define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0
#define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0
+#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0
#define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0
#define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0
#define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0
diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
index 6bf1d15ba16a..a27e02bee6b4 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
@@ -887,7 +887,7 @@
interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MM_CLK_QSPI_ROOT>,
<&clk IMX8MM_CLK_QSPI_ROOT>;
- clock-names = "fspi", "fspi_en";
+ clock-names = "fspi_en", "fspi";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
index de2cd0e3201c..c35eeaff958f 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn-beacon-som.dtsi
@@ -262,8 +262,12 @@
&usdhc1 {
#address-cells = <1>;
#size-cells = <0>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ vmmc-supply = <&buck4_reg>;
+ vqmmc-supply = <&buck5_reg>;
bus-width = <4>;
non-removable;
cap-power-off-card;
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index 16ea50089567..4dac4da38f4c 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -898,7 +898,7 @@
interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_QSPI_ROOT>,
<&clk IMX8MN_CLK_QSPI_ROOT>;
- clock-names = "fspi", "fspi_en";
+ clock-names = "fspi_en", "fspi";
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
index 7db4273cc88b..2c28e589677e 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-evk.dts
@@ -98,6 +98,8 @@
reg = <1>;
eee-broken-1000t;
reset-gpios = <&gpio4 2 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10000>;
+ reset-deassert-us = <80000>;
};
};
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
index 0e1a6d953389..984a6b9ded8d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
@@ -16,7 +16,7 @@
"phytec,imx8mp-phycore-som", "fsl,imx8mp";
chosen {
- stdout-path = &uart2;
+ stdout-path = &uart1;
};
reg_usdhc2_vmmc: regulator-usdhc2 {
@@ -33,9 +33,33 @@
};
};
+&eqos {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_eqos>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ status = "okay";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0x1>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_1_50_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_1_50_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ ti,clk-output-sel = <DP83867_CLK_O_SEL_OFF>;
+ enet-phy-lane-no-swap;
+ };
+ };
+};
+
&i2c2 {
clock-frequency = <400000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c2>;
pinctrl-1 = <&pinctrl_i2c2_gpio>;
sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
@@ -71,9 +95,9 @@
};
/* debug console */
-&uart2 {
+&uart1 {
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_uart2>;
+ pinctrl-0 = <&pinctrl_uart1>;
status = "okay";
};
@@ -90,6 +114,26 @@
};
&iomuxc {
+ pinctrl_eqos: eqosgrp {
+ fsl,pins = <
+ MX8MP_IOMUXC_ENET_MDC__ENET_QOS_MDC 0x3
+ MX8MP_IOMUXC_ENET_MDIO__ENET_QOS_MDIO 0x3
+ MX8MP_IOMUXC_ENET_RD0__ENET_QOS_RGMII_RD0 0x91
+ MX8MP_IOMUXC_ENET_RD1__ENET_QOS_RGMII_RD1 0x91
+ MX8MP_IOMUXC_ENET_RD2__ENET_QOS_RGMII_RD2 0x91
+ MX8MP_IOMUXC_ENET_RD3__ENET_QOS_RGMII_RD3 0x91
+ MX8MP_IOMUXC_ENET_RXC__CCM_ENET_QOS_CLOCK_GENERATE_RX_CLK 0x91
+ MX8MP_IOMUXC_ENET_RX_CTL__ENET_QOS_RGMII_RX_CTL 0x91
+ MX8MP_IOMUXC_ENET_TD0__ENET_QOS_RGMII_TD0 0x1f
+ MX8MP_IOMUXC_ENET_TD1__ENET_QOS_RGMII_TD1 0x1f
+ MX8MP_IOMUXC_ENET_TD2__ENET_QOS_RGMII_TD2 0x1f
+ MX8MP_IOMUXC_ENET_TD3__ENET_QOS_RGMII_TD3 0x1f
+ MX8MP_IOMUXC_ENET_TX_CTL__ENET_QOS_RGMII_TX_CTL 0x1f
+ MX8MP_IOMUXC_ENET_TXC__CCM_ENET_QOS_CLOCK_GENERATE_TX_CLK 0x1f
+ MX8MP_IOMUXC_SAI1_MCLK__GPIO4_IO20 0x10
+ >;
+ };
+
pinctrl_i2c2: i2c2grp {
fsl,pins = <
MX8MP_IOMUXC_I2C2_SCL__I2C2_SCL 0x400001c3
@@ -110,10 +154,10 @@
>;
};
- pinctrl_uart2: uart2grp {
+ pinctrl_uart1: uart1grp {
fsl,pins = <
- MX8MP_IOMUXC_UART2_RXD__UART2_DCE_RX 0x49
- MX8MP_IOMUXC_UART2_TXD__UART2_DCE_TX 0x49
+ MX8MP_IOMUXC_UART1_RXD__UART1_DCE_RX 0x49
+ MX8MP_IOMUXC_UART1_TXD__UART1_DCE_TX 0x49
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
index 44a8c2337cee..f3965ec5b31d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
@@ -67,7 +67,7 @@
&i2c1 {
clock-frequency = <400000>;
- pinctrl-names = "default";
+ pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c1>;
pinctrl-1 = <&pinctrl_i2c1_gpio>;
sda-gpios = <&gpio5 15 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp.dtsi b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
index c7523fd4eae9..c2d51a46cb3c 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp.dtsi
@@ -18,6 +18,7 @@
aliases {
ethernet0 = &fec;
+ ethernet1 = &eqos;
gpio0 = &gpio1;
gpio1 = &gpio2;
gpio2 = &gpio3;
@@ -312,6 +313,22 @@
status = "disabled";
};
+ wdog2: watchdog@30290000 {
+ compatible = "fsl,imx8mp-wdt", "fsl,imx21-wdt";
+ reg = <0x30290000 0x10000>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_WDOG2_ROOT>;
+ status = "disabled";
+ };
+
+ wdog3: watchdog@302a0000 {
+ compatible = "fsl,imx8mp-wdt", "fsl,imx21-wdt";
+ reg = <0x302a0000 0x10000>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MP_CLK_WDOG3_ROOT>;
+ status = "disabled";
+ };
+
iomuxc: pinctrl@30330000 {
compatible = "fsl,imx8mp-iomuxc";
reg = <0x30330000 0x10000>;
@@ -786,6 +803,28 @@
nvmem_macaddr_swap;
status = "disabled";
};
+
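+ /* Second Ethernet controller (Synopsys DWMAC 5.10a based ENET_QOS). */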
+ eqos: ethernet@30bf0000 {
+ compatible = "nxp,imx8mp-dwmac-eqos", "snps,dwmac-5.10a";
+ reg = <0x30bf0000 0x10000>;
+ interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eth_wake_irq", "macirq";
+ clocks = <&clk IMX8MP_CLK_ENET_QOS_ROOT>,
+ <&clk IMX8MP_CLK_QOS_ENET_ROOT>,
+ <&clk IMX8MP_CLK_ENET_QOS_TIMER>,
+ <&clk IMX8MP_CLK_ENET_QOS>;
+ clock-names = "stmmaceth", "pclk", "ptp_ref", "tx";
+ assigned-clocks = <&clk IMX8MP_CLK_ENET_AXI>,
+ <&clk IMX8MP_CLK_ENET_QOS_TIMER>,
+ <&clk IMX8MP_CLK_ENET_QOS>;
+ assigned-clock-parents = <&clk IMX8MP_SYS_PLL1_266M>,
+ <&clk IMX8MP_SYS_PLL2_100M>,
+ <&clk IMX8MP_SYS_PLL2_125M>;
+ assigned-clock-rates = <0>, <100000000>, <125000000>;
+ intf_mode = <&gpr 0x4>;
+ status = "disabled";
+ };
};
gic: interrupt-controller@38800000 {
@@ -828,7 +867,7 @@
ranges;
status = "disabled";
- usb_dwc3_0: dwc3@38100000 {
+ usb_dwc3_0: usb@38100000 {
compatible = "snps,dwc3";
reg = <0x38100000 0x10000>;
clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
@@ -869,7 +908,7 @@
ranges;
status = "disabled";
- usb_dwc3_1: dwc3@38200000 {
+ usb_dwc3_1: usb@38200000 {
compatible = "snps,dwc3";
reg = <0x38200000 0x10000>;
clocks = <&clk IMX8MP_CLK_HSIO_AXI>,
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts b/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
new file mode 100644
index 000000000000..f593e4ff62e1
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Device Tree File for the Kontron pitx-imx8m board.
+ *
+ * Copyright (C) 2021 Heiko Thiery <heiko.thiery@gmail.com>
+ */
+
+/dts-v1/;
+
+#include "imx8mq.dtsi"
+#include <dt-bindings/net/ti-dp83867.h>
+
+/ {
+ model = "Kontron pITX-imx8m";
+ compatible = "kontron,pitx-imx8m", "fsl,imx8mq";
+
+ aliases {
+ i2c0 = &i2c1;
+ i2c1 = &i2c2;
+ i2c2 = &i2c3;
+ mmc0 = &usdhc1;
+ mmc1 = &usdhc2;
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ spi0 = &qspi0;
+ spi1 = &ecspi2;
+ };
+
+ chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
+ pcie0_refclk: pcie0-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ };
+
+ pcie1_refclk: pcie1-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <100000000>;
+ };
+
+ reg_usdhc2_vmmc: regulator-usdhc2-vmmc {
+ compatible = "regulator-fixed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_reg_usdhc2>;
+ regulator-name = "V_3V3_SD";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+ off-on-delay-us = <20000>;
+ enable-active-high;
+ };
+};
+
+&ecspi2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi2 &pinctrl_ecspi2_cs>;
+ cs-gpios = <&gpio5 13 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ tpm@0 {
+ compatible = "infineon,slb9670";
+ reg = <0>;
+ spi-max-frequency = <43000000>;
+ };
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
+ ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_75_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ reset-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
+ reset-assert-us = <10>;
+ reset-deassert-us = <280>;
+ };
+ };
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c1>;
+ status = "okay";
+
+ pmic@8 {
+ compatible = "fsl,pfuze100";
+ fsl,pfuze-support-disable-sw;
+ reg = <0x8>;
+
+ regulators {
+ sw1a_reg: sw1ab {
+ regulator-name = "V_0V9_GPU";
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ sw1c_reg: sw1c {
+ regulator-name = "V_0V9_VPU";
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1100000>;
+ };
+
+ sw2_reg: sw2 {
+ regulator-name = "V_1V1_NVCC_DRAM";
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ };
+
+ sw3a_reg: sw3ab {
+ regulator-name = "V_1V0_DRAM";
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-always-on;
+ };
+
+ sw4_reg: sw4 {
+ regulator-name = "V_1V8_S0";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ swbst_reg: swbst {
+ regulator-name = "NC";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5150000>;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-name = "V_0V9_SNVS";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-name = "V_0V55_VREF_DDR";
+ regulator-always-on;
+ };
+
+ vgen1_reg: vgen1 {
+ regulator-name = "V_1V5_CSI";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen2_reg: vgen2 {
+ regulator-name = "V_0V9_PHY";
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <975000>;
+ regulator-always-on;
+ };
+
+ vgen3_reg: vgen3 {
+ regulator-name = "V_1V8_PHY";
+ regulator-min-microvolt = <1675000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-always-on;
+ };
+
+ vgen4_reg: vgen4 {
+ regulator-name = "V_1V8_VDDA";
+ regulator-min-microvolt = <1625000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vgen5 {
+ regulator-name = "V_3V3_PHY";
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3625000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vgen6 {
+ regulator-name = "V_2V8_CAM";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+ };
+
+ fan-controller@1b {
+ compatible = "maxim,max6650";
+ reg = <0x1b>;
+ maxim,fan-microvolt = <5000000>;
+ };
+
+ rtc@32 {
+ compatible = "microcrystal,rv8803";
+ reg = <0x32>;
+ };
+
+ sensor@4b {
+ compatible = "national,lm75b";
+ reg = <0x4b>;
+ };
+
+ eeprom@51 {
+ compatible = "atmel,24c32";
+ reg = <0x51>;
+ pagesize = <32>;
+ };
+};
+
+&i2c2 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+};
+
+&i2c3 {
+ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+};
+
+/* M.2 B-key slot */
+&pcie0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pcie0>;
+ reset-gpio = <&gpio1 9 GPIO_ACTIVE_LOW>;
+ clocks = <&clk IMX8MQ_CLK_PCIE1_ROOT>,
+ <&clk IMX8MQ_CLK_PCIE1_AUX>,
+ <&clk IMX8MQ_CLK_PCIE1_PHY>,
+ <&pcie0_refclk>;
+ clock-names = "pcie", "pcie_aux", "pcie_phy", "pcie_bus";
+ status = "okay";
+};
+
+/* Intel Ethernet Controller I210/I211 */
+&pcie1 {
+ clocks = <&clk IMX8MQ_CLK_PCIE2_ROOT>,
+ <&clk IMX8MQ_CLK_PCIE2_AUX>,
+ <&clk IMX8MQ_CLK_PCIE2_PHY>,
+ <&pcie1_refclk>;
+ clock-names = "pcie", "pcie_aux", "pcie_phy", "pcie_bus";
+ fsl,max-link-speed = <1>;
+ status = "okay";
+};
+
+&pgc_gpu {
+ power-supply = <&sw1a_reg>;
+};
+
+&pgc_vpu {
+ power-supply = <&sw1c_reg>;
+};
+
+&qspi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_qspi>;
+ status = "okay";
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0>;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ m25p,fast-read;
+ spi-max-frequency = <50000000>;
+ };
+};
+
+&snvs_pwrkey {
+ status = "okay";
+};
+
+&uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart1>;
+ assigned-clocks = <&clk IMX8MQ_CLK_UART1>;
+ assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_80M>;
+ status = "okay";
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart2>;
+ assigned-clocks = <&clk IMX8MQ_CLK_UART2>;
+ assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_80M>;
+ status = "okay";
+};
+
+&uart3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_uart3>;
+ uart-has-rtscts;
+ assigned-clocks = <&clk IMX8MQ_CLK_UART3>;
+ assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_80M>;
+ status = "okay";
+};
+
+&usb3_phy0 {
+ status = "okay";
+};
+
+&usb3_phy1 {
+ status = "okay";
+};
+
+&usb_dwc3_0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usb0>;
+ dr_mode = "otg";
+ hnp-disable;
+ srp-disable;
+ adp-disable;
+ maximum-speed = "high-speed";
+ status = "okay";
+};
+
+&usb_dwc3_1 {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usdhc1 {
+ assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>;
+ assigned-clock-rates = <400000000>;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
+ pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ vqmmc-supply = <&sw4_reg>;
+ bus-width = <8>;
+ non-removable;
+ no-sd;
+ no-sdio;
+ status = "okay";
+};
+
+&usdhc2 {
+ assigned-clocks = <&clk IMX8MQ_CLK_USDHC2>;
+ assigned-clock-rates = <200000000>;
+ pinctrl-names = "default", "state_100mhz", "state_200mhz";
+ pinctrl-0 = <&pinctrl_usdhc2>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-1 = <&pinctrl_usdhc2_100mhz>, <&pinctrl_usdhc2_gpio>;
+ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_gpio>;
+ bus-width = <4>;
+ cd-gpios = <&gpio2 12 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 20 GPIO_ACTIVE_HIGH>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ status = "okay";
+};
+
+&wdog1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_wdog>;
+ fsl,ext-reset-output;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_hog: hoggrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_NAND_CE1_B_GPIO3_IO2 0x19 /* TPM Reset */
+ MX8MQ_IOMUXC_NAND_CE3_B_GPIO3_IO4 0x19 /* USB2 Hub Reset */
+ >;
+ };
+
+ pinctrl_gpio: gpiogrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_NAND_CLE_GPIO3_IO5 0x19 /* GPIO0 */
+ MX8MQ_IOMUXC_NAND_RE_B_GPIO3_IO15 0x19 /* GPIO1 */
+ MX8MQ_IOMUXC_NAND_WE_B_GPIO3_IO17 0x19 /* GPIO2 */
+ MX8MQ_IOMUXC_NAND_WP_B_GPIO3_IO18 0x19 /* GPIO3 */
+ MX8MQ_IOMUXC_NAND_READY_B_GPIO3_IO16 0x19 /* GPIO4 */
+ MX8MQ_IOMUXC_NAND_DATA04_GPIO3_IO10 0x19 /* GPIO5 */
+ MX8MQ_IOMUXC_NAND_DATA05_GPIO3_IO11 0x19 /* GPIO6 */
+ MX8MQ_IOMUXC_NAND_DATA06_GPIO3_IO12 0x19 /* GPIO7 */
+ >;
+ };
+
+ pinctrl_pcie0: pcie0grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x16 /* PCIE_PERST */
+ MX8MQ_IOMUXC_UART4_TXD_GPIO5_IO29 0x16 /* W_DISABLE */
+ >;
+ };
+
+ pinctrl_reg_usdhc2: regusdhc2gpiogrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD2_RESET_B_GPIO2_IO19 0x41
+ >;
+ };
+
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_ENET_MDC_ENET1_MDC 0x3
+ MX8MQ_IOMUXC_ENET_MDIO_ENET1_MDIO 0x23
+ MX8MQ_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x1f
+ MX8MQ_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x1f
+ MX8MQ_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x1f
+ MX8MQ_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x1f
+ MX8MQ_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x91
+ MX8MQ_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x91
+ MX8MQ_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x91
+ MX8MQ_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x91
+ MX8MQ_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x1f
+ MX8MQ_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91
+ MX8MQ_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91
+ MX8MQ_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f
+ MX8MQ_IOMUXC_GPIO1_IO11_GPIO1_IO11 0x16
+ MX8MQ_IOMUXC_GPIO1_IO15_GPIO1_IO15 0x16
+ >;
+ };
+
+ pinctrl_i2c1: i2c1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_I2C1_SCL_I2C1_SCL 0x4000007f
+ MX8MQ_IOMUXC_I2C1_SDA_I2C1_SDA 0x4000007f
+ >;
+ };
+
+ pinctrl_i2c2: i2c2grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_I2C2_SCL_I2C2_SCL 0x4000007f
+ MX8MQ_IOMUXC_I2C2_SDA_I2C2_SDA 0x4000007f
+ >;
+ };
+
+ pinctrl_i2c3: i2c3grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_I2C3_SCL_I2C3_SCL 0x4000007f
+ MX8MQ_IOMUXC_I2C3_SDA_I2C3_SDA 0x4000007f
+ >;
+ };
+
+ pinctrl_qspi: qspigrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_NAND_ALE_QSPI_A_SCLK 0x82
+ MX8MQ_IOMUXC_NAND_CE0_B_QSPI_A_SS0_B 0x82
+ MX8MQ_IOMUXC_NAND_DATA00_QSPI_A_DATA0 0x82
+ MX8MQ_IOMUXC_NAND_DATA01_QSPI_A_DATA1 0x82
+ MX8MQ_IOMUXC_NAND_DATA02_QSPI_A_DATA2 0x82
+ MX8MQ_IOMUXC_NAND_DATA03_QSPI_A_DATA3 0x82
+ >;
+ };
+
+ pinctrl_ecspi2: ecspi2grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x19
+ MX8MQ_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x19
+ MX8MQ_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x19
+ >;
+ };
+
+ pinctrl_ecspi2_cs: ecspi2csgrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x19
+ >;
+ };
+
+ pinctrl_uart1: uart1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_UART1_TXD_UART1_DCE_TX 0x49
+ MX8MQ_IOMUXC_UART1_RXD_UART1_DCE_RX 0x49
+ >;
+ };
+
+ pinctrl_uart2: uart2grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_UART2_TXD_UART2_DCE_TX 0x49
+ MX8MQ_IOMUXC_UART2_RXD_UART2_DCE_RX 0x49
+ >;
+ };
+
+ pinctrl_uart3: uart3grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_UART3_TXD_UART3_DCE_TX 0x49
+ MX8MQ_IOMUXC_UART3_RXD_UART3_DCE_RX 0x49
+ MX8MQ_IOMUXC_ECSPI1_SS0_UART3_DCE_RTS_B 0x49
+ MX8MQ_IOMUXC_ECSPI1_MISO_UART3_DCE_CTS_B 0x49
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x83
+ MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc3
+ MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc3
+ MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc3
+ MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc3
+ MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc3
+ MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc3
+ MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc3
+ MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc3
+ MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc3
+ MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x83
+ MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1
+ >;
+ };
+
+ pinctrl_usdhc1_100mhz: usdhc1-100grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x8d
+ MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xcd
+ MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xcd
+ MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xcd
+ MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xcd
+ MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xcd
+ MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xcd
+ MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xcd
+ MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xcd
+ MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xcd
+ MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x8d
+ MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1
+ >;
+ };
+
+ pinctrl_usdhc1_200mhz: usdhc1-200grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x9f
+ MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xdf
+ MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xdf
+ MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xdf
+ MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xdf
+ MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xdf
+ MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xdf
+ MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xdf
+ MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xdf
+ MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xdf
+ MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x9f
+ MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_gpio: usdhc2gpiogrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD2_CD_B_GPIO2_IO12 0x41
+ MX8MQ_IOMUXC_SD2_WP_GPIO2_IO20 0x19
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK 0x83
+ MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD 0xc3
+ MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0 0xc3
+ MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1 0xc3
+ MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2 0xc3
+ MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3 0xc3
+ MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_100mhz: usdhc2-100grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK 0x8d
+ MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD 0xcd
+ MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0 0xcd
+ MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1 0xcd
+ MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2 0xcd
+ MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3 0xcd
+ MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usdhc2_200mhz: usdhc2-200grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SD2_CLK_USDHC2_CLK 0x9f
+ MX8MQ_IOMUXC_SD2_CMD_USDHC2_CMD 0xdf
+ MX8MQ_IOMUXC_SD2_DATA0_USDHC2_DATA0 0xdf
+ MX8MQ_IOMUXC_SD2_DATA1_USDHC2_DATA1 0xdf
+ MX8MQ_IOMUXC_SD2_DATA2_USDHC2_DATA2 0xdf
+ MX8MQ_IOMUXC_SD2_DATA3_USDHC2_DATA3 0xdf
+ MX8MQ_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0xc1
+ >;
+ };
+
+ pinctrl_usb0: usb0grp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_GPIO1_IO12_USB1_OTG_PWR 0x19
+ MX8MQ_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x19
+ >;
+ };
+
+ pinctrl_wdog: wdoggrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_GPIO1_IO02_WDOG1_WDOG_B 0xc6
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
index dd217a0760e9..622f3787a186 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-devkit.dts
@@ -50,13 +50,6 @@
linux,code = <KEY_VOLUMEDOWN>;
};
- hp-det {
- label = "HP_DET";
- gpios = <&gpio3 20 GPIO_ACTIVE_LOW>;
- wakeup-source;
- linux,code = <KEY_HP>;
- };
-
wwan-wake {
label = "WWAN_WAKE";
gpios = <&gpio3 8 GPIO_ACTIVE_LOW>;
@@ -163,21 +156,35 @@
#sound-dai-cells = <0>;
};
+ mic_mux: mic-mux {
+ compatible = "simple-audio-mux";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_micsel>;
+ mux-gpios = <&gpio5 5 GPIO_ACTIVE_LOW>;
+ sound-name-prefix = "Mic Mux";
+ };
+
sound {
compatible = "simple-audio-card";
- simple-audio-card,name = "sgtl5000";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hpdet>;
+ simple-audio-card,aux-devs = <&speaker_amp>, <&mic_mux>;
+ simple-audio-card,name = "Librem 5 Devkit";
simple-audio-card,format = "i2s";
simple-audio-card,widgets =
- "Microphone", "Microphone Jack",
- "Headphone", "Headphone Jack",
- "Speaker", "Speaker Ext",
- "Line", "Line In Jack";
+ "Microphone", "Builtin Microphone",
+ "Microphone", "Headset Microphone",
+ "Headphone", "Headphones",
+ "Speaker", "Builtin Speaker";
simple-audio-card,routing =
- "MIC_IN", "Microphone Jack",
- "Microphone Jack", "Mic Bias",
- "LINE_IN", "Line In Jack",
- "Headphone Jack", "HP_OUT",
- "Speaker Ext", "LINE_OUT";
+ "MIC_IN", "Mic Mux OUT",
+ "Mic Mux IN1", "Headset Microphone",
+ "Mic Mux IN2", "Builtin Microphone",
+ "Mic Mux OUT", "Mic Bias",
+ "Headphones", "HP_OUT",
+ "Builtin Speaker", "Speaker Amp OUTR",
+ "Speaker Amp INR", "LINE_OUT";
+ simple-audio-card,hp-det-gpio = <&gpio3 20 GPIO_ACTIVE_HIGH>;
simple-audio-card,cpu {
sound-dai = <&sai2>;
@@ -207,6 +214,15 @@
};
};
+ speaker_amp: speaker-amp {
+ compatible = "simple-audio-amplifier";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_spkamp>;
+ VCC-supply = <&reg_3v3_p>;
+ sound-name-prefix = "Speaker Amp";
+ enable-gpios = <&gpio5 3 GPIO_ACTIVE_HIGH>;
+ };
+
vibrator {
compatible = "gpio-vibrator";
pinctrl-names = "default";
@@ -315,7 +331,6 @@
regulator-min-microvolt = <700000>;
regulator-max-microvolt = <1300000>;
regulator-boot-on;
- regulator-enable-ramp-delay = <200>;
rohm,dvs-run-voltage = <900000>;
};
@@ -610,7 +625,6 @@
fsl,pins = <
MX8MQ_IOMUXC_SAI2_RXFS_GPIO4_IO21 0x16
MX8MQ_IOMUXC_SAI2_RXC_GPIO4_IO22 0x16
- MX8MQ_IOMUXC_SAI5_RXC_GPIO3_IO20 0x180 /* HP_DET */
MX8MQ_IOMUXC_NAND_DATA02_GPIO3_IO8 0x80 /* nWoWWAN */
>;
};
@@ -621,6 +635,12 @@
>;
};
+ pinctrl_hpdet: hpdetgrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SAI5_RXC_GPIO3_IO20 0xC0 /* HP_DET */
+ >;
+ };
+
pinctrl_i2c1: i2c1grp {
fsl,pins = <
MX8MQ_IOMUXC_I2C1_SCL_I2C1_SCL 0x4000001f
@@ -641,6 +661,18 @@
>;
};
+ pinctrl_micsel: micselgrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SPDIF_EXT_CLK_GPIO5_IO5 0xc6 /* MIC_SEL */
+ >;
+ };
+
+ pinctrl_spkamp: spkampgrp {
+ fsl,pins = <
+ MX8MQ_IOMUXC_SPDIF_TX_GPIO5_IO3 0x81 /* MUTE */
+ >;
+ };
+
pinctrl_pmic: pmicgrp {
fsl,pins = <
MX8MQ_IOMUXC_GPIO1_IO03_GPIO1_IO3 0x80 /* PMIC intr */
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r2.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r2.dts
index d77fc5df3f06..73bd431cbd6a 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r2.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r2.dts
@@ -25,5 +25,5 @@
};
&proximity {
- proximity-near-level = <220>;
+ proximity-near-level = <120>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
index 0d38327043f8..cd3c3edd48fa 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5-r3.dts
@@ -28,6 +28,10 @@
ti,termination-current = <144000>; /* uA */
};
+&buck3_reg {
+ regulator-always-on;
+};
+
&proximity {
proximity-near-level = <25>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
index 06a4799b6aeb..460ef0d86540 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
@@ -258,6 +258,25 @@
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "protected0";
+ reg = <0x0 0x30000>;
+ read-only;
+ };
+
+ partition@30000 {
+ label = "protected1";
+ reg = <0x30000 0x10000>;
+ read-only;
+ };
+
+ partition@40000 {
+ label = "rw";
+ reg = <0x40000 0x1C0000>;
+ };
};
};
@@ -267,8 +286,9 @@
pmic-5v-hog {
gpio-hog;
- gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
+ gpios = <1 GPIO_ACTIVE_HIGH>;
input;
+ line-name = "pmic-5v";
};
};
@@ -1051,8 +1071,6 @@
assigned-clocks = <&clk IMX8MQ_CLK_SAI2>;
assigned-clock-parents = <&clk IMX8MQ_AUDIO_PLL1_OUT>;
assigned-clock-rates = <24576000>;
- assigned-clocks = <&clk IMX8MQ_AUDIO_PLL1>, <&clk IMX8MQ_AUDIO_PLL2>;
- assigned-clock-rates = <786432000>, <722534400>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
index b94b02080a34..68e8fa172974 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
@@ -130,7 +130,7 @@
#define MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0
#define MX8MQ_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0
#define MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0
-#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0
+#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0
#define MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0
#define MX8MQ_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0
#define MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
index 631e01c1b9fd..be1e7d6f0ecb 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
@@ -88,11 +88,11 @@
pinctrl-0 = <&pinctrl_codec2>;
reg = <0x18>;
#sound-dai-cells = <0>;
- HPVDD-supply = <&reg_3p3v>;
- SPRVDD-supply = <&reg_3p3v>;
- SPLVDD-supply = <&reg_3p3v>;
- AVDD-supply = <&reg_3p3v>;
- IOVDD-supply = <&reg_3p3v>;
+ HPVDD-supply = <&reg_gen_3p3>;
+ SPRVDD-supply = <&reg_gen_3p3>;
+ SPLVDD-supply = <&reg_gen_3p3>;
+ AVDD-supply = <&reg_gen_3p3>;
+ IOVDD-supply = <&reg_gen_3p3>;
DVDD-supply = <&vgen4_reg>;
reset-gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
index 4dc8383478ee..a08a568c31d9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
@@ -45,8 +45,8 @@
reg_12p0_main: regulator-12p0-main {
compatible = "regulator-fixed";
regulator-name = "12V_MAIN";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
regulator-always-on;
};
@@ -77,15 +77,6 @@
regulator-always-on;
};
- reg_3p3v: regulator-3p3v {
- compatible = "regulator-fixed";
- vin-supply = <&reg_3p3_main>;
- regulator-name = "GEN_3V3";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- };
-
reg_usdhc2_vmmc: regulator-vsd-3v3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_reg_usdhc2>;
@@ -415,11 +406,11 @@
pinctrl-0 = <&pinctrl_codec1>;
reg = <0x18>;
#sound-dai-cells = <0>;
- HPVDD-supply = <&reg_3p3v>;
- SPRVDD-supply = <&reg_3p3v>;
- SPLVDD-supply = <&reg_3p3v>;
- AVDD-supply = <&reg_3p3v>;
- IOVDD-supply = <&reg_3p3v>;
+ HPVDD-supply = <&reg_gen_3p3>;
+ SPRVDD-supply = <&reg_gen_3p3>;
+ SPLVDD-supply = <&reg_gen_3p3>;
+ AVDD-supply = <&reg_gen_3p3>;
+ IOVDD-supply = <&reg_gen_3p3>;
DVDD-supply = <&vgen4_reg>;
reset-gpios = <&gpio3 3 GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-mek.dts b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
new file mode 100644
index 000000000000..ce9d3f0b98fc
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qm-mek.dts
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018-2019 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+/dts-v1/;
+
+#include "imx8qm.dtsi"
+
+/ {
+ model = "Freescale i.MX8QM MEK";
+ compatible = "fsl,imx8qm-mek", "fsl,imx8qm";
+
+ chosen {
+ stdout-path = &lpuart0;
+ };
+
+ cpus {
+ /delete-node/ cpu-map;
+ /delete-node/ cpu@100;
+ /delete-node/ cpu@101;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000 0 0x40000000>;
+ };
+
+ reg_usdhc2_vmmc: usdhc2-vmmc {
+ compatible = "regulator-fixed";
+ regulator-name = "SD1_SPWR";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ gpio = <&lsio_gpio4 19 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&lpuart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_lpuart0>;
+ status = "okay";
+};
+
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+
+ ethphy1: ethernet-phy@1 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <1>;
+ };
+ };
+};
+
+&usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+ bus-width = <8>;
+ no-sd;
+ no-sdio;
+ non-removable;
+ status = "okay";
+};
+
+&usdhc2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc2>;
+ bus-width = <4>;
+ vmmc-supply = <&reg_usdhc2_vmmc>;
+ cd-gpios = <&lsio_gpio4 22 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&lsio_gpio4 21 GPIO_ACTIVE_HIGH>;
+ status = "okay";
+};
+
+&iomuxc {
+ pinctrl_fec1: fec1grp {
+ fsl,pins = <
+ IMX8QM_ENET0_MDC_CONN_ENET0_MDC 0x06000020
+ IMX8QM_ENET0_MDIO_CONN_ENET0_MDIO 0x06000020
+ IMX8QM_ENET0_RGMII_TX_CTL_CONN_ENET0_RGMII_TX_CTL 0x06000020
+ IMX8QM_ENET0_RGMII_TXC_CONN_ENET0_RGMII_TXC 0x06000020
+ IMX8QM_ENET0_RGMII_TXD0_CONN_ENET0_RGMII_TXD0 0x06000020
+ IMX8QM_ENET0_RGMII_TXD1_CONN_ENET0_RGMII_TXD1 0x06000020
+ IMX8QM_ENET0_RGMII_TXD2_CONN_ENET0_RGMII_TXD2 0x06000020
+ IMX8QM_ENET0_RGMII_TXD3_CONN_ENET0_RGMII_TXD3 0x06000020
+ IMX8QM_ENET0_RGMII_RXC_CONN_ENET0_RGMII_RXC 0x06000020
+ IMX8QM_ENET0_RGMII_RX_CTL_CONN_ENET0_RGMII_RX_CTL 0x06000020
+ IMX8QM_ENET0_RGMII_RXD0_CONN_ENET0_RGMII_RXD0 0x06000020
+ IMX8QM_ENET0_RGMII_RXD1_CONN_ENET0_RGMII_RXD1 0x06000020
+ IMX8QM_ENET0_RGMII_RXD2_CONN_ENET0_RGMII_RXD2 0x06000020
+ IMX8QM_ENET0_RGMII_RXD3_CONN_ENET0_RGMII_RXD3 0x06000020
+ >;
+ };
+
+ pinctrl_lpuart0: lpuart0grp {
+ fsl,pins = <
+ IMX8QM_UART0_RX_DMA_UART0_RX 0x06000020
+ IMX8QM_UART0_TX_DMA_UART0_TX 0x06000020
+ >;
+ };
+
+ pinctrl_usdhc1: usdhc1grp {
+ fsl,pins = <
+ IMX8QM_EMMC0_CLK_CONN_EMMC0_CLK 0x06000041
+ IMX8QM_EMMC0_CMD_CONN_EMMC0_CMD 0x00000021
+ IMX8QM_EMMC0_DATA0_CONN_EMMC0_DATA0 0x00000021
+ IMX8QM_EMMC0_DATA1_CONN_EMMC0_DATA1 0x00000021
+ IMX8QM_EMMC0_DATA2_CONN_EMMC0_DATA2 0x00000021
+ IMX8QM_EMMC0_DATA3_CONN_EMMC0_DATA3 0x00000021
+ IMX8QM_EMMC0_DATA4_CONN_EMMC0_DATA4 0x00000021
+ IMX8QM_EMMC0_DATA5_CONN_EMMC0_DATA5 0x00000021
+ IMX8QM_EMMC0_DATA6_CONN_EMMC0_DATA6 0x00000021
+ IMX8QM_EMMC0_DATA7_CONN_EMMC0_DATA7 0x00000021
+ IMX8QM_EMMC0_STROBE_CONN_EMMC0_STROBE 0x00000041
+ >;
+ };
+
+ pinctrl_usdhc2: usdhc2grp {
+ fsl,pins = <
+ IMX8QM_USDHC1_CLK_CONN_USDHC1_CLK 0x06000041
+ IMX8QM_USDHC1_CMD_CONN_USDHC1_CMD 0x00000021
+ IMX8QM_USDHC1_DATA0_CONN_USDHC1_DATA0 0x00000021
+ IMX8QM_USDHC1_DATA1_CONN_USDHC1_DATA1 0x00000021
+ IMX8QM_USDHC1_DATA2_CONN_USDHC1_DATA2 0x00000021
+ IMX8QM_USDHC1_DATA3_CONN_USDHC1_DATA3 0x00000021
+ IMX8QM_USDHC1_VSELECT_CONN_USDHC1_VSELECT 0x00000021
+ >;
+ };
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-conn.dtsi
new file mode 100644
index 000000000000..42637a45701c
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-conn.dtsi
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019-2020 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+&fec1 {
+ compatible = "fsl,imx8qm-fec", "fsl,imx6sx-fec";
+};
+
+&fec2 {
+ compatible = "fsl,imx8qm-fec", "fsl,imx6sx-fec";
+};
+
+&usdhc1 {
+ compatible = "fsl,imx8qm-usdhc", "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
+};
+
+&usdhc2 {
+ compatible = "fsl,imx8qm-usdhc", "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
new file mode 100644
index 000000000000..bbe5f5ecfb92
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018-2019 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+&dma_subsys {
+ uart4_lpcg: clock-controller@5a4a0000 {
+ compatible = "fsl,imx8qxp-lpcg";
+ reg = <0x5a4a0000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&clk IMX_SC_R_UART_4 IMX_SC_PM_CLK_PER>,
+ <&dma_ipg_clk>;
+ clock-indices = <IMX_LPCG_CLK_0>, <IMX_LPCG_CLK_4>;
+ clock-output-names = "uart4_lpcg_baud_clk",
+ "uart4_lpcg_ipg_clk";
+ power-domains = <&pd IMX_SC_R_UART_4>;
+ };
+};
+
+&lpuart0 {
+ compatible = "fsl,imx8qm-lpuart", "fsl,imx8qxp-lpuart";
+};
+
+&lpuart1 {
+ compatible = "fsl,imx8qm-lpuart", "fsl,imx8qxp-lpuart";
+};
+
+&lpuart2 {
+ compatible = "fsl,imx8qm-lpuart", "fsl,imx8qxp-lpuart";
+};
+
+&lpuart3 {
+ compatible = "fsl,imx8qm-lpuart", "fsl,imx8qxp-lpuart";
+};
+
+&i2c0 {
+ compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
+};
+
+&i2c1 {
+ compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
+};
+
+&i2c2 {
+ compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
+};
+
+&i2c3 {
+ compatible = "fsl,imx8qm-lpi2c", "fsl,imx7ulp-lpi2c";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm-ss-lsio.dtsi b/arch/arm64/boot/dts/freescale/imx8qm-ss-lsio.dtsi
new file mode 100644
index 000000000000..30896610c654
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qm-ss-lsio.dtsi
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2019-2020 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+&lsio_gpio0 {
+ compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_gpio1 {
+ compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_gpio2 {
+ compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_gpio3 {
+ compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_gpio4 {
+ compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_gpio5 {
+ compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_gpio6 {
+ compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_gpio7 {
+ compatible = "fsl,imx8qm-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_mu0 {
+ compatible = "fsl,imx8-mu-scu", "fsl,imx8qm-mu", "fsl,imx6sx-mu";
+};
+
+&lsio_mu1 {
+ compatible = "fsl,imx8-mu-scu", "fsl,imx8qm-mu", "fsl,imx6sx-mu";
+};
+
+&lsio_mu2 {
+ compatible = "fsl,imx8-mu-scu", "fsl,imx8qm-mu", "fsl,imx6sx-mu";
+};
+
+&lsio_mu3 {
+ compatible = "fsl,imx8-mu-scu", "fsl,imx8qm-mu", "fsl,imx6sx-mu";
+};
+
+&lsio_mu4 {
+ compatible = "fsl,imx8-mu-scu", "fsl,imx8qm-mu", "fsl,imx6sx-mu";
+};
+
+&lsio_mu13 {
+ compatible = "fsl,imx8qm-mu", "fsl,imx6sx-mu";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8qm.dtsi b/arch/arm64/boot/dts/freescale/imx8qm.dtsi
new file mode 100644
index 000000000000..12cd059b339b
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qm.dtsi
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018-2019 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+#include <dt-bindings/clock/imx8-lpcg.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/pinctrl/pads-imx8qm.h>
+
+/ {
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ mmc0 = &usdhc1;
+ mmc1 = &usdhc2;
+ mmc2 = &usdhc3;
+ serial0 = &lpuart0;
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&A53_0>;
+ };
+ core1 {
+ cpu = <&A53_1>;
+ };
+ core2 {
+ cpu = <&A53_2>;
+ };
+ core3 {
+ cpu = <&A53_3>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&A72_0>;
+ };
+ core1 {
+ cpu = <&A72_1>;
+ };
+ };
+ };
+
+ A53_0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ };
+
+ A53_1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x1>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ };
+
+ A53_2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x2>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ };
+
+ A53_3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x3>;
+ enable-method = "psci";
+ next-level-cache = <&A53_L2>;
+ };
+
+ A72_0: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72", "arm,armv8";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ next-level-cache = <&A72_L2>;
+ };
+
+ A72_1: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a72", "arm,armv8";
+ reg = <0x0 0x101>;
+ enable-method = "psci";
+ next-level-cache = <&A72_L2>;
+ };
+
+ A53_L2: l2-cache0 {
+ compatible = "cache";
+ };
+
+ A72_L2: l2-cache1 {
+ compatible = "cache";
+ };
+ };
+
+ gic: interrupt-controller@51a00000 {
+ compatible = "arm,gic-v3";
+ reg = <0x0 0x51a00000 0 0x10000>, /* GIC Dist */
+ <0x0 0x51b00000 0 0xC0000>, /* GICR */
+ <0x0 0x52000000 0 0x2000>, /* GICC */
+ <0x0 0x52010000 0 0x1000>, /* GICH */
+ <0x0 0x52020000 0 0x20000>; /* GICV */
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gic>;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, /* Physical Secure */
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, /* Physical Non-Secure */
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, /* Virtual */
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; /* Hypervisor */
+ };
+
+ scu {
+ compatible = "fsl,imx-scu";
+ mbox-names = "tx0",
+ "rx0",
+ "gip3";
+ mboxes = <&lsio_mu1 0 0
+ &lsio_mu1 1 0
+ &lsio_mu1 3 3>;
+
+ pd: imx8qx-pd {
+ compatible = "fsl,imx8qm-scu-pd", "fsl,scu-pd";
+ #power-domain-cells = <1>;
+ };
+
+ clk: clock-controller {
+ compatible = "fsl,imx8qxp-clk", "fsl,scu-clk";
+ #clock-cells = <2>;
+ };
+
+ iomuxc: pinctrl {
+ compatible = "fsl,imx8qm-iomuxc";
+ };
+
+ };
+
+ /* sorted in register address */
+ #include "imx8-ss-dma.dtsi"
+ #include "imx8-ss-conn.dtsi"
+ #include "imx8-ss-lsio.dtsi"
+};
+
+#include "imx8qm-ss-dma.dtsi"
+#include "imx8qm-ss-conn.dtsi"
+#include "imx8qm-ss-lsio.dtsi"
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-ai_ml.dts b/arch/arm64/boot/dts/freescale/imx8qxp-ai_ml.dts
index a3f8cf195974..47bb68823b24 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp-ai_ml.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-ai_ml.dts
@@ -13,13 +13,13 @@
compatible = "einfochips,imx8qxp-ai_ml", "fsl,imx8qxp";
aliases {
- serial1 = &adma_lpuart1;
- serial2 = &adma_lpuart2;
- serial3 = &adma_lpuart3;
+ serial1 = &lpuart1;
+ serial2 = &lpuart2;
+ serial3 = &lpuart3;
};
chosen {
- stdout-path = &adma_lpuart2;
+ stdout-path = &lpuart2;
};
memory@80000000 {
@@ -82,7 +82,7 @@
};
/* BT */
-&adma_lpuart0 {
+&lpuart0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lpuart0>;
uart-has-rtscts;
@@ -90,21 +90,21 @@
};
/* LS-UART0 */
-&adma_lpuart1 {
+&lpuart1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lpuart1>;
status = "okay";
};
/* Debug */
-&adma_lpuart2 {
+&lpuart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lpuart2>;
status = "okay";
};
/* PCI-E UART */
-&adma_lpuart3 {
+&lpuart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lpuart3>;
status = "okay";
@@ -133,7 +133,7 @@
&usdhc1 {
#address-cells = <1>;
#size-cells = <0>;
- assigned-clocks = <&clk IMX_CONN_SDHC0_CLK>;
+ assigned-clocks = <&clk IMX_SC_R_SDHC_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <200000000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
@@ -151,7 +151,7 @@
/* SD */
&usdhc2 {
- assigned-clocks = <&clk IMX_CONN_SDHC1_CLK>;
+ assigned-clocks = <&clk IMX_SC_R_SDHC_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <200000000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-colibri-eval-v3.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp-colibri-eval-v3.dtsi
index c7336f387605..144fc9e82da7 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp-colibri-eval-v3.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-colibri-eval-v3.dtsi
@@ -26,7 +26,7 @@
};
};
-&adma_i2c1 {
+&i2c1 {
status = "okay";
/* M41T0M6 real time clock on carrier board */
@@ -37,17 +37,17 @@
};
/* Colibri UART_B */
-&adma_lpuart0 {
+&lpuart0 {
status= "okay";
};
/* Colibri UART_C */
-&adma_lpuart2 {
+&lpuart2 {
status= "okay";
};
/* Colibri UART_A */
-&adma_lpuart3 {
+&lpuart3 {
status= "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-colibri.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp-colibri.dtsi
index f38acff0d25c..89d70e030433 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp-colibri.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-colibri.dtsi
@@ -10,7 +10,7 @@
compatible = "toradex,colibri-imx8x", "fsl,imx8qxp";
chosen {
- stdout-path = &adma_lpuart3;
+ stdout-path = &lpuart3;
};
reg_module_3v3: regulator-module-3v3 {
@@ -22,7 +22,7 @@
};
/* On-module I2C */
-&adma_i2c0 {
+&i2c0 {
#address-cells = <1>;
#size-cells = <0>;
clock-frequency = <100000>;
@@ -49,7 +49,7 @@
};
/* Colibri I2C */
-&adma_i2c1 {
+&i2c1 {
#address-cells = <1>;
#size-cells = <0>;
clock-frequency = <100000>;
@@ -58,19 +58,19 @@
};
/* Colibri UART_B */
-&adma_lpuart0 {
+&lpuart0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lpuart0>;
};
/* Colibri UART_C */
-&adma_lpuart2 {
+&lpuart2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lpuart2>;
};
/* Colibri UART_A */
-&adma_lpuart3 {
+&lpuart3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lpuart3>, <&pinctrl_lpuart3_ctrl>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
index 46437d3c7a04..863232a47004 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
@@ -12,7 +12,7 @@
compatible = "fsl,imx8qxp-mek", "fsl,imx8qxp";
chosen {
- stdout-path = &adma_lpuart0;
+ stdout-path = &lpuart0;
};
memory@80000000 {
@@ -30,11 +30,30 @@
};
};
-&adma_dsp {
+&dsp {
status = "okay";
};
-&adma_i2c1 {
+&fec1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_fec1>;
+ phy-mode = "rgmii-id";
+ phy-handle = <&ethphy0>;
+ fsl,magic-packet;
+ status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-ieee802.3-c22";
+ reg = <0>;
+ };
+ };
+};
+
+&i2c1 {
#address-cells = <1>;
#size-cells = <0>;
clock-frequency = <100000>;
@@ -110,31 +129,12 @@
};
};
-&adma_lpuart0 {
+&lpuart0 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_lpuart0>;
status = "okay";
};
-&fec1 {
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_fec1>;
- phy-mode = "rgmii-id";
- phy-handle = <&ethphy0>;
- fsl,magic-packet;
- status = "okay";
-
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
-
- ethphy0: ethernet-phy@0 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <0>;
- };
- };
-};
-
&scu_key {
status = "okay";
};
@@ -173,7 +173,7 @@
};
&usdhc1 {
- assigned-clocks = <&clk IMX_CONN_SDHC0_CLK>;
+ assigned-clocks = <&clk IMX_SC_R_SDHC_0 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <200000000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc1>;
@@ -185,7 +185,7 @@
};
&usdhc2 {
- assigned-clocks = <&clk IMX_CONN_SDHC1_CLK>;
+ assigned-clocks = <&clk IMX_SC_R_SDHC_1 IMX_SC_PM_CLK_PER>;
assigned-clock-rates = <200000000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2>;
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-ss-adma.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp-ss-adma.dtsi
new file mode 100644
index 000000000000..dc1daa8dc72f
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-ss-adma.dtsi
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018-2020 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+&lpuart0 {
+ compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
+};
+
+&lpuart1 {
+ compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
+};
+
+&lpuart2 {
+ compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
+};
+
+&lpuart3 {
+ compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
+};
+
+&i2c0 {
+ compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
+};
+
+&i2c1 {
+ compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
+};
+
+&i2c2 {
+ compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
+};
+
+&i2c3 {
+ compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-ss-conn.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp-ss-conn.dtsi
new file mode 100644
index 000000000000..f5f58959f65c
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-ss-conn.dtsi
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018-2020 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+&usdhc1 {
+ compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
+};
+
+&usdhc2 {
+ compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
+};
+
+&usdhc3 {
+ compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
+};
+
+&fec1 {
+ compatible = "fsl,imx8qxp-fec", "fsl,imx6sx-fec";
+};
+
+&fec2 {
+ compatible = "fsl,imx8qxp-fec", "fsl,imx6sx-fec";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-ss-lsio.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp-ss-lsio.dtsi
new file mode 100644
index 000000000000..11395479ffc0
--- /dev/null
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-ss-lsio.dtsi
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018-2020 NXP
+ * Dong Aisheng <aisheng.dong@nxp.com>
+ */
+
+&lsio_gpio0 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_gpio1 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_gpio2 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_gpio3 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_gpio4 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_gpio5 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_gpio6 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_gpio7 {
+ compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
+};
+
+&lsio_mu0 {
+ compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+};
+
+&lsio_mu1 {
+ compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+};
+
+&lsio_mu2 {
+ compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+};
+
+&lsio_mu3 {
+ compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+};
+
+&lsio_mu4 {
+ compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+};
+
+&lsio_mu13 {
+ compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
+};
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
index e46faac1fe71..1e6b4995091e 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8qxp.dtsi
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2016 Freescale Semiconductor, Inc.
- * Copyright 2017-2018 NXP
+ * Copyright 2017-2020 NXP
* Dong Aisheng <aisheng.dong@nxp.com>
*/
#include <dt-bindings/clock/imx8-clock.h>
+#include <dt-bindings/clock/imx8-lpcg.h>
#include <dt-bindings/firmware/imx/rsrc.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
@@ -29,10 +30,10 @@
gpio5 = &lsio_gpio5;
gpio6 = &lsio_gpio6;
gpio7 = &lsio_gpio7;
- i2c0 = &adma_i2c0;
- i2c1 = &adma_i2c1;
- i2c2 = &adma_i2c2;
- i2c3 = &adma_i2c3;
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
mmc0 = &usdhc1;
mmc1 = &usdhc2;
mmc2 = &usdhc3;
@@ -41,10 +42,10 @@
mu2 = &lsio_mu2;
mu3 = &lsio_mu3;
mu4 = &lsio_mu4;
- serial0 = &adma_lpuart0;
- serial1 = &adma_lpuart1;
- serial2 = &adma_lpuart2;
- serial3 = &adma_lpuart3;
+ serial0 = &lpuart0;
+ serial1 = &lpuart1;
+ serial2 = &lpuart2;
+ serial3 = &lpuart3;
};
cpus {
@@ -58,7 +59,7 @@
reg = <0x0 0x0>;
enable-method = "psci";
next-level-cache = <&A35_L2>;
- clocks = <&clk IMX_A35_CLK>;
+ clocks = <&clk IMX_SC_R_A35 IMX_SC_PM_CLK_CPU>;
operating-points-v2 = <&a35_opp_table>;
#cooling-cells = <2>;
};
@@ -69,7 +70,7 @@
reg = <0x0 0x1>;
enable-method = "psci";
next-level-cache = <&A35_L2>;
- clocks = <&clk IMX_A35_CLK>;
+ clocks = <&clk IMX_SC_R_A35 IMX_SC_PM_CLK_CPU>;
operating-points-v2 = <&a35_opp_table>;
#cooling-cells = <2>;
};
@@ -80,7 +81,7 @@
reg = <0x0 0x2>;
enable-method = "psci";
next-level-cache = <&A35_L2>;
- clocks = <&clk IMX_A35_CLK>;
+ clocks = <&clk IMX_SC_R_A35 IMX_SC_PM_CLK_CPU>;
operating-points-v2 = <&a35_opp_table>;
#cooling-cells = <2>;
};
@@ -91,7 +92,7 @@
reg = <0x0 0x3>;
enable-method = "psci";
next-level-cache = <&A35_L2>;
- clocks = <&clk IMX_A35_CLK>;
+ clocks = <&clk IMX_SC_R_A35 IMX_SC_PM_CLK_CPU>;
operating-points-v2 = <&a35_opp_table>;
#cooling-cells = <2>;
};
@@ -158,9 +159,14 @@
&lsio_mu1 1 0
&lsio_mu1 3 3>;
+ pd: imx8qx-pd {
+ compatible = "fsl,imx8qxp-scu-pd", "fsl,scu-pd";
+ #power-domain-cells = <1>;
+ };
+
clk: clock-controller {
compatible = "fsl,imx8qxp-clk";
- #clock-cells = <1>;
+ #clock-cells = <2>;
clocks = <&xtal32k &xtal24m>;
clock-names = "xtal_32KHz", "xtal_24Mhz";
};
@@ -175,11 +181,6 @@
#size-cells = <1>;
};
- pd: imx8qx-pd {
- compatible = "fsl,imx8qxp-scu-pd";
- #power-domain-cells = <1>;
- };
-
scu_key: scu-key {
compatible = "fsl,imx8qxp-sc-key", "fsl,imx-sc-key";
linux,keycodes = <KEY_POWER>;
@@ -223,380 +224,6 @@
clock-output-names = "xtal_24MHz";
};
- adma_subsys: bus@59000000 {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x59000000 0x0 0x59000000 0x2000000>;
-
- adma_lpcg: clock-controller@59000000 {
- compatible = "fsl,imx8qxp-lpcg-adma";
- reg = <0x59000000 0x2000000>;
- #clock-cells = <1>;
- };
-
- adma_dsp: dsp@596e8000 {
- compatible = "fsl,imx8qxp-dsp";
- reg = <0x596e8000 0x88000>;
- clocks = <&adma_lpcg IMX_ADMA_LPCG_DSP_IPG_CLK>,
- <&adma_lpcg IMX_ADMA_LPCG_OCRAM_IPG_CLK>,
- <&adma_lpcg IMX_ADMA_LPCG_DSP_CORE_CLK>;
- clock-names = "ipg", "ocram", "core";
- power-domains = <&pd IMX_SC_R_MU_13A>,
- <&pd IMX_SC_R_MU_13B>,
- <&pd IMX_SC_R_DSP>,
- <&pd IMX_SC_R_DSP_RAM>;
- mbox-names = "txdb0", "txdb1",
- "rxdb0", "rxdb1";
- mboxes = <&lsio_mu13 2 0>,
- <&lsio_mu13 2 1>,
- <&lsio_mu13 3 0>,
- <&lsio_mu13 3 1>;
- memory-region = <&dsp_reserved>;
- status = "disabled";
- };
-
- adma_lpuart0: serial@5a060000 {
- compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
- reg = <0x5a060000 0x1000>;
- interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&adma_lpcg IMX_ADMA_LPCG_UART0_IPG_CLK>,
- <&adma_lpcg IMX_ADMA_LPCG_UART0_BAUD_CLK>;
- clock-names = "ipg", "baud";
- power-domains = <&pd IMX_SC_R_UART_0>;
- status = "disabled";
- };
-
- adma_lpuart1: serial@5a070000 {
- compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
- reg = <0x5a070000 0x1000>;
- interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&adma_lpcg IMX_ADMA_LPCG_UART1_IPG_CLK>,
- <&adma_lpcg IMX_ADMA_LPCG_UART1_BAUD_CLK>;
- clock-names = "ipg", "baud";
- power-domains = <&pd IMX_SC_R_UART_1>;
- status = "disabled";
- };
-
- adma_lpuart2: serial@5a080000 {
- compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
- reg = <0x5a080000 0x1000>;
- interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&adma_lpcg IMX_ADMA_LPCG_UART2_IPG_CLK>,
- <&adma_lpcg IMX_ADMA_LPCG_UART2_BAUD_CLK>;
- clock-names = "ipg", "baud";
- power-domains = <&pd IMX_SC_R_UART_2>;
- status = "disabled";
- };
-
- adma_lpuart3: serial@5a090000 {
- compatible = "fsl,imx8qxp-lpuart", "fsl,imx7ulp-lpuart";
- reg = <0x5a090000 0x1000>;
- interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&adma_lpcg IMX_ADMA_LPCG_UART3_IPG_CLK>,
- <&adma_lpcg IMX_ADMA_LPCG_UART3_BAUD_CLK>;
- clock-names = "ipg", "baud";
- power-domains = <&pd IMX_SC_R_UART_3>;
- status = "disabled";
- };
-
- adma_i2c0: i2c@5a800000 {
- compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
- reg = <0x5a800000 0x4000>;
- interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C0_CLK>;
- clock-names = "per";
- assigned-clocks = <&clk IMX_ADMA_I2C0_CLK>;
- assigned-clock-rates = <24000000>;
- power-domains = <&pd IMX_SC_R_I2C_0>;
- status = "disabled";
- };
-
- adma_i2c1: i2c@5a810000 {
- compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
- reg = <0x5a810000 0x4000>;
- interrupts = <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C1_CLK>;
- clock-names = "per";
- assigned-clocks = <&clk IMX_ADMA_I2C1_CLK>;
- assigned-clock-rates = <24000000>;
- power-domains = <&pd IMX_SC_R_I2C_1>;
- status = "disabled";
- };
-
- adma_i2c2: i2c@5a820000 {
- compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
- reg = <0x5a820000 0x4000>;
- interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C2_CLK>;
- clock-names = "per";
- assigned-clocks = <&clk IMX_ADMA_I2C2_CLK>;
- assigned-clock-rates = <24000000>;
- power-domains = <&pd IMX_SC_R_I2C_2>;
- status = "disabled";
- };
-
- adma_i2c3: i2c@5a830000 {
- compatible = "fsl,imx8qxp-lpi2c", "fsl,imx7ulp-lpi2c";
- reg = <0x5a830000 0x4000>;
- interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&adma_lpcg IMX_ADMA_LPCG_I2C3_CLK>;
- clock-names = "per";
- assigned-clocks = <&clk IMX_ADMA_I2C3_CLK>;
- assigned-clock-rates = <24000000>;
- power-domains = <&pd IMX_SC_R_I2C_3>;
- status = "disabled";
- };
- };
-
- conn_subsys: bus@5b000000 {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x5b000000 0x0 0x5b000000 0x1000000>;
-
- conn_lpcg: clock-controller@5b200000 {
- compatible = "fsl,imx8qxp-lpcg-conn";
- reg = <0x5b200000 0xb0000>;
- #clock-cells = <1>;
- };
-
- usdhc1: mmc@5b010000 {
- compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
- interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x5b010000 0x10000>;
- clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC0_IPG_CLK>,
- <&conn_lpcg IMX_CONN_LPCG_SDHC0_PER_CLK>,
- <&conn_lpcg IMX_CONN_LPCG_SDHC0_HCLK>;
- clock-names = "ipg", "per", "ahb";
- power-domains = <&pd IMX_SC_R_SDHC_0>;
- status = "disabled";
- };
-
- usdhc2: mmc@5b020000 {
- compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
- interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x5b020000 0x10000>;
- clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC1_IPG_CLK>,
- <&conn_lpcg IMX_CONN_LPCG_SDHC1_PER_CLK>,
- <&conn_lpcg IMX_CONN_LPCG_SDHC1_HCLK>;
- clock-names = "ipg", "per", "ahb";
- power-domains = <&pd IMX_SC_R_SDHC_1>;
- fsl,tuning-start-tap = <20>;
- fsl,tuning-step= <2>;
- status = "disabled";
- };
-
- usdhc3: mmc@5b030000 {
- compatible = "fsl,imx8qxp-usdhc", "fsl,imx7d-usdhc";
- interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
- reg = <0x5b030000 0x10000>;
- clocks = <&conn_lpcg IMX_CONN_LPCG_SDHC2_IPG_CLK>,
- <&conn_lpcg IMX_CONN_LPCG_SDHC2_PER_CLK>,
- <&conn_lpcg IMX_CONN_LPCG_SDHC2_HCLK>;
- clock-names = "ipg", "per", "ahb";
- power-domains = <&pd IMX_SC_R_SDHC_2>;
- status = "disabled";
- };
-
- fec1: ethernet@5b040000 {
- compatible = "fsl,imx8qxp-fec", "fsl,imx6sx-fec";
- reg = <0x5b040000 0x10000>;
- interrupts = <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&conn_lpcg IMX_CONN_LPCG_ENET0_IPG_CLK>,
- <&conn_lpcg IMX_CONN_LPCG_ENET0_AHB_CLK>,
- <&conn_lpcg IMX_CONN_LPCG_ENET0_TX_CLK>,
- <&conn_lpcg IMX_CONN_LPCG_ENET0_ROOT_CLK>;
- clock-names = "ipg", "ahb", "enet_clk_ref", "ptp";
- fsl,num-tx-queues=<3>;
- fsl,num-rx-queues=<3>;
- power-domains = <&pd IMX_SC_R_ENET_0>;
- status = "disabled";
- };
-
- fec2: ethernet@5b050000 {
- compatible = "fsl,imx8qxp-fec", "fsl,imx6sx-fec";
- reg = <0x5b050000 0x10000>;
- interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&conn_lpcg IMX_CONN_LPCG_ENET1_IPG_CLK>,
- <&conn_lpcg IMX_CONN_LPCG_ENET1_AHB_CLK>,
- <&conn_lpcg IMX_CONN_LPCG_ENET1_TX_CLK>,
- <&conn_lpcg IMX_CONN_LPCG_ENET1_ROOT_CLK>;
- clock-names = "ipg", "ahb", "enet_clk_ref", "ptp";
- fsl,num-tx-queues=<3>;
- fsl,num-rx-queues=<3>;
- power-domains = <&pd IMX_SC_R_ENET_1>;
- status = "disabled";
- };
- };
-
- ddr_subsyss: bus@5c000000 {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x5c000000 0x0 0x5c000000 0x1000000>;
-
- ddr-pmu@5c020000 {
- compatible = "fsl,imx8-ddr-pmu";
- reg = <0x5c020000 0x10000>;
- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
- };
- };
-
- lsio_subsys: bus@5d000000 {
- compatible = "simple-bus";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x5d000000 0x0 0x5d000000 0x1000000>;
-
- lsio_gpio0: gpio@5d080000 {
- compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
- reg = <0x5d080000 0x10000>;
- interrupts = <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- power-domains = <&pd IMX_SC_R_GPIO_0>;
- };
-
- lsio_gpio1: gpio@5d090000 {
- compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
- reg = <0x5d090000 0x10000>;
- interrupts = <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- power-domains = <&pd IMX_SC_R_GPIO_1>;
- };
-
- lsio_gpio2: gpio@5d0a0000 {
- compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
- reg = <0x5d0a0000 0x10000>;
- interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- power-domains = <&pd IMX_SC_R_GPIO_2>;
- };
-
- lsio_gpio3: gpio@5d0b0000 {
- compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
- reg = <0x5d0b0000 0x10000>;
- interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- power-domains = <&pd IMX_SC_R_GPIO_3>;
- };
-
- lsio_gpio4: gpio@5d0c0000 {
- compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
- reg = <0x5d0c0000 0x10000>;
- interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- power-domains = <&pd IMX_SC_R_GPIO_4>;
- };
-
- lsio_gpio5: gpio@5d0d0000 {
- compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
- reg = <0x5d0d0000 0x10000>;
- interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- power-domains = <&pd IMX_SC_R_GPIO_5>;
- };
-
- lsio_gpio6: gpio@5d0e0000 {
- compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
- reg = <0x5d0e0000 0x10000>;
- interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- power-domains = <&pd IMX_SC_R_GPIO_6>;
- };
-
- lsio_gpio7: gpio@5d0f0000 {
- compatible = "fsl,imx8qxp-gpio", "fsl,imx35-gpio";
- reg = <0x5d0f0000 0x10000>;
- interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
- gpio-controller;
- #gpio-cells = <2>;
- interrupt-controller;
- #interrupt-cells = <2>;
- power-domains = <&pd IMX_SC_R_GPIO_7>;
- };
-
- lsio_mu0: mailbox@5d1b0000 {
- compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
- reg = <0x5d1b0000 0x10000>;
- interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <2>;
- status = "disabled";
- };
-
- lsio_mu1: mailbox@5d1c0000 {
- compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
- reg = <0x5d1c0000 0x10000>;
- interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <2>;
- };
-
- lsio_mu2: mailbox@5d1d0000 {
- compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
- reg = <0x5d1d0000 0x10000>;
- interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <2>;
- status = "disabled";
- };
-
- lsio_mu3: mailbox@5d1e0000 {
- compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
- reg = <0x5d1e0000 0x10000>;
- interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <2>;
- status = "disabled";
- };
-
- lsio_mu4: mailbox@5d1f0000 {
- compatible = "fsl,imx8-mu-scu", "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
- reg = <0x5d1f0000 0x10000>;
- interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <2>;
- status = "disabled";
- };
-
- lsio_mu13: mailbox@5d280000 {
- compatible = "fsl,imx8qxp-mu", "fsl,imx6sx-mu";
- reg = <0x5d280000 0x10000>;
- interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
- #mbox-cells = <2>;
- power-domains = <&pd IMX_SC_R_MU_13A>;
- };
-
- lsio_lpcg: clock-controller@5d400000 {
- compatible = "fsl,imx8qxp-lpcg-lsio";
- reg = <0x5d400000 0x400000>;
- #clock-cells = <1>;
- };
- };
-
thermal_zones: thermal-zones {
cpu-thermal0 {
polling-delay-passive = <250>;
@@ -629,4 +256,14 @@
};
};
};
+
+ /* sorted in register address */
+ #include "imx8-ss-adma.dtsi"
+ #include "imx8-ss-conn.dtsi"
+ #include "imx8-ss-ddr.dtsi"
+ #include "imx8-ss-lsio.dtsi"
};
+
+#include "imx8qxp-ss-adma.dtsi"
+#include "imx8qxp-ss-conn.dtsi"
+#include "imx8qxp-ss-lsio.dtsi"
diff --git a/arch/arm64/boot/dts/intel/Makefile b/arch/arm64/boot/dts/intel/Makefile
index 3a052540605b..0b5477442263 100644
--- a/arch/arm64/boot/dts/intel/Makefile
+++ b/arch/arm64/boot/dts/intel/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-dtb-$(CONFIG_ARCH_AGILEX) += socfpga_agilex_socdk.dtb \
- socfpga_agilex_socdk_nand.dtb
+dtb-$(CONFIG_ARCH_INTEL_SOCFPGA) += socfpga_agilex_socdk.dtb \
+ socfpga_agilex_socdk_nand.dtb \
+ socfpga_n5x_socdk.dtb
dtb-$(CONFIG_ARCH_KEEMBAY) += keembay-evm.dtb
-dtb-$(CONFIG_ARCH_N5X) += socfpga_n5x_socdk.dtb
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
index 07c099b4ed5b..163f33b46e4f 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
@@ -6,6 +6,7 @@
/dts-v1/;
#include <dt-bindings/reset/altr,rst-mgr-s10.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/agilex-clock.h>
/ {
@@ -61,10 +62,10 @@
pmu {
compatible = "arm,armv8-pmuv3";
- interrupts = <0 170 4>,
- <0 171 4>,
- <0 172 4>,
- <0 173 4>;
+ interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
interrupt-affinity = <&cpu0>,
<&cpu1>,
<&cpu2>,
@@ -77,7 +78,7 @@
method = "smc";
};
- intc: intc@fffc1000 {
+ intc: interrupt-controller@fffc1000 {
compatible = "arm,gic-400", "arm,cortex-a15-gic";
#interrupt-cells = <3>;
interrupt-controller;
@@ -87,6 +88,48 @@
<0x0 0xfffc6000 0x0 0x2000>;
};
+ clocks {
+ cb_intosc_hs_div2_clk: cb-intosc-hs-div2-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ };
+
+ cb_intosc_ls_clk: cb-intosc-ls-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ };
+
+ f2s_free_clk: f2s-free-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ };
+
+ osc1: osc1 {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ };
+
+ qspi_clk: qspi-clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <200000000>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ usbphy0: usbphy {
+ #phy-cells = <0>;
+ compatible = "usb-nop-xceiv";
+ };
+
soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -108,38 +151,10 @@
#clock-cells = <1>;
};
- clocks {
- cb_intosc_hs_div2_clk: cb-intosc-hs-div2-clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- };
-
- cb_intosc_ls_clk: cb-intosc-ls-clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- };
-
- f2s_free_clk: f2s-free-clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- };
-
- osc1: osc1 {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- };
-
- qspi_clk: qspi-clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <200000000>;
- };
- };
-
gmac0: ethernet@ff800000 {
compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
reg = <0xff800000 0x2000>;
- interrupts = <0 90 4>;
+ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
mac-address = [00 00 00 00 00 00];
resets = <&rst EMAC0_RESET>, <&rst EMAC0_OCP_RESET>;
@@ -157,7 +172,7 @@
gmac1: ethernet@ff802000 {
compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
reg = <0xff802000 0x2000>;
- interrupts = <0 91 4>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
mac-address = [00 00 00 00 00 00];
resets = <&rst EMAC1_RESET>, <&rst EMAC1_OCP_RESET>;
@@ -175,7 +190,7 @@
gmac2: ethernet@ff804000 {
compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
reg = <0xff804000 0x2000>;
- interrupts = <0 92 4>;
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
mac-address = [00 00 00 00 00 00];
resets = <&rst EMAC2_RESET>, <&rst EMAC2_OCP_RESET>;
@@ -206,7 +221,7 @@
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
- interrupts = <0 110 4>;
+ interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -226,7 +241,7 @@
reg = <0>;
interrupt-controller;
#interrupt-cells = <2>;
- interrupts = <0 111 4>;
+ interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -235,7 +250,7 @@
#size-cells = <0>;
compatible = "snps,designware-i2c";
reg = <0xffc02800 0x100>;
- interrupts = <0 103 4>;
+ interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
resets = <&rst I2C0_RESET>;
clocks = <&clkmgr AGILEX_L4_SP_CLK>;
status = "disabled";
@@ -246,7 +261,7 @@
#size-cells = <0>;
compatible = "snps,designware-i2c";
reg = <0xffc02900 0x100>;
- interrupts = <0 104 4>;
+ interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
resets = <&rst I2C1_RESET>;
clocks = <&clkmgr AGILEX_L4_SP_CLK>;
status = "disabled";
@@ -257,7 +272,7 @@
#size-cells = <0>;
compatible = "snps,designware-i2c";
reg = <0xffc02a00 0x100>;
- interrupts = <0 105 4>;
+ interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
resets = <&rst I2C2_RESET>;
clocks = <&clkmgr AGILEX_L4_SP_CLK>;
status = "disabled";
@@ -268,7 +283,7 @@
#size-cells = <0>;
compatible = "snps,designware-i2c";
reg = <0xffc02b00 0x100>;
- interrupts = <0 106 4>;
+ interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
resets = <&rst I2C3_RESET>;
clocks = <&clkmgr AGILEX_L4_SP_CLK>;
status = "disabled";
@@ -279,7 +294,7 @@
#size-cells = <0>;
compatible = "snps,designware-i2c";
reg = <0xffc02c00 0x100>;
- interrupts = <0 107 4>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
resets = <&rst I2C4_RESET>;
clocks = <&clkmgr AGILEX_L4_SP_CLK>;
status = "disabled";
@@ -290,7 +305,7 @@
#size-cells = <0>;
compatible = "altr,socfpga-dw-mshc";
reg = <0xff808000 0x1000>;
- interrupts = <0 96 4>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
fifo-depth = <0x400>;
resets = <&rst SDMMC_RESET>;
reset-names = "reset";
@@ -301,14 +316,14 @@
status = "disabled";
};
- nand: nand@ffb90000 {
+ nand: nand-controller@ffb90000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "altr,socfpga-denali-nand";
reg = <0xffb90000 0x10000>,
<0xffb80000 0x1000>;
reg-names = "nand_data", "denali_reg";
- interrupts = <0 97 4>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clkmgr AGILEX_NAND_CLK>,
<&clkmgr AGILEX_NAND_X_CLK>,
<&clkmgr AGILEX_NAND_ECC_CLK>;
@@ -325,15 +340,15 @@
pdma: pdma@ffda0000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0xffda0000 0x1000>;
- interrupts = <0 81 4>,
- <0 82 4>,
- <0 83 4>,
- <0 84 4>,
- <0 85 4>,
- <0 86 4>,
- <0 87 4>,
- <0 88 4>,
- <0 89 4>;
+ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
#dma-channels = <8>;
#dma-requests = <32>;
@@ -355,17 +370,43 @@
#global-interrupts = <2>;
#iommu-cells = <1>;
interrupt-parent = <&intc>;
- interrupts = <0 128 4>, /* Global Secure Fault */
- <0 129 4>, /* Global Non-secure Fault */
+ /* Global Secure Fault */
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ /* Global Non-secure Fault */
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
/* Non-secure Context Interrupts (32) */
- <0 138 4>, <0 139 4>, <0 140 4>, <0 141 4>,
- <0 142 4>, <0 143 4>, <0 144 4>, <0 145 4>,
- <0 146 4>, <0 147 4>, <0 148 4>, <0 149 4>,
- <0 150 4>, <0 151 4>, <0 152 4>, <0 153 4>,
- <0 154 4>, <0 155 4>, <0 156 4>, <0 157 4>,
- <0 158 4>, <0 159 4>, <0 160 4>, <0 161 4>,
- <0 162 4>, <0 163 4>, <0 164 4>, <0 165 4>,
- <0 166 4>, <0 167 4>, <0 168 4>, <0 169 4>;
+ <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
stream-match-mask = <0x7ff0>;
clocks = <&clkmgr AGILEX_MPU_CCU_CLK>,
<&clkmgr AGILEX_L3_MAIN_FREE_CLK>,
@@ -378,7 +419,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0xffda4000 0x1000>;
- interrupts = <0 99 4>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
resets = <&rst SPIM0_RESET>;
reset-names = "spi";
reg-io-width = <4>;
@@ -392,7 +433,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0xffda5000 0x1000>;
- interrupts = <0 100 4>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
resets = <&rst SPIM1_RESET>;
reset-names = "spi";
reg-io-width = <4>;
@@ -406,18 +447,9 @@
reg = <0xffd12000 0x500>;
};
- /* Local timer */
- timer {
- compatible = "arm,armv8-timer";
- interrupts = <1 13 0xf08>,
- <1 14 0xf08>,
- <1 11 0xf08>,
- <1 10 0xf08>;
- };
-
timer0: timer0@ffc03000 {
compatible = "snps,dw-apb-timer";
- interrupts = <0 113 4>;
+ interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
reg = <0xffc03000 0x100>;
clocks = <&clkmgr AGILEX_L4_SP_CLK>;
clock-names = "timer";
@@ -425,7 +457,7 @@
timer1: timer1@ffc03100 {
compatible = "snps,dw-apb-timer";
- interrupts = <0 114 4>;
+ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
reg = <0xffc03100 0x100>;
clocks = <&clkmgr AGILEX_L4_SP_CLK>;
clock-names = "timer";
@@ -433,7 +465,7 @@
timer2: timer2@ffd00000 {
compatible = "snps,dw-apb-timer";
- interrupts = <0 115 4>;
+ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
reg = <0xffd00000 0x100>;
clocks = <&clkmgr AGILEX_L4_SP_CLK>;
clock-names = "timer";
@@ -441,16 +473,16 @@
timer3: timer3@ffd00100 {
compatible = "snps,dw-apb-timer";
- interrupts = <0 116 4>;
+ interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
reg = <0xffd00100 0x100>;
clocks = <&clkmgr AGILEX_L4_SP_CLK>;
clock-names = "timer";
};
- uart0: serial0@ffc02000 {
+ uart0: serial@ffc02000 {
compatible = "snps,dw-apb-uart";
reg = <0xffc02000 0x100>;
- interrupts = <0 108 4>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
resets = <&rst UART0_RESET>;
@@ -458,10 +490,10 @@
clocks = <&clkmgr AGILEX_L4_SP_CLK>;
};
- uart1: serial1@ffc02100 {
+ uart1: serial@ffc02100 {
compatible = "snps,dw-apb-uart";
reg = <0xffc02100 0x100>;
- interrupts = <0 109 4>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
reg-shift = <2>;
reg-io-width = <4>;
resets = <&rst UART1_RESET>;
@@ -469,16 +501,10 @@
status = "disabled";
};
- usbphy0: usbphy@0 {
- #phy-cells = <0>;
- compatible = "usb-nop-xceiv";
- status = "okay";
- };
-
usb0: usb@ffb00000 {
compatible = "snps,dwc2";
reg = <0xffb00000 0x40000>;
- interrupts = <0 93 4>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usbphy0>;
phy-names = "usb2-phy";
resets = <&rst USB0_RESET>, <&rst USB0_OCP_RESET>;
@@ -491,7 +517,7 @@
usb1: usb@ffb40000 {
compatible = "snps,dwc2";
reg = <0xffb40000 0x40000>;
- interrupts = <0 94 4>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
phys = <&usbphy0>;
phy-names = "usb2-phy";
resets = <&rst USB1_RESET>, <&rst USB1_OCP_RESET>;
@@ -504,7 +530,7 @@
watchdog0: watchdog@ffd00200 {
compatible = "snps,dw-wdt";
reg = <0xffd00200 0x100>;
- interrupts = <0 117 4>;
+ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
resets = <&rst WATCHDOG0_RESET>;
clocks = <&clkmgr AGILEX_L4_SYS_FREE_CLK>;
status = "disabled";
@@ -513,7 +539,7 @@
watchdog1: watchdog@ffd00300 {
compatible = "snps,dw-wdt";
reg = <0xffd00300 0x100>;
- interrupts = <0 118 4>;
+ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
resets = <&rst WATCHDOG1_RESET>;
clocks = <&clkmgr AGILEX_L4_SYS_FREE_CLK>;
status = "disabled";
@@ -522,7 +548,7 @@
watchdog2: watchdog@ffd00400 {
compatible = "snps,dw-wdt";
reg = <0xffd00400 0x100>;
- interrupts = <0 125 4>;
+ interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
resets = <&rst WATCHDOG2_RESET>;
clocks = <&clkmgr AGILEX_L4_SYS_FREE_CLK>;
status = "disabled";
@@ -531,7 +557,7 @@
watchdog3: watchdog@ffd00500 {
compatible = "snps,dw-wdt";
reg = <0xffd00500 0x100>;
- interrupts = <0 126 4>;
+ interrupts = <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>;
resets = <&rst WATCHDOG3_RESET>;
clocks = <&clkmgr AGILEX_L4_SYS_FREE_CLK>;
status = "disabled";
@@ -548,7 +574,7 @@
altr,sysmgr-syscon = <&sysmgr>;
#address-cells = <1>;
#size-cells = <1>;
- interrupts = <0 15 4>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
#interrupt-cells = <2>;
ranges;
@@ -607,7 +633,7 @@
#size-cells = <0>;
reg = <0xff8d2000 0x100>,
<0xff900000 0x100000>;
- interrupts = <0 3 4>;
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
cdns,fifo-depth = <128>;
cdns,fifo-width = <4>;
cdns,trigger-address = <0x00000000>;
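
The interrupt-specifier rewrites through socfpga_agilex.dtsi are mechanical: with the newly included dt-bindings/interrupt-controller/arm-gic.h (which in turn pulls in dt-bindings/interrupt-controller/irq.h), the named macros expand to exactly the raw cell values that were there before. The same hunks also move the fixed clocks, the arch timer, and usbphy0 out of the soc node to the tree root, since none of them has MMIO registers and so they do not belong on the simple-bus. From those headers:

#define GIC_SPI 0
#define GIC_PPI 1
#define IRQ_TYPE_LEVEL_HIGH 4
#define IRQ_TYPE_LEVEL_LOW  8
#define GIC_CPU_MASK_SIMPLE(num)  (((1 << (num)) - 1) << 8)

/* so for a shared peripheral interrupt: */
interrupts = <0 90 4>;				/* old */
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;	/* new, same cells */

/* and for the per-CPU timer PPIs: */
interrupts = <1 13 0xf08>;			/* old */
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
						/* 0xf00 | 0x8 = 0xf08 */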
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
index a7a83f29f00b..0f7a0ba344be 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk.dts
@@ -41,14 +41,6 @@
/* We expect the bootloader to fill in the reg */
reg = <0 0 0 0>;
};
-
- soc {
- clocks {
- osc1 {
- clock-frequency = <25000000>;
- };
- };
- };
};
&gpio1 {
@@ -92,6 +84,10 @@
bus-width = <4>;
};
+&osc1 {
+ clock-frequency = <25000000>;
+};
+
&uart0 {
status = "okay";
};
@@ -117,7 +113,7 @@
m25p,fast-read;
cdns,page-size = <256>;
cdns,block-size = <16>;
- cdns,read-delay = <1>;
+ cdns,read-delay = <2>;
cdns,tshsl-ns = <50>;
cdns,tsd2d-ns = <50>;
cdns,tchsh-ns = <4>;
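
The board-side change above follows from that relocation: the clocks node no longer lives under soc, so a board override spelled as a nested soc { clocks { osc1 { ... } } } path would no longer match the node and would silently create an empty duplicate instead. Referencing the label survives such moves:

&osc1 {
	clock-frequency = <25000000>;
};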
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts
index 979aa59a6bd0..cc2dcabf34e3 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex_socdk_nand.dts
@@ -20,17 +20,17 @@
leds {
compatible = "gpio-leds";
- hps0 {
+ led0 {
label = "hps_led0";
gpios = <&portb 20 GPIO_ACTIVE_HIGH>;
};
- hps1 {
+ led1 {
label = "hps_led1";
gpios = <&portb 19 GPIO_ACTIVE_HIGH>;
};
- hps2 {
+ led2 {
label = "hps_led2";
gpios = <&portb 21 GPIO_ACTIVE_HIGH>;
};
@@ -41,14 +41,6 @@
/* We expect the bootloader to fill in the reg */
reg = <0 0 0 0>;
};
-
- soc {
- clocks {
- osc1 {
- clock-frequency = <25000000>;
- };
- };
- };
};
&gpio1 {
@@ -121,6 +113,10 @@
};
};
+&osc1 {
+ clock-frequency = <25000000>;
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts b/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts
index 5f56e2697fee..01f1307ce4ac 100644
--- a/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts
+++ b/arch/arm64/boot/dts/intel/socfpga_n5x_socdk.dts
@@ -23,14 +23,6 @@
/* We expect the bootloader to fill in the reg */
reg = <0 0 0 0>;
};
-
- soc {
- clocks {
- osc1 {
- clock-frequency = <25000000>;
- };
- };
- };
};
&clkmgr {
@@ -44,6 +36,10 @@
bus-width = <4>;
};
+&osc1 {
+ clock-frequency = <25000000>;
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
index d239ab70ed99..53e817c5f6f3 100644
--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Device Tree file for CZ.NIC Turris Mox Board
- * 2019 by Marek Behun <marek.behun@nic.cz>
+ * 2019 by Marek Behún <kabel@kernel.org>
*/
/dts-v1/;
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
index 7a2df148c6a3..456dcd4a7793 100644
--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
@@ -156,7 +156,8 @@
};
nb_periph_clk: nb-periph-clk@13000 {
- compatible = "marvell,armada-3700-periph-clock-nb";
+ compatible = "marvell,armada-3700-periph-clock-nb",
+ "syscon";
reg = <0x13000 0x100>;
clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>,
<&tbg 3>, <&xtalclk>;
diff --git a/arch/arm64/boot/dts/marvell/armada-7040-db.dts b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
index a7eb4e7697a2..51f3e2907597 100644
--- a/arch/arm64/boot/dts/marvell/armada-7040-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-7040-db.dts
@@ -218,6 +218,10 @@
};
};
+&cp0_utmi {
+ status = "okay";
+};
+
&cp0_comphy1 {
cp0_usbh0_con: connector {
compatible = "usb-a-connector";
@@ -226,8 +230,9 @@
};
&cp0_usb3_0 {
- phys = <&cp0_comphy1 0>;
- phy-names = "cp0-usb3h0-comphy";
+ phys = <&cp0_comphy1 0>, <&cp0_utmi0>;
+ phy-names = "cp0-usb3h0-comphy", "utmi";
+ dr_mode = "host";
status = "okay";
};
@@ -239,8 +244,9 @@
};
&cp0_usb3_1 {
- phys = <&cp0_comphy4 1>;
- phy-names = "cp0-usb3h1-comphy";
+ phys = <&cp0_comphy4 1>, <&cp0_utmi1>;
+ phy-names = "cp0-usb3h1-comphy", "utmi";
+ dr_mode = "host";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
index eb01cc96ba7a..0ec0d5625818 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts
@@ -31,6 +31,16 @@
ethernet2 = &cp1_eth2;
};
+ fan: pwm {
+ compatible = "pwm-fan";
+ /* 20% steps */
+ cooling-levels = <0 51 102 153 204 255>;
+ #cooling-cells = <2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&cp0_fan_pwm_pins>;
+ pwms = <&cp0_gpio2 16 40000>;
+ };
+
v_3_3: regulator-3-3v {
compatible = "regulator-fixed";
regulator-name = "v_3_3";
@@ -102,6 +112,123 @@
};
};
+&ap_thermal_ic {
+ polling-delay = <1000>; /* milliseconds */
+ trips {
+ ap_active: trip-active {
+ temperature = <40000>; /* millicelsius */
+ hysteresis = <4000>; /* millicelsius */
+ type = "active";
+ };
+ };
+ cooling-maps {
+ map0 {
+ trip = <&ap_active>;
+ cooling-device = <&fan THERMAL_NO_LIMIT 4>;
+ };
+ map1 {
+ trip = <&ap_crit>;
+ cooling-device = <&fan 4 5>;
+ };
+ };
+};
+
+&cp0_thermal_ic {
+ polling-delay = <1000>; /* milliseconds */
+ trips {
+ cp0_active0: trip-active0 {
+ temperature = <40000>; /* millicelsius */
+ hysteresis = <2500>; /* millicelsius */
+ type = "active";
+ };
+ cp0_active1: trip-active1 {
+ temperature = <45000>; /* millicelsius */
+ hysteresis = <2500>; /* millicelsius */
+ type = "active";
+ };
+ cp0_active2: trip-active2 {
+ temperature = <50000>; /* millicelsius */
+ hysteresis = <2500>; /* millicelsius */
+ type = "active";
+ };
+ cp0_active3: trip-active3 {
+ temperature = <60000>; /* millicelsius */
+ hysteresis = <2500>; /* millicelsius */
+ type = "active";
+ };
+ };
+ cooling-maps {
+ map0 {
+ trip = <&cp0_active0>;
+ cooling-device = <&fan 0 1>;
+ };
+ map1 {
+ trip = <&cp0_active1>;
+ cooling-device = <&fan 1 2>;
+ };
+ map2 {
+ trip = <&cp0_active2>;
+ cooling-device = <&fan 2 3>;
+ };
+ map3 {
+ trip = <&cp0_active3>;
+ cooling-device = <&fan 3 4>;
+ };
+ map4 {
+ trip = <&cp0_crit>;
+ cooling-device = <&fan 4 5>;
+ };
+ };
+};
+
+&cp1_thermal_ic {
+ polling-delay = <1000>; /* milliseconds */
+ trips {
+ cp1_active0: trip-active0 {
+ temperature = <40000>; /* millicelsius */
+ hysteresis = <2500>; /* millicelsius */
+ type = "active";
+ };
+ cp1_active1: trip-active1 {
+ temperature = <45000>; /* millicelsius */
+ hysteresis = <2500>; /* millicelsius */
+ type = "active";
+ };
+ cp1_active2: trip-active2 {
+ temperature = <50000>; /* millicelsius */
+ hysteresis = <2500>; /* millicelsius */
+ type = "active";
+ };
+ cp1_active3: trip-active3 {
+ temperature = <60000>; /* millicelsius */
+ hysteresis = <2500>; /* millicelsius */
+ type = "active";
+ };
+ };
+ cooling-maps {
+ map0 {
+ trip = <&cp1_active0>;
+ cooling-device = <&fan 0 1>;
+ };
+ map1 {
+ trip = <&cp1_active1>;
+ cooling-device = <&fan 1 2>;
+ };
+ map2 {
+ trip = <&cp1_active2>;
+ cooling-device = <&fan 2 3>;
+ };
+ map3 {
+ trip = <&cp1_active3>;
+ cooling-device = <&fan 3 4>;
+ };
+ map4 {
+ trip = <&cp1_crit>;
+ cooling-device = <&fan 4 5>;
+ };
+ };
+};
+
&uart0 {
status = "okay";
pinctrl-0 = <&uart0_pins>;
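
How the new fan and trip points cooperate: pwm-fan registers one cooling state per cooling-levels entry, so <0 51 102 153 204 255> gives states 0-5 at roughly 0/20/40/60/80/100 % PWM duty. Each cooling-map entry bounds the states a trip may request -- map1 under cp0_thermal_ic, for instance, holds the fan between states 1 and 2 while trip-active1 is exceeded, and the crit maps force states 4-5. Reduced to one such pairing:

fan: pwm {
	compatible = "pwm-fan";
	cooling-levels = <0 51 102 153 204 255>;	/* states 0..5 */
	#cooling-cells = <2>;				/* <min max> state */
	pwms = <&cp0_gpio2 16 40000>;			/* 40000 ns = 25 kHz */
};

map1 {
	trip = <&cp0_active1>;
	cooling-device = <&fan 1 2>;	/* clamp duty to 20-40 % */
};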
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-db.dts b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
index 09fb5256f1db..e39e1efc95b6 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-db.dts
+++ b/arch/arm64/boot/dts/marvell/armada-8040-db.dts
@@ -154,8 +154,15 @@
};
/* CON9 on CP0 expansion */
+&cp0_utmi {
+ status = "okay";
+};
+
&cp0_usb3_0 {
usb-phy = <&cp0_usb3_0_phy>;
+ phys = <&cp0_utmi0>;
+ phy-names = "utmi";
+ dr_mode = "host";
status = "okay";
};
@@ -168,8 +175,9 @@
/* CON10 on CP0 expansion */
&cp0_usb3_1 {
- phys = <&cp0_comphy4 1>;
- phy-names = "cp0-usb3h1-comphy";
+ phys = <&cp0_comphy4 1>, <&cp0_utmi1>;
+ phy-names = "usb", "utmi";
+ dr_mode = "host";
status = "okay";
};
@@ -306,14 +314,23 @@
};
};
+&cp1_utmi {
+ status = "okay";
+};
+
/* CON9 on CP1 expansion */
&cp1_usb3_0 {
usb-phy = <&cp1_usb3_0_phy>;
+ phys = <&cp1_utmi0>;
+ phy-names = "utmi";
+ dr_mode = "host";
status = "okay";
};
/* CON10 on CP1 expansion */
&cp1_usb3_1 {
+ phys = <&cp1_utmi1>;
+ phy-names = "utmi";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
index cbcb210cb6d8..adbfecc678b5 100644
--- a/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-8040-mcbin.dtsi
@@ -259,13 +259,23 @@
vqmmc-supply = <&v_3_3>;
};
+&cp0_utmi {
+ status = "okay";
+};
+
&cp0_usb3_0 {
/* J38? - USB2.0 only */
+ phys = <&cp0_utmi0>;
+ phy-names = "utmi";
+ dr_mode = "host";
status = "okay";
};
&cp0_usb3_1 {
/* J38? - USB2.0 only */
+ phys = <&cp0_utmi1>;
+ phy-names = "utmi";
+ dr_mode = "host";
status = "okay";
};
@@ -364,9 +374,14 @@
};
};
+&cp1_utmi {
+ status = "okay";
+};
+
&cp1_usb3_0 {
/* CPS Lane 2 - CON7 */
- phys = <&cp1_comphy2 0>;
- phy-names = "cp1-usb3h0-comphy";
+ phys = <&cp1_comphy2 0>, <&cp1_utmi0>;
+ phy-names = "cp1-usb3h0-comphy", "utmi";
+ dr_mode = "host";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
index 64179a372ecf..3bd2182817fb 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp11x.dtsi
@@ -285,6 +285,25 @@
};
};
+ CP11X_LABEL(utmi): utmi@580000 {
+ compatible = "marvell,cp110-utmi-phy";
+ reg = <0x580000 0x2000>;
+ marvell,system-controller = <&CP11X_LABEL(syscon0)>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ CP11X_LABEL(utmi0): usb-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+
+ CP11X_LABEL(utmi1): usb-phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ };
+ };
+
CP11X_LABEL(usb3_0): usb@500000 {
compatible = "marvell,armada-8k-xhci",
"generic-xhci";
@@ -310,9 +329,11 @@
};
CP11X_LABEL(sata0): sata@540000 {
- compatible = "marvell,armada-8k-ahci";
+ compatible = "marvell,armada-8k-ahci",
+ "generic-ahci";
reg = <0x540000 0x30000>;
dma-coherent;
+ interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&CP11X_LABEL(clk) 1 15>,
<&CP11X_LABEL(clk) 1 16>;
#address-cells = <1>;
@@ -320,12 +341,10 @@
status = "disabled";
sata-port@0 {
- interrupts = <109 IRQ_TYPE_LEVEL_HIGH>;
reg = <0>;
};
sata-port@1 {
- interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
reg = <1>;
};
};
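
armada-cp11x.dtsi is instantiated once per CP110 die, with CP11X_LABEL() expanding to a per-instance prefix (cp0_, cp1_, cp2_ on the boards in this series), so the node added above materialises as e.g. cp0_utmi with child PHYs cp0_utmi0 and cp0_utmi1. It is a plain generic-PHY provider -- each child carries #phy-cells = <0> -- and the board hunks elsewhere in this patch consume it with matching phys/phy-names pairs. Expanded for the first die:

cp0_utmi: utmi@580000 {
	compatible = "marvell,cp110-utmi-phy";
	reg = <0x580000 0x2000>;
	marvell,system-controller = <&cp0_syscon0>;
	#address-cells = <1>;
	#size-cells = <0>;

	cp0_utmi0: usb-phy@0 {
		reg = <0>;
		#phy-cells = <0>;
	};
};

/* board side, as in armada-7040-db.dts above */
&cp0_utmi {
	status = "okay";
};

&cp0_usb3_0 {
	phys = <&cp0_comphy1 0>, <&cp0_utmi0>;
	phy-names = "cp0-usb3h0-comphy", "utmi";
	dr_mode = "host";
	status = "okay";
};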
diff --git a/arch/arm64/boot/dts/marvell/cn9130-db.dts b/arch/arm64/boot/dts/marvell/cn9130-db.dts
index 79020e6d2792..2c2af001619b 100644
--- a/arch/arm64/boot/dts/marvell/cn9130-db.dts
+++ b/arch/arm64/boot/dts/marvell/cn9130-db.dts
@@ -392,14 +392,22 @@
};
};
+&cp0_utmi {
+ status = "okay";
+};
+
&cp0_usb3_0 {
status = "okay";
usb-phy = <&cp0_usb3_0_phy0>;
- phy-names = "usb";
+ phys = <&cp0_utmi0>;
+ phy-names = "utmi";
+ dr_mode = "host";
};
&cp0_usb3_1 {
status = "okay";
usb-phy = <&cp0_usb3_0_phy1>;
- phy-names = "usb";
+ phys = <&cp0_utmi1>;
+ phy-names = "utmi";
+ dr_mode = "host";
};
diff --git a/arch/arm64/boot/dts/marvell/cn9131-db.dts b/arch/arm64/boot/dts/marvell/cn9131-db.dts
index 3c975f98b2a3..ba2d4e1da159 100644
--- a/arch/arm64/boot/dts/marvell/cn9131-db.dts
+++ b/arch/arm64/boot/dts/marvell/cn9131-db.dts
@@ -193,10 +193,15 @@
};
/* CON58 */
+&cp1_utmi {
+ status = "okay";
+};
+
&cp1_usb3_1 {
status = "okay";
usb-phy = <&cp1_usb3_0_phy0>;
/* Generic PHY, providing serdes lanes */
- phys = <&cp1_comphy3 1>;
- phy-names = "usb";
+ phys = <&cp1_comphy3 1>, <&cp1_utmi1>;
+ phy-names = "usb", "utmi";
+ dr_mode = "host";
};
diff --git a/arch/arm64/boot/dts/marvell/cn9132-db.dts b/arch/arm64/boot/dts/marvell/cn9132-db.dts
index 4ef0df3097ca..81fba024b22d 100644
--- a/arch/arm64/boot/dts/marvell/cn9132-db.dts
+++ b/arch/arm64/boot/dts/marvell/cn9132-db.dts
@@ -205,17 +205,24 @@
};
};
+&cp2_utmi {
+ status = "okay";
+};
+
&cp2_usb3_0 {
status = "okay";
usb-phy = <&cp2_usb3_0_phy0>;
+ phys = <&cp2_utmi0>;
phy-names = "usb";
+ dr_mode = "host";
};
/* SLM-1521-V2, CON11 */
&cp2_usb3_1 {
status = "okay";
usb-phy = <&cp2_usb3_0_phy1>;
- phy-names = "usb";
/* Generic PHY, providing serdes lanes */
- phys = <&cp2_comphy3 1>;
+ phys = <&cp2_comphy3 1>, <&cp2_utmi1>;
+ phy-names = "usb", "utmi";
+ dr_mode = "host";
};
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index deba27ab7657..a1c50adc98fa 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -13,7 +13,15 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-elm-hana.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-elm-hana-rev7.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-evb.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-jacuzzi-damu.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-jacuzzi-juniper-sku16.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-kakadu.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-kodama-sku16.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-kodama-sku272.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-kodama-sku288.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-kodama-sku32.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-krane-sku0.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-kukui-krane-sku176.dtb
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt8183-pumpkin.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8192-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8516-pumpkin.dtb
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index db17d0a4ed57..a9cca9c146fd 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -805,7 +805,7 @@
ranges;
status = "disabled";
- usb_host0: xhci@11270000 {
+ usb_host0: usb@11270000 {
compatible = "mediatek,mt2712-xhci",
"mediatek,mtk-xhci";
reg = <0 0x11270000 0 0x1000>;
@@ -818,7 +818,7 @@
};
};
- u3phy0: usb-phy@11290000 {
+ u3phy0: t-phy@11290000 {
compatible = "mediatek,mt2712-tphy",
"mediatek,generic-tphy-v2";
#address-cells = <1>;
@@ -869,7 +869,7 @@
ranges;
status = "disabled";
- usb_host1: xhci@112c0000 {
+ usb_host1: usb@112c0000 {
compatible = "mediatek,mt2712-xhci",
"mediatek,mtk-xhci";
reg = <0 0x112c0000 0 0x1000>;
@@ -882,7 +882,7 @@
};
};
- u3phy1: usb-phy@112e0000 {
+ u3phy1: t-phy@112e0000 {
compatible = "mediatek,mt2712-tphy",
"mediatek,generic-tphy-v2";
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
index 08ad0ffb24df..f2dc850010f1 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
@@ -495,6 +495,16 @@
groups = "watchdog";
};
};
+
+ wmac_pins: wmac-pins {
+ mux {
+ function = "antsel";
+ groups = "antsel0", "antsel1", "antsel2", "antsel3",
+ "antsel4", "antsel5", "antsel6", "antsel7",
+ "antsel8", "antsel9", "antsel12", "antsel13",
+ "antsel14", "antsel15", "antsel16", "antsel17";
+ };
+ };
};
&pwm {
@@ -559,5 +569,7 @@
};
&wmac {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wmac_pins>;
status = "okay";
};
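
The wmac hunk shows the standard pinctrl hand-off: the antsel group is defined once under the pin controller and then claimed by the consumer, whose "default" state the driver core applies automatically at probe. In outline:

&pio {
	wmac_pins: wmac-pins {
		mux {
			function = "antsel";
			groups = "antsel0", "antsel1";	/* list trimmed */
		};
	};
};

&wmac {
	pinctrl-names = "default";
	pinctrl-0 = <&wmac_pins>;
	status = "okay";
};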
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 7c6d871538a6..890a942ec608 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -742,8 +742,8 @@
status = "disabled";
};
- u3phy: usb-phy@1a0c4000 {
- compatible = "mediatek,mt7622-u3phy",
+ u3phy: t-phy@1a0c4000 {
+ compatible = "mediatek,mt7622-tphy",
"mediatek,generic-tphy-v1";
reg = <0 0x1a0c4000 0 0x700>;
#address-cells = <2>;
@@ -877,8 +877,9 @@
status = "disabled";
};
- sata_phy: sata-phy@1a243000 {
- compatible = "mediatek,generic-tphy-v1";
+ sata_phy: t-phy@1a243000 {
+ compatible = "mediatek,mt7622-tphy",
+ "mediatek,generic-tphy-v1";
#address-cells = <2>;
#size-cells = <2>;
ranges;
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index 6dffada2e66b..f6a1738dfbaa 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -294,7 +294,7 @@
&pwrap {
/* Only MT8173 E1 needs USB power domain */
- power-domains = <&scpsys MT8173_POWER_DOMAIN_USB>;
+ power-domains = <&spm MT8173_POWER_DOMAIN_USB>;
pmic: mt6397 {
compatible = "mediatek,mt6397";
@@ -516,10 +516,8 @@
extcon = <&extcon_usb>;
dr_mode = "otg";
wakeup-source;
- pinctrl-names = "default", "id_float", "id_ground";
+ pinctrl-names = "default";
pinctrl-0 = <&usb_id_pins_float>;
- pinctrl-1 = <&usb_id_pins_float>;
- pinctrl-2 = <&usb_id_pins_ground>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 7fa870e4386a..003a5653c505 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -631,7 +631,7 @@
#mbox-cells = <2>;
};
- mipi_tx0: mipi-dphy@10215000 {
+ mipi_tx0: dsi-phy@10215000 {
compatible = "mediatek,mt8173-mipi-tx";
reg = <0 0x10215000 0 0x1000>;
clocks = <&clk26m>;
@@ -641,7 +641,7 @@
status = "disabled";
};
- mipi_tx1: mipi-dphy@10216000 {
+ mipi_tx1: dsi-phy@10216000 {
compatible = "mediatek,mt8173-mipi-tx";
reg = <0 0x10216000 0 0x1000>;
clocks = <&clk26m>;
@@ -926,7 +926,7 @@
};
ssusb: usb@11271000 {
- compatible = "mediatek,mt8173-mtu3";
+ compatible = "mediatek,mt8173-mtu3", "mediatek,mtu3";
reg = <0 0x11271000 0 0x3000>,
<0 0x11280700 0 0x0100>;
reg-names = "mac", "ippc";
@@ -943,8 +943,9 @@
ranges;
status = "disabled";
- usb_host: xhci@11270000 {
- compatible = "mediatek,mt8173-xhci";
+ usb_host: usb@11270000 {
+ compatible = "mediatek,mt8173-xhci",
+ "mediatek,mtk-xhci";
reg = <0 0x11270000 0 0x1000>;
reg-names = "mac";
interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_LOW>;
@@ -955,7 +956,7 @@
};
};
- u3phy: usb-phy@11290000 {
+ u3phy: t-phy@11290000 {
compatible = "mediatek,mt8173-u3phy";
reg = <0 0x11290000 0 0x800>;
#address-cells = <2>;
@@ -1235,7 +1236,7 @@
<&mmsys CLK_MM_DSI1_DIGITAL>,
<&mipi_tx1>;
clock-names = "engine", "digital", "hs";
- phy = <&mipi_tx1>;
+ phys = <&mipi_tx1>;
phy-names = "dphy";
status = "disabled";
};
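
Among the mt8173.dtsi renames, the dsi1 hunk is a functional fix rather than style: the generic PHY framework resolves consumers through the phys property (paired with phy-names), so the old singular phy = <&mipi_tx1> was never looked up and the DPHY went unattached. The consumer side of the binding takes the plural form:

phys = <&mipi_tx1>;
phy-names = "dphy";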
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
index 3249c959f76f..edff1e03e6fe 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8183-evb.dts
@@ -352,6 +352,10 @@
};
};
+&mfg {
+ domain-supply = <&mt6358_vgpu_reg>;
+};
+
&spi0 {
pinctrl-names = "default";
pinctrl-0 = <&spi_pins_0>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
new file mode 100644
index 000000000000..42ba9c00866c
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-jacuzzi.dtsi"
+
+/ {
+ model = "Google damu board";
+ compatible = "google,damu", "mediatek,mt8183";
+};
+
+&touchscreen {
+ status = "okay";
+
+ compatible = "hid-over-i2c";
+ reg = <0x10>;
+ interrupt-parent = <&pio>;
+ interrupts = <155 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchscreen_pins>;
+
+ post-power-on-delay-ms = <10>;
+ hid-descr-addr = <0x0001>;
+};
+
+&qca_wifi {
+ qcom,ath10k-calibration-variant = "GO_DAMU";
+};
+
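
The damu touchscreen override swaps the jacuzzi baseline's elan,ekth3500 node for a HID-over-I2C device. Two properties carry that binding: hid-descr-addr is the register from which the HID descriptor is fetched, and post-power-on-delay-ms gives the controller time to come up before the first transfer. A minimal node of this shape (values as in the diff above):

touchscreen@10 {
	compatible = "hid-over-i2c";
	reg = <0x10>;
	hid-descr-addr = <0x0001>;
	post-power-on-delay-ms = <10>;
	interrupt-parent = <&pio>;
	interrupts = <155 IRQ_TYPE_LEVEL_LOW>;
};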
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dts
new file mode 100644
index 000000000000..36d2c3b3cadf
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper-sku16.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-jacuzzi-juniper.dtsi"
+
+/ {
+ model = "Google juniper sku16 board";
+ compatible = "google,juniper-sku16", "google,juniper", "mediatek,mt8183";
+};
+
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper.dtsi
new file mode 100644
index 000000000000..078bc765646f
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-juniper.dtsi
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-jacuzzi.dtsi"
+
+&i2c2 {
+ trackpad@2c {
+ compatible = "hid-over-i2c";
+ reg = <0x2c>;
+ hid-descr-addr = <0x20>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&trackpad_pins>;
+
+ interrupts-extended = <&pio 7 IRQ_TYPE_LEVEL_LOW>;
+
+ wakeup-source;
+ };
+};
+
+&qca_wifi {
+ qcom,ath10k-calibration-variant = "GO_JUNIPER";
+};
+
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
new file mode 100644
index 000000000000..4049dff8464b
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+#include "mt8183-kukui.dtsi"
+
+/ {
+ panel: panel {
+ compatible = "auo,b116xw03";
+ power-supply = <&pp3300_panel>;
+ ddc-i2c-bus = <&i2c4>;
+ backlight = <&backlight_lcd0>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&anx7625_out>;
+ };
+ };
+ };
+
+ pp1200_mipibrdg: pp1200-mipibrdg {
+ compatible = "regulator-fixed";
+ regulator-name = "pp1200_mipibrdg";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pp1200_mipibrdg_en>;
+
+ enable-active-high;
+ regulator-boot-on;
+
+ gpio = <&pio 54 GPIO_ACTIVE_HIGH>;
+ };
+
+ pp1800_mipibrdg: pp1800-mipibrdg {
+ compatible = "regulator-fixed";
+ regulator-name = "pp1800_mipibrdg";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pp1800_lcd_en>;
+
+ enable-active-high;
+ regulator-boot-on;
+
+ gpio = <&pio 36 GPIO_ACTIVE_HIGH>;
+ };
+
+ pp3300_panel: pp3300-panel {
+ compatible = "regulator-fixed";
+ regulator-name = "pp3300_panel";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pp3300_panel_pins>;
+
+ enable-active-high;
+ regulator-boot-on;
+
+ gpio = <&pio 35 GPIO_ACTIVE_HIGH>;
+ };
+
+ vddio_mipibrdg: vddio-mipibrdg {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio_mipibrdg";
+ pinctrl-names = "default";
+ pinctrl-0 = <&vddio_mipibrdg_en>;
+
+ enable-active-high;
+ regulator-boot-on;
+
+ gpio = <&pio 37 GPIO_ACTIVE_HIGH>;
+ };
+
+ volume_buttons: volume-buttons {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&volume_button_pins>;
+
+ volume_down {
+ label = "Volume Down";
+ linux,code = <KEY_VOLUMEDOWN>;
+ debounce-interval = <100>;
+
+ gpios = <&pio 6 GPIO_ACTIVE_LOW>;
+ };
+
+ volume_up {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <100>;
+
+ gpios = <&pio 5 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&dsi0 {
+ status = "okay";
+ /delete-node/panel@0;
+ ports {
+ port {
+ dsi_out: endpoint {
+ remote-endpoint = <&anx7625_in>;
+ };
+ };
+ };
+};
+
+&i2c0 {
+ status = "okay";
+
+ touchscreen: touchscreen@10 {
+ compatible = "elan,ekth3500";
+ reg = <0x10>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&touchscreen_pins>;
+
+ interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>;
+
+ reset-gpios = <&pio 156 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+ status = "okay";
+ clock-frequency = <400000>;
+
+ trackpad@15 {
+ compatible = "elan,ekth3000";
+ reg = <0x15>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&trackpad_pins>;
+
+ interrupts-extended = <&pio 7 IRQ_TYPE_LEVEL_LOW>;
+
+ wakeup-source;
+ };
+};
+
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_pins>;
+ status = "okay";
+ clock-frequency = <100000>;
+
+ anx_bridge: anx7625@58 {
+ compatible = "analogix,anx7625";
+ reg = <0x58>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&anx7625_pins>;
+ panel_flags = <1>;
+ enable-gpios = <&pio 45 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 73 GPIO_ACTIVE_HIGH>;
+ vdd10-supply = <&pp1200_mipibrdg>;
+ vdd18-supply = <&pp1800_mipibrdg>;
+ vdd33-supply = <&vddio_mipibrdg>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+
+ anx7625_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ anx7625_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+};
+
+&i2c_tunnel {
+ google,remote-bus = <2>;
+};
+
+&pio {
+ /* 192 lines */
+ gpio-line-names =
+ "SPI_AP_EC_CS_L",
+ "SPI_AP_EC_MOSI",
+ "SPI_AP_EC_CLK",
+ "I2S3_DO",
+ "USB_PD_INT_ODL",
+ "",
+ "",
+ "",
+ "",
+ "IT6505_HPD_L",
+ "I2S3_TDM_D3",
+ "SOC_I2C6_1V8_SCL",
+ "SOC_I2C6_1V8_SDA",
+ "DPI_D0",
+ "DPI_D1",
+ "DPI_D2",
+ "DPI_D3",
+ "DPI_D4",
+ "DPI_D5",
+ "DPI_D6",
+ "DPI_D7",
+ "DPI_D8",
+ "DPI_D9",
+ "DPI_D10",
+ "DPI_D11",
+ "DPI_HSYNC",
+ "DPI_VSYNC",
+ "DPI_DE",
+ "DPI_CK",
+ "AP_MSDC1_CLK",
+ "AP_MSDC1_DAT3",
+ "AP_MSDC1_CMD",
+ "AP_MSDC1_DAT0",
+ "AP_MSDC1_DAT2",
+ "AP_MSDC1_DAT1",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "OTG_EN",
+ "DRVBUS",
+ "DISP_PWM",
+ "DSI_TE",
+ "LCM_RST_1V8",
+ "AP_CTS_WIFI_RTS",
+ "AP_RTS_WIFI_CTS",
+ "SOC_I2C5_1V8_SCL",
+ "SOC_I2C5_1V8_SDA",
+ "SOC_I2C3_1V8_SCL",
+ "SOC_I2C3_1V8_SDA",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "SOC_I2C1_1V8_SDA",
+ "SOC_I2C0_1V8_SDA",
+ "SOC_I2C0_1V8_SCL",
+ "SOC_I2C1_1V8_SCL",
+ "AP_SPI_H1_MISO",
+ "AP_SPI_H1_CS_L",
+ "AP_SPI_H1_MOSI",
+ "AP_SPI_H1_CLK",
+ "I2S5_BCK",
+ "I2S5_LRCK",
+ "I2S5_DO",
+ "BOOTBLOCK_EN_L",
+ "MT8183_KPCOL0",
+ "SPI_AP_EC_MISO",
+ "UART_DBG_TX_AP_RX",
+ "UART_AP_TX_DBG_RX",
+ "I2S2_MCK",
+ "I2S2_BCK",
+ "CLK_5M_WCAM",
+ "CLK_2M_UCAM",
+ "I2S2_LRCK",
+ "I2S2_DI",
+ "SOC_I2C2_1V8_SCL",
+ "SOC_I2C2_1V8_SDA",
+ "SOC_I2C4_1V8_SCL",
+ "SOC_I2C4_1V8_SDA",
+ "",
+ "SCL8",
+ "SDA8",
+ "FCAM_PWDN_L",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ /*
+ * AP_FLASH_WP_L is crossystem ABI. Rev1 schematics
+ * call it BIOS_FLASH_WP_R_L.
+ */
+ "AP_FLASH_WP_L",
+ "EC_AP_INT_ODL",
+ "IT6505_INT_ODL",
+ "H1_INT_OD_L",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "AP_SPI_FLASH_MISO",
+ "AP_SPI_FLASH_CS_L",
+ "AP_SPI_FLASH_MOSI",
+ "AP_SPI_FLASH_CLK",
+ "DA7219_IRQ",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "";
+
+ pp1200_mipibrdg_en: pp1200-mipibrdg-en {
+ pins1 {
+ pinmux = <PINMUX_GPIO54__FUNC_GPIO54>;
+ output-low;
+ };
+ };
+
+ pp1800_lcd_en: pp1800-lcd-en {
+ pins1 {
+ pinmux = <PINMUX_GPIO36__FUNC_GPIO36>;
+ output-low;
+ };
+ };
+
+ pp3300_panel_pins: pp3300-panel-pins {
+ panel_3v3_enable: panel-3v3-enable {
+ pinmux = <PINMUX_GPIO35__FUNC_GPIO35>;
+ output-low;
+ };
+ };
+
+ ppvarp_lcd_en: ppvarp-lcd-en {
+ pins1 {
+ pinmux = <PINMUX_GPIO66__FUNC_GPIO66>;
+ output-low;
+ };
+ };
+
+ ppvarn_lcd_en: ppvarn-lcd-en {
+ pins1 {
+ pinmux = <PINMUX_GPIO166__FUNC_GPIO166>;
+ output-low;
+ };
+ };
+
+ anx7625_pins: anx7625-pins {
+ pins1 {
+ pinmux = <PINMUX_GPIO45__FUNC_GPIO45>,
+ <PINMUX_GPIO73__FUNC_GPIO73>;
+ output-low;
+ };
+ pins2 {
+ pinmux = <PINMUX_GPIO4__FUNC_GPIO4>;
+ input-enable;
+ bias-pull-up;
+ };
+ };
+
+ touchscreen_pins: touchscreen-pins {
+ touch_int_odl {
+ pinmux = <PINMUX_GPIO155__FUNC_GPIO155>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ touch_rst_l {
+ pinmux = <PINMUX_GPIO156__FUNC_GPIO156>;
+ output-high;
+ };
+ };
+
+ trackpad_pins: trackpad-pins {
+ trackpad_int {
+ pinmux = <PINMUX_GPIO7__FUNC_GPIO7>;
+ input-enable;
+ bias-disable; /* pulled externally */
+ };
+ };
+
+ vddio_mipibrdg_en: vddio-mipibrdg-en {
+ pins1 {
+ pinmux = <PINMUX_GPIO37__FUNC_GPIO37>;
+ output-low;
+ };
+ };
+
+ volume_button_pins: volume-button-pins {
+ voldn-btn-odl {
+ pinmux = <PINMUX_GPIO6__FUNC_GPIO6>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ volup-btn-odl {
+ pinmux = <PINMUX_GPIO5__FUNC_GPIO5>;
+ input-enable;
+ bias-pull-up;
+ };
+ };
+};
+
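
The long gpio-line-names array in mt8183-kukui-jacuzzi.dtsi (and its siblings below) is purely positional: entry N labels GPIO line N of the 192-line controller, and empty strings leave a line unnamed. The names are informational -- they surface in userspace tools such as gpioinfo -- and have no effect on pin muxing. Reduced to the first few entries:

&pio {
	gpio-line-names =
		"SPI_AP_EC_CS_L",	/* line 0 */
		"SPI_AP_EC_MOSI",	/* line 1 */
		"SPI_AP_EC_CLK",	/* line 2 */
		"I2S3_DO";		/* line 3 */
};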
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dts
new file mode 100644
index 000000000000..20eb0dc68f09
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dts
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2020 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-kakadu.dtsi"
+
+/ {
+ model = "MediaTek kakadu board";
+ compatible = "google,kakadu-rev3", "google,kakadu-rev2",
+ "google,kakadu", "mediatek,mt8183";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
new file mode 100644
index 000000000000..b442e38a3156
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2020 Google LLC
+ */
+
+#include "mt8183-kukui.dtsi"
+#include <dt-bindings/input/gpio-keys.h>
+
+/ {
+ ppvarn_lcd: ppvarn-lcd {
+ compatible = "regulator-fixed";
+ regulator-name = "ppvarn_lcd";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ppvarn_lcd_en>;
+
+ enable-active-high;
+
+ gpio = <&pio 66 GPIO_ACTIVE_HIGH>;
+ };
+
+ ppvarp_lcd: ppvarp-lcd {
+ compatible = "regulator-fixed";
+ regulator-name = "ppvarp_lcd";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ppvarp_lcd_en>;
+
+ enable-active-high;
+
+ gpio = <&pio 166 GPIO_ACTIVE_HIGH>;
+ };
+
+ pp1800_lcd: pp1800-lcd {
+ compatible = "regulator-fixed";
+ regulator-name = "pp1800_lcd";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pp1800_lcd_en>;
+
+ enable-active-high;
+
+ gpio = <&pio 36 GPIO_ACTIVE_HIGH>;
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pen_eject>;
+
+ pen-insert {
+ label = "Pen Insert";
+ /* Insert = low, eject = high */
+ gpios = <&pio 6 GPIO_ACTIVE_LOW>;
+ linux,code = <SW_PEN_INSERTED>;
+ linux,input-type = <EV_SW>;
+ wakeup-event-action = <EV_ACT_DEASSERTED>;
+ wakeup-source;
+ };
+ };
+};
+
+&bluetooth {
+ firmware-name = "nvm_00440302_i2s_eu.bin";
+};
+
+&i2c0 {
+ status = "okay";
+
+ touchscreen: touchscreen@10 {
+ compatible = "hid-over-i2c";
+ reg = <0x10>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&open_touch>;
+
+ interrupt-parent = <&pio>;
+ interrupts = <155 IRQ_TYPE_EDGE_FALLING>;
+
+ post-power-on-delay-ms = <10>;
+ hid-descr-addr = <0x0001>;
+ };
+};
+
+&mt6358_vcama2_reg {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+ status = "okay";
+ clock-frequency = <400000>;
+
+ eeprom@58 {
+ compatible = "atmel,24c32";
+ reg = <0x58>;
+ pagesize = <32>;
+ };
+};
+
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_pins>;
+ status = "okay";
+ clock-frequency = <400000>;
+
+ eeprom@54 {
+ compatible = "atmel,24c32";
+ reg = <0x54>;
+ pagesize = <32>;
+ };
+};
+
+&mipi_tx0 {
+ drive-strength-microamp = <5800>;
+};
+
+&pio {
+ /* 192 lines */
+ gpio-line-names =
+ "SPI_AP_EC_CS_L",
+ "SPI_AP_EC_MOSI",
+ "SPI_AP_EC_CLK",
+ "I2S3_DO",
+ "USB_PD_INT_ODL",
+ "",
+ "",
+ "",
+ "",
+ "IT6505_HPD_L",
+ "I2S3_TDM_D3",
+ "SOC_I2C6_1V8_SCL",
+ "SOC_I2C6_1V8_SDA",
+ "DPI_D0",
+ "DPI_D1",
+ "DPI_D2",
+ "DPI_D3",
+ "DPI_D4",
+ "DPI_D5",
+ "DPI_D6",
+ "DPI_D7",
+ "DPI_D8",
+ "DPI_D9",
+ "DPI_D10",
+ "DPI_D11",
+ "DPI_HSYNC",
+ "DPI_VSYNC",
+ "DPI_DE",
+ "DPI_CK",
+ "AP_MSDC1_CLK",
+ "AP_MSDC1_DAT3",
+ "AP_MSDC1_CMD",
+ "AP_MSDC1_DAT0",
+ "AP_MSDC1_DAT2",
+ "AP_MSDC1_DAT1",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "OTG_EN",
+ "DRVBUS",
+ "DISP_PWM",
+ "DSI_TE",
+ "LCM_RST_1V8",
+ "AP_CTS_WIFI_RTS",
+ "AP_RTS_WIFI_CTS",
+ "SOC_I2C5_1V8_SCL",
+ "SOC_I2C5_1V8_SDA",
+ "SOC_I2C3_1V8_SCL",
+ "SOC_I2C3_1V8_SDA",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "SOC_I2C1_1V8_SDA",
+ "SOC_I2C0_1V8_SDA",
+ "SOC_I2C0_1V8_SCL",
+ "SOC_I2C1_1V8_SCL",
+ "AP_SPI_H1_MISO",
+ "AP_SPI_H1_CS_L",
+ "AP_SPI_H1_MOSI",
+ "AP_SPI_H1_CLK",
+ "I2S5_BCK",
+ "I2S5_LRCK",
+ "I2S5_DO",
+ "BOOTBLOCK_EN_L",
+ "MT8183_KPCOL0",
+ "SPI_AP_EC_MISO",
+ "UART_DBG_TX_AP_RX",
+ "UART_AP_TX_DBG_RX",
+ "I2S2_MCK",
+ "I2S2_BCK",
+ "CLK_5M_WCAM",
+ "CLK_2M_UCAM",
+ "I2S2_LRCK",
+ "I2S2_DI",
+ "SOC_I2C2_1V8_SCL",
+ "SOC_I2C2_1V8_SDA",
+ "SOC_I2C4_1V8_SCL",
+ "SOC_I2C4_1V8_SDA",
+ "",
+ "SCL8",
+ "SDA8",
+ "FCAM_PWDN_L",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ /*
+ * AP_FLASH_WP_L is crossystem ABI. Rev1 schematics
+ * call it BIOS_FLASH_WP_R_L.
+ */
+ "AP_FLASH_WP_L",
+ "EC_AP_INT_ODL",
+ "IT6505_INT_ODL",
+ "H1_INT_OD_L",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "AP_SPI_FLASH_MISO",
+ "AP_SPI_FLASH_CS_L",
+ "AP_SPI_FLASH_MOSI",
+ "AP_SPI_FLASH_CLK",
+ "DA7219_IRQ",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "";
+
+ ppvarp_lcd_en: ppvarp-lcd-en {
+ pins1 {
+ pinmux = <PINMUX_GPIO66__FUNC_GPIO66>;
+ output-low;
+ };
+ };
+
+ ppvarn_lcd_en: ppvarn-lcd-en {
+ pins1 {
+ pinmux = <PINMUX_GPIO166__FUNC_GPIO166>;
+ output-low;
+ };
+ };
+
+ pp1800_lcd_en: pp1800-lcd-en {
+ pins1 {
+ pinmux = <PINMUX_GPIO36__FUNC_GPIO36>;
+ output-low;
+ };
+ };
+
+ open_touch: open_touch {
+ irq_pin {
+ pinmux = <PINMUX_GPIO155__FUNC_GPIO155>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ rst_pin {
+ pinmux = <PINMUX_GPIO156__FUNC_GPIO156>;
+
+ /*
+ * The pen driver doesn't currently support driving
+ * this reset line. By specifying output-high here
+ * we're relying on the fact that this pin has a default
+ * pulldown at boot (which makes sure the pen was in
+ * reset if it was powered) and then we set it high here
+ * to take it out of reset. Better would be if the pen
+ * driver could control this and we could remove
+ * "output-high" here.
+ */
+ output-high;
+ };
+ };
+
+ pen_eject: peneject {
+ pen_eject {
+ pinmux = <PINMUX_GPIO6__FUNC_GPIO6>;
+ input-enable;
+ /* External pull-up. */
+ bias-disable;
+ };
+ };
+};
+
+&qca_wifi {
+ qcom,ath10k-calibration-variant = "GO_KAKADU";
+};
+
+&panel {
+ status = "okay";
+ compatible = "boe,tv105wum-nw0";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku16.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku16.dts
new file mode 100644
index 000000000000..e3dd75bdaea4
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku16.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ *
+ * SKU: 0x10 => 16
+ * - bit 8: Camera: 0 (OV5695)
+ * - bits 7..4: Panel ID: 0x1 (AUO)
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-kodama.dtsi"
+
+/ {
+ model = "MediaTek kodama sku16 board";
+ compatible = "google,kodama-sku16", "google,kodama", "mediatek,mt8183";
+};
+
+&panel {
+ status = "okay";
+ compatible = "auo,b101uan08.3";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku272.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku272.dts
new file mode 100644
index 000000000000..d81935ae07bc
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku272.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2020 Google LLC
+ *
+ * SKU: 0x110 => 272
+ * - bit 8: Camera: 1 (GC5035)
+ * - bits 7..4: Panel ID: 0x1 (AUO)
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-kodama.dtsi"
+
+/ {
+ model = "MediaTek kodama sku272 board";
+ compatible = "google,kodama-sku272", "google,kodama", "mediatek,mt8183";
+};
+
+&panel {
+ status = "okay";
+ compatible = "auo,b101uan08.3";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku288.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku288.dts
new file mode 100644
index 000000000000..f4082fbe0517
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku288.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2020 Google LLC
+ *
+ * SKU: 0x120 => 288
+ * - bit 8: Camera: 1 (GC5035)
+ * - bits 7..4: Panel ID: 0x2 (BOE)
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-kodama.dtsi"
+
+/ {
+ model = "MediaTek kodama sku288 board";
+ compatible = "google,kodama-sku288", "google,kodama", "mediatek,mt8183";
+};
+
+&panel {
+ status = "okay";
+ compatible = "boe,tv101wum-n53";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku32.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku32.dts
new file mode 100644
index 000000000000..7739358008ee
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama-sku32.dts
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ *
+ * SKU: 0x20 => 32
+ * - bit 8: Camera: 0 (OV5695)
+ * - bits 7..4: Panel ID: 0x2 (BOE)
+ */
+
+/dts-v1/;
+#include "mt8183-kukui-kodama.dtsi"
+
+/ {
+ model = "MediaTek kodama sku32 board";
+ compatible = "google,kodama-sku32", "google,kodama", "mediatek,mt8183";
+};
+
+&panel {
+ status = "okay";
+ compatible = "boe,tv101wum-n53";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
new file mode 100644
index 000000000000..2f5234a16ead
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021 Google LLC
+ */
+
+/dts-v1/;
+#include "mt8183-kukui.dtsi"
+
+/ {
+ ppvarn_lcd: ppvarn-lcd {
+ compatible = "regulator-fixed";
+ regulator-name = "ppvarn_lcd";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ppvarn_lcd_en>;
+
+ enable-active-high;
+
+ gpio = <&pio 66 GPIO_ACTIVE_HIGH>;
+ };
+
+ ppvarp_lcd: ppvarp-lcd {
+ compatible = "regulator-fixed";
+ regulator-name = "ppvarp_lcd";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ppvarp_lcd_en>;
+
+ enable-active-high;
+
+ gpio = <&pio 166 GPIO_ACTIVE_HIGH>;
+ };
+
+ pp1800_lcd: pp1800-lcd {
+ compatible = "regulator-fixed";
+ regulator-name = "pp1800_lcd";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pp1800_lcd_en>;
+
+ enable-active-high;
+
+ gpio = <&pio 36 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&i2c0 {
+ status = "okay";
+
+ touchscreen: touchscreen@10 {
+ compatible = "hid-over-i2c";
+ reg = <0x10>;
+ interrupt-parent = <&pio>;
+ interrupts = <155 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&touch_default>;
+
+ post-power-on-delay-ms = <10>;
+ hid-descr-addr = <0x0001>;
+ };
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins>;
+ status = "okay";
+ clock-frequency = <400000>;
+
+ eeprom@58 {
+ compatible = "atmel,24c64";
+ reg = <0x58>;
+ pagesize = <32>;
+ };
+};
+
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_pins>;
+ status = "okay";
+ clock-frequency = <400000>;
+
+ eeprom@54 {
+ compatible = "atmel,24c64";
+ reg = <0x54>;
+ pagesize = <32>;
+ };
+};
+
+&mt6358_vcama2_reg {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+};
+
+&pio {
+ /* 192 lines */
+ gpio-line-names =
+ "SPI_AP_EC_CS_L",
+ "SPI_AP_EC_MOSI",
+ "SPI_AP_EC_CLK",
+ "I2S3_DO",
+ "USB_PD_INT_ODL",
+ "",
+ "",
+ "",
+ "",
+ "IT6505_HPD_L",
+ "I2S3_TDM_D3",
+ "SOC_I2C6_1V8_SCL",
+ "SOC_I2C6_1V8_SDA",
+ "DPI_D0",
+ "DPI_D1",
+ "DPI_D2",
+ "DPI_D3",
+ "DPI_D4",
+ "DPI_D5",
+ "DPI_D6",
+ "DPI_D7",
+ "DPI_D8",
+ "DPI_D9",
+ "DPI_D10",
+ "DPI_D11",
+ "DPI_HSYNC",
+ "DPI_VSYNC",
+ "DPI_DE",
+ "DPI_CK",
+ "AP_MSDC1_CLK",
+ "AP_MSDC1_DAT3",
+ "AP_MSDC1_CMD",
+ "AP_MSDC1_DAT0",
+ "AP_MSDC1_DAT2",
+ "AP_MSDC1_DAT1",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "OTG_EN",
+ "DRVBUS",
+ "DISP_PWM",
+ "DSI_TE",
+ "LCM_RST_1V8",
+ "AP_CTS_WIFI_RTS",
+ "AP_RTS_WIFI_CTS",
+ "SOC_I2C5_1V8_SCL",
+ "SOC_I2C5_1V8_SDA",
+ "SOC_I2C3_1V8_SCL",
+ "SOC_I2C3_1V8_SDA",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "SOC_I2C1_1V8_SDA",
+ "SOC_I2C0_1V8_SDA",
+ "SOC_I2C0_1V8_SCL",
+ "SOC_I2C1_1V8_SCL",
+ "AP_SPI_H1_MISO",
+ "AP_SPI_H1_CS_L",
+ "AP_SPI_H1_MOSI",
+ "AP_SPI_H1_CLK",
+ "I2S5_BCK",
+ "I2S5_LRCK",
+ "I2S5_DO",
+ "BOOTBLOCK_EN_L",
+ "MT8183_KPCOL0",
+ "SPI_AP_EC_MISO",
+ "UART_DBG_TX_AP_RX",
+ "UART_AP_TX_DBG_RX",
+ "I2S2_MCK",
+ "I2S2_BCK",
+ "CLK_5M_WCAM",
+ "CLK_2M_UCAM",
+ "I2S2_LRCK",
+ "I2S2_DI",
+ "SOC_I2C2_1V8_SCL",
+ "SOC_I2C2_1V8_SDA",
+ "SOC_I2C4_1V8_SCL",
+ "SOC_I2C4_1V8_SDA",
+ "",
+ "SCL8",
+ "SDA8",
+ "FCAM_PWDN_L",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "I2S_PMIC",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ /*
+ * AP_FLASH_WP_L is crossystem ABI. Rev1 schematics
+ * call it BIOS_FLASH_WP_R_L.
+ */
+ "AP_FLASH_WP_L",
+ "EC_AP_INT_ODL",
+ "IT6505_INT_ODL",
+ "H1_INT_OD_L",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "AP_SPI_FLASH_MISO",
+ "AP_SPI_FLASH_CS_L",
+ "AP_SPI_FLASH_MOSI",
+ "AP_SPI_FLASH_CLK",
+ "DA7219_IRQ",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "";
+
+ ppvarp_lcd_en: ppvarp-lcd-en {
+ pins1 {
+ pinmux = <PINMUX_GPIO66__FUNC_GPIO66>;
+ output-low;
+ };
+ };
+
+ ppvarn_lcd_en: ppvarn-lcd-en {
+ pins1 {
+ pinmux = <PINMUX_GPIO166__FUNC_GPIO166>;
+ output-low;
+ };
+ };
+
+ pp1800_lcd_en: pp1800-lcd-en {
+ pins1 {
+ pinmux = <PINMUX_GPIO36__FUNC_GPIO36>;
+ output-low;
+ };
+ };
+
+ touch_default: touchdefault {
+ pin_irq {
+ pinmux = <PINMUX_GPIO155__FUNC_GPIO155>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ touch_pin_reset: pin_reset {
+ pinmux = <PINMUX_GPIO156__FUNC_GPIO156>;
+
+ /*
+ * The touchscreen driver doesn't currently support driving
+ * this reset line. By specifying output-high here
+ * we're relying on the fact that this pin has a default
+ * pulldown at boot (which makes sure the controller was in
+ * reset if it was powered) and then we set it high here
+ * to take it out of reset. Better would be if the touchscreen
+ * driver could control this and we could remove
+ * "output-high" here.
+ */
+ output-high;
+ };
+ };
+};
+
+&qca_wifi {
+ qcom,ath10k-calibration-variant = "GO_KODAMA";
+};
+
+&i2c_tunnel {
+ google,remote-bus = <2>;
+};
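
i2c_tunnel in the kukui family is the ChromeOS EC I2C pass-through (google,cros-ec-i2c-tunnel); google,remote-bus selects which physical bus on the EC side the tunnel forwards to. Clients behind the EC then sit under the tunnel like any other I2C bus -- the battery node below is a hypothetical example of the usual arrangement:

&i2c_tunnel {
	google,remote-bus = <2>;

	battery: sbs-battery@b {
		compatible = "sbs,sbs-battery";
		reg = <0xb>;
		sbs,i2c-retry-count = <2>;
	};
};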
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
new file mode 100644
index 000000000000..0aff5eb52e88
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt8183-pumpkin.dts
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 BayLibre, SAS.
+ * Author: Fabien Parent <fparent@baylibre.com>
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include "mt8183.dtsi"
+#include "mt6358.dtsi"
+
+/ {
+ model = "Pumpkin MT8183";
+ compatible = "mediatek,mt8183-pumpkin", "mediatek,mt8183";
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0 0x40000000 0 0x80000000>;
+ };
+
+ chosen {
+ stdout-path = "serial0:921600n8";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ scp_mem_reserved: scp_mem_region@50000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0x50000000 0 0x2900000>;
+ no-map;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-red {
+ label = "red";
+ gpios = <&pio 155 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ led-green {
+ label = "green";
+ gpios = <&pio 156 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ };
+
+ ntc {
+ compatible = "murata,ncp03wf104";
+ pullup-uv = <1800000>;
+ pullup-ohm = <390000>;
+ pulldown-ohm = <0>;
+ io-channels = <&auxadc 0>;
+ };
+};
+
+&auxadc {
+ status = "okay";
+};
+
+&i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_pins_0>;
+ status = "okay";
+ clock-frequency = <100000>;
+};
+
+&i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_pins_1>;
+ status = "okay";
+ clock-frequency = <100000>;
+};
+
+&i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_pins_2>;
+ status = "okay";
+ clock-frequency = <100000>;
+};
+
+&i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_pins_3>;
+ status = "okay";
+ clock-frequency = <100000>;
+};
+
+&i2c4 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_pins_4>;
+ status = "okay";
+ clock-frequency = <100000>;
+};
+
+&i2c5 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c_pins_5>;
+ status = "okay";
+ clock-frequency = <100000>;
+};
+
+&i2c6 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6_pins>;
+ status = "okay";
+ clock-frequency = <100000>;
+};
+
+&mmc0 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc0_pins_default>;
+ pinctrl-1 = <&mmc0_pins_uhs>;
+ bus-width = <8>;
+ max-frequency = <200000000>;
+ cap-mmc-highspeed;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ cap-mmc-hw-reset;
+ no-sdio;
+ no-sd;
+ hs400-ds-delay = <0x12814>;
+ vmmc-supply = <&mt6358_vemc_reg>;
+ vqmmc-supply = <&mt6358_vio18_reg>;
+ assigned-clocks = <&topckgen CLK_TOP_MUX_MSDC50_0>;
+ assigned-clock-parents = <&topckgen CLK_TOP_MSDCPLL_CK>;
+ non-removable;
+};
+
+&mmc1 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc1_pins_default>;
+ pinctrl-1 = <&mmc1_pins_uhs>;
+ bus-width = <4>;
+ max-frequency = <200000000>;
+ cap-sd-highspeed;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ cap-sdio-irq;
+ no-mmc;
+ no-sd;
+ vmmc-supply = <&mt6358_vmch_reg>;
+ vqmmc-supply = <&mt6358_vmc_reg>;
+ keep-power-in-suspend;
+ enable-sdio-wakeup;
+ non-removable;
+};
+
+&pio {
+ i2c_pins_0: i2c0 {
+ pins_i2c {
+ pinmux = <PINMUX_GPIO82__FUNC_SDA0>,
+ <PINMUX_GPIO83__FUNC_SCL0>;
+ mediatek,pull-up-adv = <3>;
+ mediatek,drive-strength-adv = <00>;
+ };
+ };
+
+ i2c_pins_1: i2c1 {
+ pins_i2c {
+ pinmux = <PINMUX_GPIO81__FUNC_SDA1>,
+ <PINMUX_GPIO84__FUNC_SCL1>;
+ mediatek,pull-up-adv = <3>;
+ mediatek,drive-strength-adv = <00>;
+ };
+ };
+
+ i2c_pins_2: i2c2 {
+ pins_i2c {
+ pinmux = <PINMUX_GPIO103__FUNC_SCL2>,
+ <PINMUX_GPIO104__FUNC_SDA2>;
+ mediatek,pull-up-adv = <3>;
+ mediatek,drive-strength-adv = <00>;
+ };
+ };
+
+ i2c_pins_3: i2c3 {
+ pins_i2c {
+ pinmux = <PINMUX_GPIO50__FUNC_SCL3>,
+ <PINMUX_GPIO51__FUNC_SDA3>;
+ mediatek,pull-up-adv = <3>;
+ mediatek,drive-strength-adv = <00>;
+ };
+ };
+
+ i2c_pins_4: i2c4 {
+ pins_i2c {
+ pinmux = <PINMUX_GPIO105__FUNC_SCL4>,
+ <PINMUX_GPIO106__FUNC_SDA4>;
+ mediatek,pull-up-adv = <3>;
+ mediatek,drive-strength-adv = <00>;
+ };
+ };
+
+ i2c_pins_5: i2c5 {
+ pins_i2c {
+ pinmux = <PINMUX_GPIO48__FUNC_SCL5>,
+ <PINMUX_GPIO49__FUNC_SDA5>;
+ mediatek,pull-up-adv = <3>;
+ mediatek,drive-strength-adv = <00>;
+ };
+ };
+
+ i2c6_pins: i2c6 {
+ pins_cmd_dat {
+ pinmux = <PINMUX_GPIO113__FUNC_SCL6>,
+ <PINMUX_GPIO114__FUNC_SDA6>;
+ mediatek,pull-up-adv = <3>;
+ };
+ };
+
+ mmc0_pins_default: mmc0-pins-default {
+ pins_cmd_dat {
+ pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
+ <PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
+ <PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
+ <PINMUX_GPIO132__FUNC_MSDC0_DAT3>,
+ <PINMUX_GPIO126__FUNC_MSDC0_DAT4>,
+ <PINMUX_GPIO129__FUNC_MSDC0_DAT5>,
+ <PINMUX_GPIO127__FUNC_MSDC0_DAT6>,
+ <PINMUX_GPIO130__FUNC_MSDC0_DAT7>,
+ <PINMUX_GPIO122__FUNC_MSDC0_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-up-adv = <01>;
+ };
+
+ pins_clk {
+ pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-down-adv = <10>;
+ };
+
+ pins_rst {
+ pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-down-adv = <01>;
+ };
+ };
+
+ mmc0_pins_uhs: mmc0-pins-uhs {
+ pins_cmd_dat {
+ pinmux = <PINMUX_GPIO123__FUNC_MSDC0_DAT0>,
+ <PINMUX_GPIO128__FUNC_MSDC0_DAT1>,
+ <PINMUX_GPIO125__FUNC_MSDC0_DAT2>,
+ <PINMUX_GPIO132__FUNC_MSDC0_DAT3>,
+ <PINMUX_GPIO126__FUNC_MSDC0_DAT4>,
+ <PINMUX_GPIO129__FUNC_MSDC0_DAT5>,
+ <PINMUX_GPIO127__FUNC_MSDC0_DAT6>,
+ <PINMUX_GPIO130__FUNC_MSDC0_DAT7>,
+ <PINMUX_GPIO122__FUNC_MSDC0_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-up-adv = <01>;
+ };
+
+ pins_clk {
+ pinmux = <PINMUX_GPIO124__FUNC_MSDC0_CLK>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-down-adv = <10>;
+ };
+
+ pins_ds {
+ pinmux = <PINMUX_GPIO131__FUNC_MSDC0_DSL>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-down-adv = <10>;
+ };
+
+ pins_rst {
+ pinmux = <PINMUX_GPIO133__FUNC_MSDC0_RSTB>;
+ drive-strength = <MTK_DRIVE_14mA>;
+ mediatek,pull-up-adv = <01>;
+ };
+ };
+
+ mmc1_pins_default: mmc1-pins-default {
+ pins_cmd_dat {
+ pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
+ <PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
+ <PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
+ <PINMUX_GPIO33__FUNC_MSDC1_DAT2>,
+ <PINMUX_GPIO30__FUNC_MSDC1_DAT3>;
+ input-enable;
+ mediatek,pull-up-adv = <10>;
+ };
+
+ pins_clk {
+ pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
+ input-enable;
+ mediatek,pull-down-adv = <10>;
+ };
+
+ pins_pmu {
+ pinmux = <PINMUX_GPIO178__FUNC_GPIO178>;
+ output-high;
+ };
+ };
+
+ mmc1_pins_uhs: mmc1-pins-uhs {
+ pins_cmd_dat {
+ pinmux = <PINMUX_GPIO31__FUNC_MSDC1_CMD>,
+ <PINMUX_GPIO32__FUNC_MSDC1_DAT0>,
+ <PINMUX_GPIO34__FUNC_MSDC1_DAT1>,
+ <PINMUX_GPIO33__FUNC_MSDC1_DAT2>,
+ <PINMUX_GPIO30__FUNC_MSDC1_DAT3>;
+ drive-strength = <MTK_DRIVE_6mA>;
+ input-enable;
+ mediatek,pull-up-adv = <10>;
+ };
+
+ pins_clk {
+ pinmux = <PINMUX_GPIO29__FUNC_MSDC1_CLK>;
+ drive-strength = <MTK_DRIVE_8mA>;
+ mediatek,pull-down-adv = <10>;
+ input-enable;
+ };
+ };
+};
+
+&mfg {
+ domain-supply = <&mt6358_vgpu_reg>;
+};
+
+&cpu0 {
+ proc-supply = <&mt6358_vproc12_reg>;
+};
+
+&cpu1 {
+ proc-supply = <&mt6358_vproc12_reg>;
+};
+
+&cpu2 {
+ proc-supply = <&mt6358_vproc12_reg>;
+};
+
+&cpu3 {
+ proc-supply = <&mt6358_vproc12_reg>;
+};
+
+&cpu4 {
+ proc-supply = <&mt6358_vproc11_reg>;
+};
+
+&cpu5 {
+ proc-supply = <&mt6358_vproc11_reg>;
+};
+
+&cpu6 {
+ proc-supply = <&mt6358_vproc11_reg>;
+};
+
+&cpu7 {
+ proc-supply = <&mt6358_vproc11_reg>;
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&scp {
+ status = "okay";
+};
+
+&dsi0 {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
index 80519a145f13..c5e822b6b77a 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi
@@ -13,6 +13,7 @@
#include <dt-bindings/power/mt8183-power.h>
#include <dt-bindings/reset-controller/mt8183-resets.h>
#include <dt-bindings/phy/phy.h>
+#include <dt-bindings/thermal/thermal.h>
#include "mt8183-pinfunc.h"
/ {
@@ -657,6 +658,142 @@
status = "disabled";
};
+ thermal: thermal@1100b000 {
+ #thermal-sensor-cells = <1>;
+ compatible = "mediatek,mt8183-thermal";
+ reg = <0 0x1100b000 0 0x1000>;
+ clocks = <&infracfg CLK_INFRA_THERM>,
+ <&infracfg CLK_INFRA_AUXADC>;
+ clock-names = "therm", "auxadc";
+ resets = <&infracfg MT8183_INFRACFG_AO_THERM_SW_RST>;
+ interrupts = <0 76 IRQ_TYPE_LEVEL_LOW>;
+ mediatek,auxadc = <&auxadc>;
+ mediatek,apmixedsys = <&apmixedsys>;
+ nvmem-cells = <&thermal_calibration>;
+ nvmem-cell-names = "calibration-data";
+ };
+
+ thermal-zones {
+ cpu_thermal: cpu_thermal {
+ polling-delay-passive = <100>;
+ polling-delay = <500>;
+ thermal-sensors = <&thermal 0>;
+ sustainable-power = <5000>;
+
+ trips {
+ threshold: trip-point0 {
+ temperature = <68000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ target: trip-point1 {
+ temperature = <80000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu_crit: cpu-crit {
+ temperature = <115000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&target>;
+ cooling-device = <&cpu0
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>,
+ <&cpu1
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>,
+ <&cpu2
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>,
+ <&cpu3
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>;
+ contribution = <3072>;
+ };
+ map1 {
+ trip = <&target>;
+ cooling-device = <&cpu4
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>,
+ <&cpu5
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>,
+ <&cpu6
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>,
+ <&cpu7
+ THERMAL_NO_LIMIT
+ THERMAL_NO_LIMIT>;
+ contribution = <1024>;
+ };
+ };
+ };
+
+ /* The tzts1 ~ tzts6 don't need polling */
+ /* The tzts1 ~ tzts6 don't need thermal throttling */
+
+ tzts1: tzts1 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&thermal 1>;
+ sustainable-power = <5000>;
+ trips {};
+ cooling-maps {};
+ };
+
+ tzts2: tzts2 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&thermal 2>;
+ sustainable-power = <5000>;
+ trips {};
+ cooling-maps {};
+ };
+
+ tzts3: tzts3 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&thermal 3>;
+ sustainable-power = <5000>;
+ trips {};
+ cooling-maps {};
+ };
+
+ tzts4: tzts4 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&thermal 4>;
+ sustainable-power = <5000>;
+ trips {};
+ cooling-maps {};
+ };
+
+ tzts5: tzts5 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&thermal 5>;
+ sustainable-power = <5000>;
+ trips {};
+ cooling-maps {};
+ };
+
+ tztsABB: tztsABB {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&thermal 6>;
+ sustainable-power = <5000>;
+ trips {};
+ cooling-maps {};
+ };
+ };
+
pwm0: pwm@1100e000 {
compatible = "mediatek,mt8183-disp-pwm";
reg = <0 0x1100e000 0 0x1000>;
@@ -874,13 +1011,13 @@
clocks = <&infracfg CLK_INFRA_UNIPRO_SCK>,
<&infracfg CLK_INFRA_USB>;
clock-names = "sys_ck", "ref_ck";
- mediatek,syscon-wakeup = <&pericfg 0x400 0>;
+ mediatek,syscon-wakeup = <&pericfg 0x420 101>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
status = "disabled";
- usb_host: xhci@11200000 {
+ usb_host: usb@11200000 {
compatible = "mediatek,mt8183-xhci",
"mediatek,mtk-xhci";
reg = <0 0x11200000 0 0x1000>;
@@ -923,11 +1060,10 @@
status = "disabled";
};
- mipi_tx0: mipi-dphy@11e50000 {
+ mipi_tx0: dsi-phy@11e50000 {
compatible = "mediatek,mt8183-mipi-tx";
reg = <0 0x11e50000 0 0x1000>;
clocks = <&apmixedsys CLK_APMIXED_MIPID0_26M>;
- clock-names = "ref_clk";
#clock-cells = <0>;
#phy-cells = <0>;
clock-output-names = "mipi_tx0_pll";
@@ -941,16 +1077,19 @@
reg = <0 0x11f10000 0 0x1000>;
#address-cells = <1>;
#size-cells = <1>;
+ thermal_calibration: calib@180 {
+ reg = <0x180 0xc>;
+ };
+
mipi_tx_calibration: calib@190 {
reg = <0x190 0xc>;
};
};
- u3phy: usb-phy@11f40000 {
+ u3phy: t-phy@11f40000 {
compatible = "mediatek,mt8183-tphy",
"mediatek,generic-tphy-v2";
#address-cells = <1>;
- #phy-cells = <1>;
#size-cells = <1>;
ranges = <0 0 0x11f40000 0x1000>;
status = "okay";
@@ -964,7 +1103,7 @@
status = "okay";
};
- u3port0: usb-phy@0700 {
+ u3port0: usb-phy@700 {
reg = <0x0700 0x900>;
clocks = <&clk26m>;
clock-names = "ref";
@@ -983,6 +1122,9 @@
compatible = "mediatek,mt8183-mmsys", "syscon";
reg = <0 0x14000000 0 0x1000>;
#clock-cells = <1>;
+ mboxes = <&gce 0 CMDQ_THR_PRIO_HIGHEST>,
+ <&gce 1 CMDQ_THR_PRIO_HIGHEST>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0 0x1000>;
};
ovl0: ovl@14008000 {
@@ -1058,6 +1200,7 @@
interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
clocks = <&mmsys CLK_MM_DISP_CCORR0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1400XXXX 0xf000 0x1000>;
};
aal0: aal@14010000 {
@@ -1067,6 +1210,7 @@
interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
clocks = <&mmsys CLK_MM_DISP_AAL0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0 0x1000>;
};
gamma0: gamma@14011000 {
@@ -1075,6 +1219,7 @@
interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
clocks = <&mmsys CLK_MM_DISP_GAMMA0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x1000 0x1000>;
};
dither0: dither@14012000 {
@@ -1083,6 +1228,7 @@
interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_LOW>;
power-domains = <&spm MT8183_POWER_DOMAIN_DISP>;
clocks = <&mmsys CLK_MM_DISP_DITHER0>;
+ mediatek,gce-client-reg = <&gce SUBSYS_1401XXXX 0x2000 0x1000>;
};
dsi0: dsi@14014000 {
diff --git a/arch/arm64/boot/dts/mediatek/mt8516.dtsi b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
index b80e95574bef..bbe5a1419eff 100644
--- a/arch/arm64/boot/dts/mediatek/mt8516.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8516.dtsi
@@ -480,7 +480,7 @@
};
usb0: usb@11100000 {
- compatible = "mediatek,mtk-musb";
+ compatible = "mediatek,mt8516-musb", "mediatek,mtk-musb";
reg = <0 0x11100000 0 0x1000>;
interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_LOW>;
interrupt-names = "mc";
@@ -493,7 +493,7 @@
};
usb1: usb@11190000 {
- compatible = "mediatek,mtk-musb";
+ compatible = "mediatek,mt8516-musb", "mediatek,mtk-musb";
reg = <0 0x11190000 0 0x1000>;
interrupts = <GIC_SPI 210 IRQ_TYPE_LEVEL_LOW>;
interrupt-names = "mc";
@@ -506,8 +506,9 @@
status = "disabled";
};
- usb_phy: usb@11110000 {
- compatible = "mediatek,generic-tphy-v1";
+ usb_phy: t-phy@11110000 {
+ compatible = "mediatek,mt8516-tphy",
+ "mediatek,generic-tphy-v1";
reg = <0 0x11110000 0 0x800>;
#address-cells = <2>;
#size-cells = <2>;
diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
index 63fd70086bb8..fcddec14738d 100644
--- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
+++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
@@ -56,7 +56,7 @@
tca6416: gpio@20 {
compatible = "ti,tca6416";
reg = <0x20>;
- reset-gpios = <&pio 65 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 65 GPIO_ACTIVE_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&tca6416_pins>;
@@ -188,6 +188,7 @@
&usb0 {
status = "okay";
dr_mode = "peripheral";
+ usb-role-switch;
usb_con: connector {
compatible = "usb-c-connector";
diff --git a/arch/arm64/boot/dts/microchip/sparx5.dtsi b/arch/arm64/boot/dts/microchip/sparx5.dtsi
index d64621d1213b..ad07fff40544 100644
--- a/arch/arm64/boot/dts/microchip/sparx5.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5.dtsi
@@ -135,9 +135,12 @@
};
};
- reset@611010008 {
- compatible = "microchip,sparx5-chip-reset";
+ reset: reset-controller@611010008 {
+ compatible = "microchip,sparx5-switch-reset";
reg = <0x6 0x11010008 0x4>;
+ reg-names = "gcb";
+ #reset-cells = <1>;
+ cpu-syscon = <&cpu_ctrl>;
};
uart0: serial@600100000 {
@@ -275,6 +278,21 @@
"GPIO_46", "GPIO_47";
function = "emmc";
};
+
+ miim1_pins: miim1-pins {
+ pins = "GPIO_56", "GPIO_57";
+ function = "miim";
+ };
+
+ miim2_pins: miim2-pins {
+ pins = "GPIO_58", "GPIO_59";
+ function = "miim";
+ };
+
+ miim3_pins: miim3-pins {
+ pins = "GPIO_52", "GPIO_53";
+ function = "miim";
+ };
};
sgpio0: gpio@61101036c {
@@ -285,6 +303,8 @@
clocks = <&sys_clk>;
pinctrl-0 = <&sgpio0_pins>;
pinctrl-names = "default";
+ resets = <&reset 0>;
+ reset-names = "switch";
reg = <0x6 0x1101036c 0x100>;
sgpio_in0: gpio@0 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -292,6 +312,9 @@
gpio-controller;
#gpio-cells = <3>;
ngpios = <96>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
};
sgpio_out0: gpio@1 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -310,6 +333,8 @@
clocks = <&sys_clk>;
pinctrl-0 = <&sgpio1_pins>;
pinctrl-names = "default";
+ resets = <&reset 0>;
+ reset-names = "switch";
reg = <0x6 0x11010484 0x100>;
sgpio_in1: gpio@0 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -317,6 +342,9 @@
gpio-controller;
#gpio-cells = <3>;
ngpios = <96>;
+ interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
};
sgpio_out1: gpio@1 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -335,6 +363,8 @@
clocks = <&sys_clk>;
pinctrl-0 = <&sgpio2_pins>;
pinctrl-names = "default";
+ resets = <&reset 0>;
+ reset-names = "switch";
reg = <0x6 0x1101059c 0x100>;
sgpio_in2: gpio@0 {
reg = <0>;
@@ -342,6 +372,9 @@
gpio-controller;
#gpio-cells = <3>;
ngpios = <96>;
+ interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
};
sgpio_out2: gpio@1 {
compatible = "microchip,sparx5-sgpio-bank";
@@ -386,5 +419,62 @@
#thermal-sensor-cells = <0>;
clocks = <&ahb_clk>;
};
+
+ mdio0: mdio@6110102b0 {
+ compatible = "mscc,ocelot-miim";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6 0x110102b0 0x24>;
+ };
+
+ mdio1: mdio@6110102d4 {
+ compatible = "mscc,ocelot-miim";
+ status = "disabled";
+ pinctrl-0 = <&miim1_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6 0x110102d4 0x24>;
+ };
+
+ mdio2: mdio@6110102f8 {
+ compatible = "mscc,ocelot-miim";
+ status = "disabled";
+ pinctrl-0 = <&miim2_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6 0x110102f8 0x24>;
+ };
+
+ mdio3: mdio@61101031c {
+ compatible = "mscc,ocelot-miim";
+ status = "disabled";
+ pinctrl-0 = <&miim3_pins>;
+ pinctrl-names = "default";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x6 0x1101031c 0x24>;
+ };
+
+ serdes: serdes@10808000 {
+ compatible = "microchip,sparx5-serdes";
+ #phy-cells = <1>;
+ clocks = <&sys_clk>;
+ reg = <0x6 0x10808000 0x5d0000>;
+ };
+
+ switch: switch@600000000 {
+ compatible = "microchip,sparx5-switch";
+ reg = <0x6 0 0x401000>,
+ <0x6 0x10004000 0x7fc000>,
+ <0x6 0x11010000 0xaf0000>;
+ reg-names = "cpu", "dev", "gcb";
+ interrupt-names = "xtr";
+ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&reset 0>;
+ reset-names = "switch";
+ };
};
};
diff --git a/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi b/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
index f0c915160990..33faf1f3264f 100644
--- a/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5_pcb134_board.dtsi
@@ -7,30 +7,6 @@
#include "sparx5_pcb_common.dtsi"
/{
- aliases {
- i2c0 = &i2c0;
- i2c100 = &i2c100;
- i2c101 = &i2c101;
- i2c102 = &i2c102;
- i2c103 = &i2c103;
- i2c104 = &i2c104;
- i2c105 = &i2c105;
- i2c106 = &i2c106;
- i2c107 = &i2c107;
- i2c108 = &i2c108;
- i2c109 = &i2c109;
- i2c110 = &i2c110;
- i2c111 = &i2c111;
- i2c112 = &i2c112;
- i2c113 = &i2c113;
- i2c114 = &i2c114;
- i2c115 = &i2c115;
- i2c116 = &i2c116;
- i2c117 = &i2c117;
- i2c118 = &i2c118;
- i2c119 = &i2c119;
- };
-
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpio 37 GPIO_ACTIVE_LOW>;
@@ -298,17 +274,10 @@
&spi0 {
status = "okay";
- spi@0 {
- compatible = "spi-mux";
- mux-controls = <&mux>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>; /* CS0 */
- spi-flash@9 {
- compatible = "jedec,spi-nor";
- spi-max-frequency = <8000000>;
- reg = <0x9>; /* SPI */
- };
+ spi-flash@0 {
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <8000000>;
+ reg = <0>;
};
};
@@ -328,6 +297,33 @@
};
};
+&sgpio0 {
+ status = "okay";
+ microchip,sgpio-port-ranges = <8 15>;
+ gpio@0 {
+ ngpios = <64>;
+ };
+ gpio@1 {
+ ngpios = <64>;
+ };
+};
+
+&sgpio1 {
+ status = "okay";
+ microchip,sgpio-port-ranges = <24 31>;
+ gpio@0 {
+ ngpios = <64>;
+ };
+ gpio@1 {
+ ngpios = <64>;
+ };
+};
+
+&sgpio2 {
+ status = "okay";
+ microchip,sgpio-port-ranges = <0 0>, <11 31>;
+};
+
&gpio {
i2cmux_pins_i: i2cmux-pins-i {
pins = "GPIO_16", "GPIO_17", "GPIO_18", "GPIO_19",
@@ -415,9 +411,9 @@
&i2c0_imux {
pinctrl-names =
- "i2c100", "i2c101", "i2c102", "i2c103",
- "i2c104", "i2c105", "i2c106", "i2c107",
- "i2c108", "i2c109", "i2c110", "i2c111", "idle";
+ "i2c_sfp1", "i2c_sfp2", "i2c_sfp3", "i2c_sfp4",
+ "i2c_sfp5", "i2c_sfp6", "i2c_sfp7", "i2c_sfp8",
+ "i2c_sfp9", "i2c_sfp10", "i2c_sfp11", "i2c_sfp12", "idle";
pinctrl-0 = <&i2cmux_0>;
pinctrl-1 = <&i2cmux_1>;
pinctrl-2 = <&i2cmux_2>;
@@ -431,62 +427,62 @@
pinctrl-10 = <&i2cmux_10>;
pinctrl-11 = <&i2cmux_11>;
pinctrl-12 = <&i2cmux_pins_i>;
- i2c100: i2c_sfp1 {
+ i2c_sfp1: i2c_sfp1 {
reg = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c101: i2c_sfp2 {
+ i2c_sfp2: i2c_sfp2 {
reg = <0x1>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c102: i2c_sfp3 {
+ i2c_sfp3: i2c_sfp3 {
reg = <0x2>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c103: i2c_sfp4 {
+ i2c_sfp4: i2c_sfp4 {
reg = <0x3>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c104: i2c_sfp5 {
+ i2c_sfp5: i2c_sfp5 {
reg = <0x4>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c105: i2c_sfp6 {
+ i2c_sfp6: i2c_sfp6 {
reg = <0x5>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c106: i2c_sfp7 {
+ i2c_sfp7: i2c_sfp7 {
reg = <0x6>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c107: i2c_sfp8 {
+ i2c_sfp8: i2c_sfp8 {
reg = <0x7>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c108: i2c_sfp9 {
+ i2c_sfp9: i2c_sfp9 {
reg = <0x8>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c109: i2c_sfp10 {
+ i2c_sfp10: i2c_sfp10 {
reg = <0x9>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c110: i2c_sfp11 {
+ i2c_sfp11: i2c_sfp11 {
reg = <0xa>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c111: i2c_sfp12 {
+ i2c_sfp12: i2c_sfp12 {
reg = <0xb>;
#address-cells = <1>;
#size-cells = <0>;
@@ -499,44 +495,413 @@
&gpio 61 GPIO_ACTIVE_HIGH
&gpio 54 GPIO_ACTIVE_HIGH>;
idle-state = <0x8>;
- i2c112: i2c_sfp13 {
+ i2c_sfp13: i2c_sfp13 {
reg = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c113: i2c_sfp14 {
+ i2c_sfp14: i2c_sfp14 {
reg = <0x1>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c114: i2c_sfp15 {
+ i2c_sfp15: i2c_sfp15 {
reg = <0x2>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c115: i2c_sfp16 {
+ i2c_sfp16: i2c_sfp16 {
reg = <0x3>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c116: i2c_sfp17 {
+ i2c_sfp17: i2c_sfp17 {
reg = <0x4>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c117: i2c_sfp18 {
+ i2c_sfp18: i2c_sfp18 {
reg = <0x5>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c118: i2c_sfp19 {
+ i2c_sfp19: i2c_sfp19 {
reg = <0x6>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c119: i2c_sfp20 {
+ i2c_sfp20: i2c_sfp20 {
reg = <0x7>;
#address-cells = <1>;
#size-cells = <0>;
};
};
+
+&mdio3 {
+ status = "ok";
+ phy64: ethernet-phy@64 {
+ reg = <28>;
+ };
+};
+
+&axi {
+ sfp_eth12: sfp-eth12 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp1>;
+ tx-disable-gpios = <&sgpio_out2 11 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 11 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 11 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 12 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth13: sfp-eth13 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp2>;
+ tx-disable-gpios = <&sgpio_out2 12 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 12 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 12 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 13 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth14: sfp-eth14 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp3>;
+ tx-disable-gpios = <&sgpio_out2 13 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 13 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 13 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 14 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth15: sfp-eth15 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp4>;
+ tx-disable-gpios = <&sgpio_out2 14 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 14 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 14 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 15 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth48: sfp-eth48 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp5>;
+ tx-disable-gpios = <&sgpio_out2 15 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 15 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 15 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 16 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth49: sfp-eth49 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp6>;
+ tx-disable-gpios = <&sgpio_out2 16 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 16 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 16 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 17 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth50: sfp-eth50 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp7>;
+ tx-disable-gpios = <&sgpio_out2 17 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 17 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 17 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 18 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth51: sfp-eth51 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp8>;
+ tx-disable-gpios = <&sgpio_out2 18 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 18 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 18 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 19 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth52: sfp-eth52 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp9>;
+ tx-disable-gpios = <&sgpio_out2 19 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 19 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 19 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 20 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth53: sfp-eth53 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp10>;
+ tx-disable-gpios = <&sgpio_out2 20 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 20 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 20 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 21 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth54: sfp-eth54 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp11>;
+ tx-disable-gpios = <&sgpio_out2 21 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 21 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 21 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 22 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth55: sfp-eth55 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp12>;
+ tx-disable-gpios = <&sgpio_out2 22 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 22 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 22 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 23 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth56: sfp-eth56 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp13>;
+ tx-disable-gpios = <&sgpio_out2 23 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 23 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 23 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 24 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth57: sfp-eth57 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp14>;
+ tx-disable-gpios = <&sgpio_out2 24 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 24 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 24 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 25 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth58: sfp-eth58 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp15>;
+ tx-disable-gpios = <&sgpio_out2 25 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 25 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 25 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 26 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth59: sfp-eth59 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp16>;
+ tx-disable-gpios = <&sgpio_out2 26 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 26 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 26 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 27 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth60: sfp-eth60 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp17>;
+ tx-disable-gpios = <&sgpio_out2 27 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 27 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 27 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 28 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth61: sfp-eth61 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp18>;
+ tx-disable-gpios = <&sgpio_out2 28 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 28 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 28 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 29 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth62: sfp-eth62 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp19>;
+ tx-disable-gpios = <&sgpio_out2 29 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 29 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 29 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 30 0 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth63: sfp-eth63 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp20>;
+ tx-disable-gpios = <&sgpio_out2 30 1 GPIO_ACTIVE_LOW>;
+ los-gpios = <&sgpio_in2 30 1 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 30 2 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 31 0 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&switch {
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* 10G SFPs */
+ port12: port@12 {
+ reg = <12>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 13>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth12>;
+ microchip,sd-sgpio = <301>;
+ managed = "in-band-status";
+ };
+ port13: port@13 {
+ reg = <13>;
+ /* Example: CU SFP, 1G speed */
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 14>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth13>;
+ microchip,sd-sgpio = <305>;
+ managed = "in-band-status";
+ };
+ port14: port@14 {
+ reg = <14>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 15>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth14>;
+ microchip,sd-sgpio = <309>;
+ managed = "in-band-status";
+ };
+ port15: port@15 {
+ reg = <15>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 16>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth15>;
+ microchip,sd-sgpio = <313>;
+ managed = "in-band-status";
+ };
+ port48: port@48 {
+ reg = <48>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 17>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth48>;
+ microchip,sd-sgpio = <317>;
+ managed = "in-band-status";
+ };
+ port49: port@49 {
+ reg = <49>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 18>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth49>;
+ microchip,sd-sgpio = <321>;
+ managed = "in-band-status";
+ };
+ port50: port@50 {
+ reg = <50>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 19>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth50>;
+ microchip,sd-sgpio = <325>;
+ managed = "in-band-status";
+ };
+ port51: port@51 {
+ reg = <51>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 20>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth51>;
+ microchip,sd-sgpio = <329>;
+ managed = "in-band-status";
+ };
+ port52: port@52 {
+ reg = <52>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 21>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth52>;
+ microchip,sd-sgpio = <333>;
+ managed = "in-band-status";
+ };
+ port53: port@53 {
+ reg = <53>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 22>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth53>;
+ microchip,sd-sgpio = <337>;
+ managed = "in-band-status";
+ };
+ port54: port@54 {
+ reg = <54>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 23>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth54>;
+ microchip,sd-sgpio = <341>;
+ managed = "in-band-status";
+ };
+ port55: port@55 {
+ reg = <55>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 24>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth55>;
+ microchip,sd-sgpio = <345>;
+ managed = "in-band-status";
+ };
+ /* 25G SFPs */
+ port56: port@56 {
+ reg = <56>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 25>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth56>;
+ microchip,sd-sgpio = <349>;
+ managed = "in-band-status";
+ };
+ port57: port@57 {
+ reg = <57>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 26>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth57>;
+ microchip,sd-sgpio = <353>;
+ managed = "in-band-status";
+ };
+ port58: port@58 {
+ reg = <58>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 27>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth58>;
+ microchip,sd-sgpio = <357>;
+ managed = "in-band-status";
+ };
+ port59: port@59 {
+ reg = <59>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 28>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth59>;
+ microchip,sd-sgpio = <361>;
+ managed = "in-band-status";
+ };
+ port60: port@60 {
+ reg = <60>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 29>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth60>;
+ microchip,sd-sgpio = <365>;
+ managed = "in-band-status";
+ };
+ port61: port@61 {
+ reg = <61>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 30>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth61>;
+ microchip,sd-sgpio = <369>;
+ managed = "in-band-status";
+ };
+ port62: port@62 {
+ reg = <62>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 31>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth62>;
+ microchip,sd-sgpio = <373>;
+ managed = "in-band-status";
+ };
+ port63: port@63 {
+ reg = <63>;
+ microchip,bandwidth = <10000>;
+ phys = <&serdes 32>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth63>;
+ microchip,sd-sgpio = <377>;
+ managed = "in-band-status";
+ };
+ /* Finally the Management interface */
+ port64: port@64 {
+ reg = <64>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 0>;
+ phy-handle = <&phy64>;
+ phy-mode = "sgmii";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi b/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
index e28c6dd16377..ef96e6d8c6b3 100644
--- a/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
+++ b/arch/arm64/boot/dts/microchip/sparx5_pcb135_board.dtsi
@@ -7,14 +7,6 @@
#include "sparx5_pcb_common.dtsi"
/{
- aliases {
- i2c0 = &i2c0;
- i2c152 = &i2c152;
- i2c153 = &i2c153;
- i2c154 = &i2c154;
- i2c155 = &i2c155;
- };
-
gpio-restart {
compatible = "gpio-restart";
gpios = <&gpio 37 GPIO_ACTIVE_LOW>;
@@ -97,17 +89,10 @@
&spi0 {
status = "okay";
- spi@0 {
- compatible = "spi-mux";
- mux-controls = <&mux>;
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>; /* CS0 */
- spi-flash@9 {
- compatible = "jedec,spi-nor";
- spi-max-frequency = <8000000>;
- reg = <0x9>; /* SPI */
- };
+ spi-flash@0 {
+ compatible = "jedec,spi-nor";
+ spi-max-frequency = <8000000>;
+ reg = <0>;
};
};
@@ -138,6 +123,11 @@
};
};
+&sgpio2 {
+ status = "okay";
+ microchip,sgpio-port-ranges = <0 0>, <16 18>, <28 31>;
+};
+
&axi {
i2c0_imux: i2c0-imux@0 {
compatible = "i2c-mux-pinctrl";
@@ -149,31 +139,614 @@
&i2c0_imux {
pinctrl-names =
- "i2c152", "i2c153", "i2c154", "i2c155",
+ "i2c_sfp1", "i2c_sfp2", "i2c_sfp3", "i2c_sfp4",
"idle";
pinctrl-0 = <&i2cmux_s29>;
pinctrl-1 = <&i2cmux_s30>;
pinctrl-2 = <&i2cmux_s31>;
pinctrl-3 = <&i2cmux_s32>;
pinctrl-4 = <&i2cmux_pins_i>;
- i2c152: i2c_sfp1 {
+ i2c_sfp1: i2c_sfp1 {
reg = <0x0>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c153: i2c_sfp2 {
+ i2c_sfp2: i2c_sfp2 {
reg = <0x1>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c154: i2c_sfp3 {
+ i2c_sfp3: i2c_sfp3 {
reg = <0x2>;
#address-cells = <1>;
#size-cells = <0>;
};
- i2c155: i2c_sfp4 {
+ i2c_sfp4: i2c_sfp4 {
reg = <0x3>;
#address-cells = <1>;
#size-cells = <0>;
};
};
+
+&axi {
+ sfp_eth60: sfp-eth60 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp1>;
+ tx-disable-gpios = <&sgpio_out2 28 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 28 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 28 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 28 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 28 2 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth61: sfp-eth61 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp2>;
+ tx-disable-gpios = <&sgpio_out2 29 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 29 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 29 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 29 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 29 2 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth62: sfp-eth62 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp3>;
+ tx-disable-gpios = <&sgpio_out2 30 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 30 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 30 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 30 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 30 2 GPIO_ACTIVE_HIGH>;
+ };
+ sfp_eth63: sfp-eth63 {
+ compatible = "sff,sfp";
+ i2c-bus = <&i2c_sfp4>;
+ tx-disable-gpios = <&sgpio_out2 31 0 GPIO_ACTIVE_LOW>;
+ rate-select0-gpios = <&sgpio_out2 31 1 GPIO_ACTIVE_HIGH>;
+ los-gpios = <&sgpio_in2 31 0 GPIO_ACTIVE_HIGH>;
+ mod-def0-gpios = <&sgpio_in2 31 1 GPIO_ACTIVE_LOW>;
+ tx-fault-gpios = <&sgpio_in2 31 2 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&mdio0 {
+ status = "ok";
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ phy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+ phy3: ethernet-phy@3 {
+ reg = <3>;
+ };
+ phy4: ethernet-phy@4 {
+ reg = <4>;
+ };
+ phy5: ethernet-phy@5 {
+ reg = <5>;
+ };
+ phy6: ethernet-phy@6 {
+ reg = <6>;
+ };
+ phy7: ethernet-phy@7 {
+ reg = <7>;
+ };
+ phy8: ethernet-phy@8 {
+ reg = <8>;
+ };
+ phy9: ethernet-phy@9 {
+ reg = <9>;
+ };
+ phy10: ethernet-phy@10 {
+ reg = <10>;
+ };
+ phy11: ethernet-phy@11 {
+ reg = <11>;
+ };
+ phy12: ethernet-phy@12 {
+ reg = <12>;
+ };
+ phy13: ethernet-phy@13 {
+ reg = <13>;
+ };
+ phy14: ethernet-phy@14 {
+ reg = <14>;
+ };
+ phy15: ethernet-phy@15 {
+ reg = <15>;
+ };
+ phy16: ethernet-phy@16 {
+ reg = <16>;
+ };
+ phy17: ethernet-phy@17 {
+ reg = <17>;
+ };
+ phy18: ethernet-phy@18 {
+ reg = <18>;
+ };
+ phy19: ethernet-phy@19 {
+ reg = <19>;
+ };
+ phy20: ethernet-phy@20 {
+ reg = <20>;
+ };
+ phy21: ethernet-phy@21 {
+ reg = <21>;
+ };
+ phy22: ethernet-phy@22 {
+ reg = <22>;
+ };
+ phy23: ethernet-phy@23 {
+ reg = <23>;
+ };
+};
+
+&mdio1 {
+ status = "ok";
+ phy24: ethernet-phy@24 {
+ reg = <0>;
+ };
+ phy25: ethernet-phy@25 {
+ reg = <1>;
+ };
+ phy26: ethernet-phy@26 {
+ reg = <2>;
+ };
+ phy27: ethernet-phy@27 {
+ reg = <3>;
+ };
+ phy28: ethernet-phy@28 {
+ reg = <4>;
+ };
+ phy29: ethernet-phy@29 {
+ reg = <5>;
+ };
+ phy30: ethernet-phy@30 {
+ reg = <6>;
+ };
+ phy31: ethernet-phy@31 {
+ reg = <7>;
+ };
+ phy32: ethernet-phy@32 {
+ reg = <8>;
+ };
+ phy33: ethernet-phy@33 {
+ reg = <9>;
+ };
+ phy34: ethernet-phy@34 {
+ reg = <10>;
+ };
+ phy35: ethernet-phy@35 {
+ reg = <11>;
+ };
+ phy36: ethernet-phy@36 {
+ reg = <12>;
+ };
+ phy37: ethernet-phy@37 {
+ reg = <13>;
+ };
+ phy38: ethernet-phy@38 {
+ reg = <14>;
+ };
+ phy39: ethernet-phy@39 {
+ reg = <15>;
+ };
+ phy40: ethernet-phy@40 {
+ reg = <16>;
+ };
+ phy41: ethernet-phy@41 {
+ reg = <17>;
+ };
+ phy42: ethernet-phy@42 {
+ reg = <18>;
+ };
+ phy43: ethernet-phy@43 {
+ reg = <19>;
+ };
+ phy44: ethernet-phy@44 {
+ reg = <20>;
+ };
+ phy45: ethernet-phy@45 {
+ reg = <21>;
+ };
+ phy46: ethernet-phy@46 {
+ reg = <22>;
+ };
+ phy47: ethernet-phy@47 {
+ reg = <23>;
+ };
+};
+
+&mdio3 {
+ status = "ok";
+ phy64: ethernet-phy@64 {
+ reg = <28>;
+ };
+};
+
+&switch {
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port0: port@0 {
+ reg = <0>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 13>;
+ phy-handle = <&phy0>;
+ phy-mode = "qsgmii";
+ };
+ port1: port@1 {
+ reg = <1>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 13>;
+ phy-handle = <&phy1>;
+ phy-mode = "qsgmii";
+ };
+ port2: port@2 {
+ reg = <2>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 13>;
+ phy-handle = <&phy2>;
+ phy-mode = "qsgmii";
+ };
+ port3: port@3 {
+ reg = <3>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 13>;
+ phy-handle = <&phy3>;
+ phy-mode = "qsgmii";
+ };
+ port4: port@4 {
+ reg = <4>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 14>;
+ phy-handle = <&phy4>;
+ phy-mode = "qsgmii";
+ };
+ port5: port@5 {
+ reg = <5>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 14>;
+ phy-handle = <&phy5>;
+ phy-mode = "qsgmii";
+ };
+ port6: port@6 {
+ reg = <6>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 14>;
+ phy-handle = <&phy6>;
+ phy-mode = "qsgmii";
+ };
+ port7: port@7 {
+ reg = <7>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 14>;
+ phy-handle = <&phy7>;
+ phy-mode = "qsgmii";
+ };
+ port8: port@8 {
+ reg = <8>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 15>;
+ phy-handle = <&phy8>;
+ phy-mode = "qsgmii";
+ };
+ port9: port@9 {
+ reg = <9>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 15>;
+ phy-handle = <&phy9>;
+ phy-mode = "qsgmii";
+ };
+ port10: port@10 {
+ reg = <10>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 15>;
+ phy-handle = <&phy10>;
+ phy-mode = "qsgmii";
+ };
+ port11: port@11 {
+ reg = <11>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 15>;
+ phy-handle = <&phy11>;
+ phy-mode = "qsgmii";
+ };
+ port12: port@12 {
+ reg = <12>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 16>;
+ phy-handle = <&phy12>;
+ phy-mode = "qsgmii";
+ };
+ port13: port@13 {
+ reg = <13>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 16>;
+ phy-handle = <&phy13>;
+ phy-mode = "qsgmii";
+ };
+ port14: port@14 {
+ reg = <14>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 16>;
+ phy-handle = <&phy14>;
+ phy-mode = "qsgmii";
+ };
+ port15: port@15 {
+ reg = <15>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 16>;
+ phy-handle = <&phy15>;
+ phy-mode = "qsgmii";
+ };
+ port16: port@16 {
+ reg = <16>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 17>;
+ phy-handle = <&phy16>;
+ phy-mode = "qsgmii";
+ };
+ port17: port@17 {
+ reg = <17>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 17>;
+ phy-handle = <&phy17>;
+ phy-mode = "qsgmii";
+ };
+ port18: port@18 {
+ reg = <18>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 17>;
+ phy-handle = <&phy18>;
+ phy-mode = "qsgmii";
+ };
+ port19: port@19 {
+ reg = <19>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 17>;
+ phy-handle = <&phy19>;
+ phy-mode = "qsgmii";
+ };
+ port20: port@20 {
+ reg = <20>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 18>;
+ phy-handle = <&phy20>;
+ phy-mode = "qsgmii";
+ };
+ port21: port@21 {
+ reg = <21>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 18>;
+ phy-handle = <&phy21>;
+ phy-mode = "qsgmii";
+ };
+ port22: port@22 {
+ reg = <22>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 18>;
+ phy-handle = <&phy22>;
+ phy-mode = "qsgmii";
+ };
+ port23: port@23 {
+ reg = <23>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 18>;
+ phy-handle = <&phy23>;
+ phy-mode = "qsgmii";
+ };
+ port24: port@24 {
+ reg = <24>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 19>;
+ phy-handle = <&phy24>;
+ phy-mode = "qsgmii";
+ };
+ port25: port@25 {
+ reg = <25>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 19>;
+ phy-handle = <&phy25>;
+ phy-mode = "qsgmii";
+ };
+ port26: port@26 {
+ reg = <26>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 19>;
+ phy-handle = <&phy26>;
+ phy-mode = "qsgmii";
+ };
+ port27: port@27 {
+ reg = <27>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 19>;
+ phy-handle = <&phy27>;
+ phy-mode = "qsgmii";
+ };
+ port28: port@28 {
+ reg = <28>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 20>;
+ phy-handle = <&phy28>;
+ phy-mode = "qsgmii";
+ };
+ port29: port@29 {
+ reg = <29>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 20>;
+ phy-handle = <&phy29>;
+ phy-mode = "qsgmii";
+ };
+ port30: port@30 {
+ reg = <30>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 20>;
+ phy-handle = <&phy30>;
+ phy-mode = "qsgmii";
+ };
+ port31: port@31 {
+ reg = <31>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 20>;
+ phy-handle = <&phy31>;
+ phy-mode = "qsgmii";
+ };
+ port32: port@32 {
+ reg = <32>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 21>;
+ phy-handle = <&phy32>;
+ phy-mode = "qsgmii";
+ };
+ port33: port@33 {
+ reg = <33>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 21>;
+ phy-handle = <&phy33>;
+ phy-mode = "qsgmii";
+ };
+ port34: port@34 {
+ reg = <34>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 21>;
+ phy-handle = <&phy34>;
+ phy-mode = "qsgmii";
+ };
+ port35: port@35 {
+ reg = <35>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 21>;
+ phy-handle = <&phy35>;
+ phy-mode = "qsgmii";
+ };
+ port36: port@36 {
+ reg = <36>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 22>;
+ phy-handle = <&phy36>;
+ phy-mode = "qsgmii";
+ };
+ port37: port@37 {
+ reg = <37>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 22>;
+ phy-handle = <&phy37>;
+ phy-mode = "qsgmii";
+ };
+ port38: port@38 {
+ reg = <38>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 22>;
+ phy-handle = <&phy38>;
+ phy-mode = "qsgmii";
+ };
+ port39: port@39 {
+ reg = <39>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 22>;
+ phy-handle = <&phy39>;
+ phy-mode = "qsgmii";
+ };
+ port40: port@40 {
+ reg = <40>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 23>;
+ phy-handle = <&phy40>;
+ phy-mode = "qsgmii";
+ };
+ port41: port@41 {
+ reg = <41>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 23>;
+ phy-handle = <&phy41>;
+ phy-mode = "qsgmii";
+ };
+ port42: port@42 {
+ reg = <42>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 23>;
+ phy-handle = <&phy42>;
+ phy-mode = "qsgmii";
+ };
+ port43: port@43 {
+ reg = <43>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 23>;
+ phy-handle = <&phy43>;
+ phy-mode = "qsgmii";
+ };
+ port44: port@44 {
+ reg = <44>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 24>;
+ phy-handle = <&phy44>;
+ phy-mode = "qsgmii";
+ };
+ port45: port@45 {
+ reg = <45>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 24>;
+ phy-handle = <&phy45>;
+ phy-mode = "qsgmii";
+ };
+ port46: port@46 {
+ reg = <46>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 24>;
+ phy-handle = <&phy46>;
+ phy-mode = "qsgmii";
+ };
+ port47: port@47 {
+ reg = <47>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 24>;
+ phy-handle = <&phy47>;
+ phy-mode = "qsgmii";
+ };
+ /* Then the 25G interfaces */
+ port60: port@60 {
+ reg = <60>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 29>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth60>;
+ managed = "in-band-status";
+ };
+ port61: port@61 {
+ reg = <61>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 30>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth61>;
+ managed = "in-band-status";
+ };
+ port62: port@62 {
+ reg = <62>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 31>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth62>;
+ managed = "in-band-status";
+ };
+ port63: port@63 {
+ reg = <63>;
+ microchip,bandwidth = <25000>;
+ phys = <&serdes 32>;
+ phy-mode = "10gbase-r";
+ sfp = <&sfp_eth63>;
+ managed = "in-band-status";
+ };
+ /* Finally the Management interface */
+ port64: port@64 {
+ reg = <64>;
+ microchip,bandwidth = <1000>;
+ phys = <&serdes 0>;
+ phy-handle = <&phy64>;
+ phy-mode = "sgmii";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
index 9f5f5e1fa82e..683743f81849 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p2771-0000.dts
@@ -10,7 +10,7 @@
model = "NVIDIA Jetson TX2 Developer Kit";
compatible = "nvidia,p2771-0000", "nvidia,tegra186";
- aconnect {
+ aconnect@2900000 {
status = "okay";
dma-controller@2930000 {
diff --git a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
index fd9177447711..fcd71bfc6707 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186-p3310.dtsi
@@ -23,7 +23,7 @@
};
chosen {
- bootargs = "earlycon console=ttyS0,115200n8";
+ bootargs = "earlycon console=ttyS0,115200n8 fw_devlink=on";
stdout-path = "serial0:115200n8";
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra186.dtsi b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
index 02b26b39cedc..9f75bbf00cf7 100644
--- a/arch/arm64/boot/dts/nvidia/tegra186.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra186.dtsi
@@ -73,7 +73,7 @@
snps,rxpbl = <8>;
};
- aconnect {
+ aconnect@2900000 {
compatible = "nvidia,tegra186-aconnect",
"nvidia,tegra210-aconnect";
clocks = <&bpmp TEGRA186_CLK_APE>,
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
index 2888efc42ba1..d618f197a1d3 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p2972-0000.dts
@@ -651,6 +651,8 @@
reg = <0x1a>;
interrupt-parent = <&gpio>;
interrupts = <TEGRA194_MAIN_GPIO(S, 5) GPIO_ACTIVE_HIGH>;
+ clocks = <&bpmp TEGRA194_CLK_AUD_MCLK>;
+ clock-names = "mclk";
realtek,jd-src = <2>;
sound-name-prefix = "CVB-RT";
@@ -658,7 +660,6 @@
rt5658_ep: endpoint {
remote-endpoint = <&i2s1_dap_ep>;
mclk-fs = <256>;
- clocks = <&bpmp TEGRA194_CLK_AUD_MCLK>;
};
};
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
index 7da3d48cb410..14da4206ea66 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0000.dtsi
@@ -5,6 +5,10 @@
model = "NVIDIA Jetson Xavier NX (SD-card)";
compatible = "nvidia,p3668-0000", "nvidia,tegra194";
+ aliases {
+ mmc0 = "/bus@0/mmc@3400000";
+ };
+
bus@0 {
/* SDMMC1 (SD/MMC) */
mmc@3400000 {
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0001.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0001.dtsi
index b7808648cfe4..f5a9ebbfb12f 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p3668-0001.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668-0001.dtsi
@@ -5,6 +5,10 @@
model = "NVIDIA Jetson Xavier NX (eMMC)";
compatible = "nvidia,p3668-0001", "nvidia,tegra194";
+ aliases {
+ mmc0 = "/bus@0/mmc@3460000";
+ };
+
bus@0 {
/* SDMMC4 (eMMC) */
mmc@3460000 {
diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
index 4f12721c332b..f16b0aa8a374 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194-p3668.dtsi
@@ -14,7 +14,6 @@
i2c5 = "/bus@0/i2c@31c0000";
i2c6 = "/bus@0/i2c@c250000";
i2c7 = "/bus@0/i2c@31e0000";
- mmc0 = "/bus@0/mmc@3460000";
rtc0 = "/bpmp/i2c/pmic@3c";
rtc1 = "/bus@0/rtc@c2a0000";
serial0 = &tcu;
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 549a7a2151d4..456502aeee49 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -27,10 +27,16 @@ dtb-$(CONFIG_ARCH_QCOM) += msm8998-asus-novago-tp370ql.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8998-hp-envy-x2.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8998-lenovo-miix-630.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8998-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8998-oneplus-cheeseburger.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8998-oneplus-dumpling.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs404-evb-1000.dtb
dtb-$(CONFIG_ARCH_QCOM) += qcs404-evb-4000.dtb
dtb-$(CONFIG_ARCH_QCOM) += qrb5165-rb5.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r2.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-coachz-r2-lte.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r0.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r1.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r1-kb.dtb
@@ -38,8 +44,16 @@ dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r1-lte.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r3.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r3-kb.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-r3-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-limozeen.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-limozeen-nots.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-lazor-limozeen-nots-r4.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-pompom-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-pompom-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-pompom-r2.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-pompom-r2-lte.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-r1.dtb
dtb-$(CONFIG_ARCH_QCOM) += sc7180-trogdor-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sc7280-idp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm630-sony-xperia-ganges-kirin.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm630-sony-xperia-nile-discovery.dtb
dtb-$(CONFIG_ARCH_QCOM) += sdm630-sony-xperia-nile-pioneer.dtb
@@ -59,4 +73,5 @@ dtb-$(CONFIG_ARCH_QCOM) += sm8150-hdk.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8150-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8250-hdk.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8250-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += sm8350-hdk.dtb
dtb-$(CONFIG_ARCH_QCOM) += sm8350-mtp.dtb
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
index 48bd1c2874de..f3c0dbfd0a23 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
@@ -9,5 +9,5 @@
/ {
model = "Qualcomm Technologies, Inc. APQ 8016 SBC";
- compatible = "qcom,apq8016-sbc", "qcom,apq8016", "qcom,sbc";
+ compatible = "qcom,apq8016-sbc", "qcom,apq8016";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 5353da521974..4f06c0a9c425 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -25,10 +25,10 @@
chosen { };
- memory {
+ memory@80000000 {
device_type = "memory";
/* We expect the bootloader to fill in the reg */
- reg = <0 0 0 0>;
+ reg = <0 0x80000000 0 0>;
};
reserved-memory {
@@ -1766,7 +1766,9 @@
compatible = "qcom,msm-qgic2";
interrupt-controller;
#interrupt-cells = <3>;
- reg = <0x0b000000 0x1000>, <0x0b002000 0x1000>;
+ reg = <0x0b000000 0x1000>, <0x0b002000 0x2000>,
+ <0x0b001000 0x1000>, <0x0b004000 0x2000>;
+ interrupts = <GIC_PPI 0 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
};
apcs: mailbox@b011000 {
diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi
index f49d442d2edf..f9f0b5aa6a26 100644
--- a/arch/arm64/boot/dts/qcom/msm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi
@@ -149,10 +149,10 @@
};
};
- memory {
+ memory@80000000 {
device_type = "memory";
/* We expect the bootloader to fill in the reg */
- reg = <0 0 0 0>;
+ reg = <0 0x80000000 0 0>;
};
tcsr_mutex: hwlock {
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index ce430ba9c118..5d06216c5c1c 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -1136,7 +1136,7 @@
};
adreno_smmu: iommu@b40000 {
- compatible = "qcom,msm8996-smmu-v2", "qcom,smmu-v2";
+ compatible = "qcom,msm8996-smmu-v2", "qcom,adreno-smmu", "qcom,smmu-v2";
reg = <0x00b40000 0x10000>;
#global-interrupts = <1>;
diff --git a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi
index b500f24d47bc..125d7923d713 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-clamshell.dtsi
@@ -281,6 +281,10 @@
};
};
+&remoteproc_mss {
+ status = "okay";
+};
+
&tlmm {
gpio-reserved-ranges = <0 4>, <81 4>;
diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
index c1ef0c71d5f5..a1d15eab8553 100644
--- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
@@ -328,6 +328,10 @@
status = "okay";
};
+&remoteproc_mss {
+ status = "okay";
+};
+
&remoteproc_slpi {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/msm8998-oneplus-cheeseburger.dts b/arch/arm64/boot/dts/qcom/msm8998-oneplus-cheeseburger.dts
new file mode 100644
index 000000000000..66b9297588ab
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8998-oneplus-cheeseburger.dts
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * OnePlus 5 (cheeseburger) device tree
+ *
+ * Copyright (c) 2021, Jami Kettunen <jamipkettunen@gmail.com>
+ */
+
+#include <dt-bindings/leds/common.h>
+#include "msm8998-oneplus-common.dtsi"
+
+/ {
+ model = "OnePlus 5";
+ compatible = "oneplus,cheeseburger", "qcom,msm8998";
+ /* Required for bootloader to select correct board */
+ qcom,board-id = <8 0 16859 23>;
+
+ /* Capacitive keypad button backlight */
+ leds {
+ compatible = "gpio-leds";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&button_backlight_default>;
+
+ button-backlight {
+ gpios = <&pmi8998_gpio 5 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_KBD_BACKLIGHT;
+ default-state = "off";
+ };
+ };
+};
+
+&pmi8998_gpio {
+ button_backlight_default: button-backlight-default {
+ pinconf {
+ pins = "gpio5";
+ function = "normal";
+ bias-pull-down;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8998-oneplus-common.dtsi b/arch/arm64/boot/dts/qcom/msm8998-oneplus-common.dtsi
new file mode 100644
index 000000000000..0f5c7828a901
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8998-oneplus-common.dtsi
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * OnePlus 5(T) (cheeseburger / dumpling) common device tree source based on msm8998-mtp.dtsi
+ *
+ * Copyright (c) 2021, Jami Kettunen <jamipkettunen@gmail.com>
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include "msm8998.dtsi"
+#include "pm8998.dtsi"
+#include "pmi8998.dtsi"
+#include "pm8005.dtsi"
+
+/ {
+ /* Required for bootloader to select correct board */
+ qcom,msm-id = <292 0x20001>; /* 8998 v2.1 */
+
+ chosen {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ /* Use display framebuffer setup by the UEFI XBL bootloader for simplefb */
+ framebuffer0: framebuffer@9d400000 {
+ compatible = "simple-framebuffer";
+ reg = <0x0 0x9d400000 0x0 0x2400000>;
+ width = <1080>;
+ height = <1920>;
+ stride = <(1080 * 4)>;
+ format = "a8r8g8b8";
+ };
+ };
+
+ reserved-memory {
+ /* Bootloader display framebuffer region */
+ cont_splash_mem: memory@9d400000 {
+ reg = <0x0 0x9d400000 0x0 0x2400000>;
+ no-map;
+ };
+
+ /* For getting crash logs using Android downstream kernels */
+ ramoops@ac000000 {
+ compatible = "ramoops";
+ reg = <0x0 0xac000000 0x0 0x200000>;
+ console-size = <0x80000>;
+ pmsg-size = <0x40000>;
+ record-size = <0x8000>;
+ ftrace-size = <0x20000>;
+ };
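The ramoops zone sizes sum to 0x80000 + 0x40000 + 0x8000 + 0x20000 = 0xe8000 (928 KiB), well within the 0x200000 (2 MiB) reservation; the ramoops driver carves the remaining space into additional record-size dump zones.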
+
+ /*
+ * The following memory regions on downstream are "dynamically allocated"
+		 * but end up at the same addresses every time. Hard-code them, as these
+		 * addresses are where the OnePlus signed firmware expects them to be.
+ */
+ ipa_fws_region: ipa@f6800000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0xf6800000 0x0 0x5000>;
+ no-map;
+ };
+ zap_shader_region: gpu@f6900000 {
+ compatible = "shared-dma-pool";
+ reg = <0x0 0xf6900000 0x0 0x2000>;
+ no-map;
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ label = "Volume buttons";
+ autorepeat;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&vol_keys_default>;
+
+ vol-down {
+ label = "Volume down";
+ gpios = <&pm8998_gpio 5 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEDOWN>;
+ debounce-interval = <15>;
+ wakeup-source;
+ };
+
+ vol-up {
+ label = "Volume up";
+ gpios = <&pm8998_gpio 6 GPIO_ACTIVE_LOW>;
+ linux,code = <KEY_VOLUMEUP>;
+ debounce-interval = <15>;
+ wakeup-source;
+ };
+ };
+
+ gpio-hall-sensor {
+ compatible = "gpio-keys";
+ label = "Hall effect sensor";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&hall_sensor_default>;
+
+ hall-sensor {
+ label = "Hall Effect Sensor";
+ gpios = <&tlmm 124 GPIO_ACTIVE_LOW>;
+ linux,input-type = <EV_SW>;
+ linux,code = <SW_LID>;
+ linux,can-disable;
+ wakeup-source;
+ };
+ };
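The linux,input-type = <EV_SW> / linux,code = <SW_LID> pair above makes the kernel report the magnet state as a switch (a lid event) rather than a key press, and linux,can-disable lets userspace mask the sensor's dedicated interrupt when it is not interested in flip-cover events.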
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+/*
+ * OnePlus' ADSP firmware requires 30 MiB in total, so increase the adsp_mem
+ * region by 4 MiB to account for this, relocating the other, now-conflicting
+ * memory regions accordingly.
+ */
+&adsp_mem {
+ reg = <0x0 0x8b200000 0x0 0x1e00000>;
+};
+&mpss_mem {
+ reg = <0x0 0x8d000000 0x0 0x7000000>;
+};
+&venus_mem {
+ reg = <0x0 0x94000000 0x0 0x500000>;
+};
+&mba_mem {
+ reg = <0x0 0x94500000 0x0 0x200000>;
+};
+&slpi_mem {
+ reg = <0x0 0x94700000 0x0 0xf00000>;
+};
+&ipa_fw_mem {
+ reg = <0x0 0x95600000 0x0 0x10000>;
+};
+&ipa_gsi_mem {
+ reg = <0x0 0x95610000 0x0 0x5000>;
+};
+&gpu_mem {
+ reg = <0x0 0x95615000 0x0 0x100000>;
+};
+&wlan_msa_mem {
+ reg = <0x0 0x95715000 0x0 0x100000>;
+};
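As a sanity check, the relocated carve-outs tile back to back with no gaps or overlap (each region's base plus size lands exactly on the next base), and the enlarged adsp_mem is 0x1e00000 bytes = 30 MiB, matching the comment above:

    adsp_mem     0x8b200000 + 0x1e00000 = 0x8d000000  (mpss_mem base)
    mpss_mem     0x8d000000 + 0x7000000 = 0x94000000  (venus_mem base)
    venus_mem    0x94000000 +  0x500000 = 0x94500000  (mba_mem base)
    mba_mem      0x94500000 +  0x200000 = 0x94700000  (slpi_mem base)
    slpi_mem     0x94700000 +  0xf00000 = 0x95600000  (ipa_fw_mem base)
    ipa_fw_mem   0x95600000 +   0x10000 = 0x95610000  (ipa_gsi_mem base)
    ipa_gsi_mem  0x95610000 +    0x5000 = 0x95615000  (gpu_mem base)
    gpu_mem      0x95615000 +  0x100000 = 0x95715000  (wlan_msa_mem base)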
+
+&blsp1_i2c5 {
+ status = "okay";
+
+ touchscreen@20 {
+ compatible = "syna,rmi4-i2c";
+ reg = <0x20>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <125 IRQ_TYPE_EDGE_FALLING>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_int_active &ts_reset_active>;
+
+ vdd-supply = <&vreg_l28_3p0>;
+ vio-supply = <&vreg_l6a_1p8>;
+
+ syna,reset-delay-ms = <20>;
+ syna,startup-delay-ms = <20>;
+
+ rmi4-f01@1 {
+ reg = <0x01>;
+ syna,nosleep-mode = <1>;
+ };
+
+ rmi4_f12: rmi4-f12@12 {
+ reg = <0x12>;
+ syna,rezero-wait-ms = <20>;
+ syna,sensor-type = <1>;
+ touchscreen-x-mm = <68>;
+ touchscreen-y-mm = <122>;
+ };
+ };
+};
+
+&blsp1_uart3 {
+ status = "okay";
+
+ bluetooth {
+ compatible = "qcom,wcn3990-bt";
+
+ vddio-supply = <&vreg_s4a_1p8>;
+ vddxo-supply = <&vreg_l7a_1p8>;
+ vddrf-supply = <&vreg_l17a_1p3>;
+ vddch0-supply = <&vreg_l25a_3p3>;
+ max-speed = <3200000>;
+ };
+};
+
+&blsp1_uart3_on {
+ rx {
+ /delete-property/ bias-disable;
+ /*
+ * Configure a pull-up on 46 (RX). This is needed to
+ * avoid garbage data when the TX pin of the Bluetooth
+ * module is in tri-state (module powered off or not
+ * driving the signal yet).
+ */
+ bias-pull-up;
+ };
+
+ cts {
+ /delete-property/ bias-disable;
+ /*
+ * Configure a pull-down on 47 (CTS) to match the pull
+ * of the Bluetooth module.
+ */
+ bias-pull-down;
+ };
+};
+
+&blsp2_uart1 {
+ status = "okay";
+};
+
+&pm8005_lsid1 {
+ pm8005-regulators {
+ compatible = "qcom,pm8005-regulators";
+
+ vdd_s1-supply = <&vph_pwr>;
+
+ pm8005_s1: s1 { /* VDD_GFX supply */
+ regulator-min-microvolt = <524000>;
+ regulator-max-microvolt = <1100000>;
+ regulator-enable-ramp-delay = <500>;
+
+ /* hack until we rig up the gpu consumer */
+ regulator-always-on;
+ };
+ };
+};
+
+&pm8998_gpio {
+ vol_keys_default: vol-keys-default {
+ pinconf {
+ pins = "gpio5", "gpio6";
+ function = "normal";
+ bias-pull-up;
+ input-enable;
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_NO>;
+ };
+ };
+};
+
+&qusb2phy {
+ status = "okay";
+
+ vdda-pll-supply = <&vreg_l12a_1p8>;
+ vdda-phy-dpdm-supply = <&vreg_l24a_3p075>;
+};
+
+&rpm_requests {
+ pm8998-regulators {
+ compatible = "qcom,rpm-pm8998-regulators";
+
+ vdd_s1-supply = <&vph_pwr>;
+ vdd_s2-supply = <&vph_pwr>;
+ vdd_s3-supply = <&vph_pwr>;
+ vdd_s4-supply = <&vph_pwr>;
+ vdd_s5-supply = <&vph_pwr>;
+ vdd_s6-supply = <&vph_pwr>;
+ vdd_s7-supply = <&vph_pwr>;
+ vdd_s8-supply = <&vph_pwr>;
+ vdd_s9-supply = <&vph_pwr>;
+ vdd_s10-supply = <&vph_pwr>;
+ vdd_s11-supply = <&vph_pwr>;
+ vdd_s12-supply = <&vph_pwr>;
+ vdd_s13-supply = <&vph_pwr>;
+ vdd_l1_l27-supply = <&vreg_s7a_1p025>;
+ vdd_l2_l8_l17-supply = <&vreg_s3a_1p35>;
+ vdd_l3_l11-supply = <&vreg_s7a_1p025>;
+ vdd_l4_l5-supply = <&vreg_s7a_1p025>;
+ vdd_l6-supply = <&vreg_s5a_2p04>;
+ vdd_l7_l12_l14_l15-supply = <&vreg_s5a_2p04>;
+ vdd_l9-supply = <&vreg_bob>;
+ vdd_l10_l23_l25-supply = <&vreg_bob>;
+ vdd_l13_l19_l21-supply = <&vreg_bob>;
+ vdd_l16_l28-supply = <&vreg_bob>;
+ vdd_l18_l22-supply = <&vreg_bob>;
+ vdd_l20_l24-supply = <&vreg_bob>;
+ vdd_l26-supply = <&vreg_s3a_1p35>;
+ vdd_lvs1_lvs2-supply = <&vreg_s4a_1p8>;
+
+ vreg_s3a_1p35: s3 {
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ };
+ vreg_s4a_1p8: s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-allow-set-load;
+ };
+ vreg_s5a_2p04: s5 {
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2040000>;
+ };
+ vreg_s7a_1p025: s7 {
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1028000>;
+ };
+ vreg_l1a_0p875: l1 {
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ };
+ vreg_l2a_1p2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ vreg_l3a_1p0: l3 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ vreg_l5a_0p8: l5 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+ vreg_l6a_1p8: l6 {
+ regulator-min-microvolt = <1808000>;
+ regulator-max-microvolt = <1808000>;
+ };
+ vreg_l7a_1p8: l7 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ vreg_l8a_1p2: l8 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ vreg_l9a_1p8: l9 {
+ regulator-min-microvolt = <1808000>;
+ regulator-max-microvolt = <2960000>;
+ };
+ vreg_l10a_1p8: l10 {
+ regulator-min-microvolt = <1808000>;
+ regulator-max-microvolt = <2960000>;
+ };
+ vreg_l11a_1p0: l11 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ vreg_l12a_1p8: l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ vreg_l13a_2p95: l13 {
+ regulator-min-microvolt = <1808000>;
+ regulator-max-microvolt = <2960000>;
+ };
+ vreg_l14a_1p88: l14 {
+ regulator-min-microvolt = <1880000>;
+ regulator-max-microvolt = <1880000>;
+ };
+ vreg_l15a_1p8: l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ vreg_l16a_2p7: l16 {
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2704000>;
+ };
+ vreg_l17a_1p3: l17 {
+ regulator-min-microvolt = <1304000>;
+ regulator-max-microvolt = <1304000>;
+ };
+ vreg_l18a_2p7: l18 {
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2704000>;
+ };
+ vreg_l19a_3p0: l19 {
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3008000>;
+ };
+ vreg_l20a_2p95: l20 {
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-allow-set-load;
+ };
+ vreg_l21a_2p95: l21 {
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-allow-set-load;
+ regulator-system-load = <800000>;
+ };
+ vreg_l22a_2p85: l22 {
+ regulator-min-microvolt = <2864000>;
+ regulator-max-microvolt = <2864000>;
+ };
+ vreg_l23a_3p3: l23 {
+ regulator-min-microvolt = <3312000>;
+ regulator-max-microvolt = <3312000>;
+ };
+ vreg_l24a_3p075: l24 {
+ regulator-min-microvolt = <3088000>;
+ regulator-max-microvolt = <3088000>;
+ };
+ vreg_l25a_3p3: l25 {
+ regulator-min-microvolt = <3104000>;
+ regulator-max-microvolt = <3312000>;
+ };
+ vreg_l26a_1p2: l26 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-allow-set-load;
+ };
+ vreg_l28_3p0: l28 {
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3008000>;
+ };
+ vreg_lvs1a_1p8: lvs1 { };
+ vreg_lvs2a_1p8: lvs2 { };
+ };
+
+ pmi8998-regulators {
+ compatible = "qcom,rpm-pmi8998-regulators";
+
+ vdd_bob-supply = <&vph_pwr>;
+
+ vreg_bob: bob {
+ regulator-min-microvolt = <3312000>;
+ regulator-max-microvolt = <3600000>;
+ };
+ };
+};
+
+&tlmm {
+ gpio-reserved-ranges = <0 4>, <81 4>;
+
+ hall_sensor_default: hall-sensor-default {
+ pins = "gpio124";
+ function = "gpio";
+ drive-strength = <2>;
+ bias-disable;
+ input-enable;
+ };
+
+ ts_int_active: ts-int-active {
+ pins = "gpio125";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+
+ ts_reset_active: ts-reset-active {
+ pins = "gpio89";
+ function = "gpio";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+};
+
+&ufshc {
+ status = "okay";
+
+ vcc-supply = <&vreg_l20a_2p95>;
+ vccq-supply = <&vreg_l26a_1p2>;
+ vccq2-supply = <&vreg_s4a_1p8>;
+ vcc-max-microamp = <750000>;
+ vccq-max-microamp = <560000>;
+ vccq2-max-microamp = <750000>;
+};
+
+&ufsphy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l1a_0p875>;
+ vdda-pll-supply = <&vreg_l2a_1p2>;
+ vddp-ref-clk-supply = <&vreg_l26a_1p2>;
+ vdda-phy-max-microamp = <51400>;
+ vdda-pll-max-microamp = <14600>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+};
+
+&usb3 {
+ status = "okay";
+
+ /* Disable USB3 clock requirement as the device only supports USB2 */
+ qcom,select-utmi-as-pipe-clk;
+};
+
+&usb3_dwc3 {
+ /* Drop the unused USB 3 PHY */
+ phys = <&qusb2phy>;
+ phy-names = "usb2-phy";
+
+ /* Fastest mode for USB 2 */
+ maximum-speed = "high-speed";
+
+ /* Force to peripheral until we can switch modes */
+ dr_mode = "peripheral";
+};
+
+&wifi {
+ /* Leave disabled until MSS is functional */
+ vdd-0.8-cx-mx-supply = <&vreg_l5a_0p8>;
+ vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
+ vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
+ vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8998-oneplus-dumpling.dts b/arch/arm64/boot/dts/qcom/msm8998-oneplus-dumpling.dts
new file mode 100644
index 000000000000..544b9b0ae44b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8998-oneplus-dumpling.dts
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * OnePlus 5T (dumpling) device tree
+ *
+ * Copyright (c) 2021, Jami Kettunen <jamipkettunen@gmail.com>
+ */
+
+#include "msm8998-oneplus-common.dtsi"
+
+/ {
+ model = "OnePlus 5T";
+ compatible = "oneplus,dumpling", "qcom,msm8998";
+ /* Required for bootloader to select correct board */
+ qcom,board-id = <8 0 17801 43>;
+};
+
+/* Update the screen height values from 1920 to 2160 on the 5T */
+&framebuffer0 {
+ height = <2160>;
+};
+
+/* Adjust digitizer area height to match the 5T's taller panel */
+&rmi4_f12 {
+ touchscreen-y-mm = <137>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8998.dtsi b/arch/arm64/boot/dts/qcom/msm8998.dtsi
index 1f2e93aa6553..e9d3ce29937c 100644
--- a/arch/arm64/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8998.dtsi
@@ -1398,6 +1398,8 @@
<&rpmpd MSM8998_VDDMX>;
power-domain-names = "cx", "mx";
+ status = "disabled";
+
mba {
memory-region = <&mba_mem>;
};
diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
index bdc76d504b78..fa4ea7ded0ab 100644
--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
@@ -107,13 +107,11 @@
status = "disabled";
};
- pm8150_rtc: rtc@6000 {
+ rtc@6000 {
compatible = "qcom,pm8941-rtc";
reg = <0x6000>;
reg-names = "rtc", "alarm";
interrupts = <0x0 0x61 0x1 IRQ_TYPE_NONE>;
-
- status = "disabled";
};
pm8150_gpios: gpio@c000 {
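With the status = "disabled" line dropped and the label removed, the rtc@6000 node is now enabled by default on every pm8150 board, which is why the qrb5165-rb5 change further down can simply delete its &pm8150_rtc { status = "okay"; } override.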
diff --git a/arch/arm64/boot/dts/qcom/pm8350.dtsi b/arch/arm64/boot/dts/qcom/pm8350.dtsi
new file mode 100644
index 000000000000..308f9ca7c744
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pm8350.dtsi
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+ pm8350: pmic@1 {
+ compatible = "qcom,pm8350", "qcom,spmi-pmic";
+ reg = <0x1 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8350_gpios: gpio@8800 {
+ compatible = "qcom,pm8350-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8350b.dtsi b/arch/arm64/boot/dts/qcom/pm8350b.dtsi
new file mode 100644
index 000000000000..b23bb1d49a4d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pm8350b.dtsi
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+ pm8350b: pmic@3 {
+ compatible = "qcom,pm8350b", "qcom,spmi-pmic";
+ reg = <0x3 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8350b_gpios: gpio@8800 {
+ compatible = "qcom,pm8350b-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8350c.dtsi b/arch/arm64/boot/dts/qcom/pm8350c.dtsi
new file mode 100644
index 000000000000..2b9b75ecec60
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pm8350c.dtsi
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+ pm8350c: pmic@2 {
+ compatible = "qcom,pm8350c", "qcom,spmi-pmic";
+ reg = <0x2 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pm8350c_gpios: gpio@8800 {
+ compatible = "qcom,pm8350c-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pmk8350.dtsi b/arch/arm64/boot/dts/qcom/pmk8350.dtsi
new file mode 100644
index 000000000000..1530b8ff270f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pmk8350.dtsi
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+ pmk8350: pmic@0 {
+ compatible = "qcom,pmk8350", "qcom,spmi-pmic";
+ reg = <0x0 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmk8350_gpios: gpio@b000 {
+ compatible = "qcom,pmk8350-gpio";
+ reg = <0xb000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pmr735a.dtsi b/arch/arm64/boot/dts/qcom/pmr735a.dtsi
new file mode 100644
index 000000000000..1c675af13cbf
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pmr735a.dtsi
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+ pmr735a: pmic@4 {
+ compatible = "qcom,pmr735a", "qcom,spmi-pmic";
+ reg = <0x4 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmr735a_gpios: gpio@8800 {
+ compatible = "qcom,pmr735a-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/pmr735b.dtsi b/arch/arm64/boot/dts/qcom/pmr735b.dtsi
new file mode 100644
index 000000000000..1144086280f5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/pmr735b.dtsi
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+ pmr735b: pmic@5 {
+ compatible = "qcom,pmr735b", "qcom,spmi-pmic";
+ reg = <0x5 SPMI_USID>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pmr735b_gpios: gpio@8800 {
+ compatible = "qcom,pmr735b-gpio";
+ reg = <0x8800>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+};
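All six of these new PMIC .dtsi files follow the same shape: a single pmic@<USID> child on the shared &spmi_bus plus a labeled GPIO controller. A board that carries several of these PMICs simply stacks the includes and hangs its pin configurations off the labels — a hypothetical sketch (the volume-key node and its pin choice are illustrative, not part of this series):

    #include "pmk8350.dtsi"
    #include "pm8350.dtsi"
    #include "pm8350c.dtsi"

    &pm8350_gpios {
        vol_up_default: vol-up-default {
            pinconf {
                pins = "gpio6";
                function = "normal";
                input-enable;
                bias-pull-up;
            };
        };
    };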
diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
index 2f0528d01299..5f41de20aa22 100644
--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
@@ -811,10 +811,6 @@
};
};
-&pm8150_rtc {
- status = "okay";
-};
-
&qupv3_id_0 {
status = "okay";
};
@@ -952,6 +948,9 @@
/* CAN */
&spi0 {
status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_spi0_data_clk>, <&qup_spi0_cs_gpio>;
+ cs-gpios = <&tlmm 31 GPIO_ACTIVE_LOW>;
can@0 {
compatible = "microchip,mcp2518fd";
@@ -1352,3 +1351,14 @@
vdd-micb-supply = <&vreg_s4a_1p8>;
qcom,dmic-sample-rate = <600000>;
};
+
+/* PINCTRL - additions to nodes defined in sm8250.dtsi */
+&qup_spi0_cs_gpio {
+ drive-strength = <6>;
+ bias-disable;
+};
+
+&qup_spi0_data_clk {
+ drive-strength = <6>;
+ bias-disable;
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1-lte.dts
new file mode 100644
index 000000000000..533c048903ea
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1-lte.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google CoachZ board device tree source
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+#include "sc7180-trogdor-coachz-r1.dts"
+#include "sc7180-trogdor-lte-sku.dtsi"
+
+/ {
+ model = "Google CoachZ (rev1) with LTE";
+ compatible = "google,coachz-rev1-sku0", "qcom,sc7180";
+};
+
+&cros_ec_proximity {
+ label = "proximity-wifi-lte";
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1.dts
new file mode 100644
index 000000000000..1b1dbdb2a82f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r1.dts
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google CoachZ board device tree source
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+/dts-v1/;
+
+#include "sc7180-trogdor-coachz.dtsi"
+
+/ {
+ model = "Google CoachZ (rev1)";
+ compatible = "google,coachz-rev1", "qcom,sc7180";
+};
+
+&tlmm {
+ gpio-line-names = "HUB_RST_L",
+ "AP_RAM_ID0",
+ "AP_SKU_ID2",
+ "AP_RAM_ID1",
+ "FP_TO_AP_IRQ_L",
+ "AP_RAM_ID2",
+ "UF_CAM_EN",
+ "WF_CAM_EN",
+ "TS_RESET_L",
+ "TS_INT_L",
+ "FPMCU_BOOT0",
+ "EDP_BRIJ_IRQ",
+ "AP_EDP_BKLTEN",
+ "UF_CAM_MCLK",
+ "WF_CAM_CLK",
+ "EDP_BRIJ_I2C_SDA",
+ "EDP_BRIJ_I2C_SCL",
+ "UF_CAM_SDA",
+ "UF_CAM_SCL",
+ "WF_CAM_SDA",
+ "WF_CAM_SCL",
+ "WLC_IRQ",
+ "FP_RST_L",
+ "AMP_EN",
+ "WLC_NRST",
+ "AP_SAR_SENSOR_SDA",
+ "AP_SAR_SENSOR_SCL",
+ "",
+ "",
+ "WF_CAM_RST_L",
+ "UF_CAM_RST_L",
+ "AP_BRD_ID2",
+ "BRIJ_SUSPEND",
+ "AP_BRD_ID0",
+ "AP_H1_SPI_MISO",
+ "AP_H1_SPI_MOSI",
+ "AP_H1_SPI_CLK",
+ "AP_H1_SPI_CS_L",
+ "",
+ "",
+ "",
+ "",
+ "H1_AP_INT_ODL",
+ "",
+ "UART_AP_TX_DBG_RX",
+ "UART_DBG_TX_AP_RX",
+ "",
+ "",
+ "FORCED_USB_BOOT",
+ "AMP_BCLK",
+ "AMP_LRCLK",
+ "AMP_DIN",
+ "EN_PP3300_DX_EDP",
+ "HP_BCLK",
+ "HP_LRCLK",
+ "HP_DOUT",
+ "HP_DIN",
+ "HP_MCLK",
+ "AP_SKU_ID0",
+ "AP_EC_SPI_MISO",
+ "AP_EC_SPI_MOSI",
+ "AP_EC_SPI_CLK",
+ "AP_EC_SPI_CS_L",
+ "AP_SPI_CLK",
+ "AP_SPI_MOSI",
+ "AP_SPI_MISO",
+ /*
+			  * AP_FLASH_WP_L is part of the crossystem ABI. Schematics
+ * call it BIOS_FLASH_WP_L.
+ */
+ "AP_FLASH_WP_L",
+ "",
+ "AP_SPI_CS0_L",
+ "SD_CD_ODL",
+ "",
+ "",
+ "",
+ "",
+ "FPMCU_SEL",
+ "UIM2_DATA",
+ "UIM2_CLK",
+ "UIM2_RST",
+ "UIM2_PRESENT_L",
+ "UIM1_DATA",
+ "UIM1_CLK",
+ "UIM1_RST",
+ "",
+ "DMIC_CLK_EN",
+ "HUB_EN",
+ "",
+ "AP_SPI_FP_MISO",
+ "AP_SPI_FP_MOSI",
+ "AP_SPI_FP_CLK",
+ "AP_SPI_FP_CS_L",
+ "AP_SKU_ID1",
+ "AP_RST_REQ",
+ "",
+ "AP_BRD_ID1",
+ "AP_EC_INT_L",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "EDP_BRIJ_EN",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "AP_TS_PEN_I2C_SDA",
+ "AP_TS_PEN_I2C_SCL",
+ "DP_HOT_PLUG_DET",
+ "EC_IN_RW_ODL";
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2-lte.dts
new file mode 100644
index 000000000000..6e7745801fae
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2-lte.dts
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google CoachZ board device tree source
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+#include "sc7180-trogdor-coachz-r2.dts"
+#include "sc7180-trogdor-lte-sku.dtsi"
+
+/ {
+ model = "Google CoachZ (rev2+) with LTE";
+ compatible = "google,coachz-sku0", "qcom,sc7180";
+};
+
+&cros_ec_proximity {
+ label = "proximity-wifi-lte";
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2.dts
new file mode 100644
index 000000000000..4f69b6ba299f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz-r2.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google CoachZ board device tree source
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+/dts-v1/;
+
+#include "sc7180-trogdor-coachz.dtsi"
+
+/ {
+ model = "Google CoachZ (rev2+)";
+ compatible = "google,coachz", "qcom,sc7180";
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
new file mode 100644
index 000000000000..4c6e433c8226
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google CoachZ board device tree source
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+#include "sc7180.dtsi"
+
+ap_ec_spi: &spi6 {};
+ap_h1_spi: &spi0 {};
+
+#include "sc7180-trogdor.dtsi"
+
+/* Deleted nodes from trogdor.dtsi */
+
+/delete-node/ &alc5682;
+/delete-node/ &pp3300_codec;
+
+/ {
+ /* BOARD-SPECIFIC TOP LEVEL NODES */
+
+ adau7002: audio-codec-1 {
+ compatible = "adi,adau7002";
+ IOVDD-supply = <&pp1800_l15a>;
+ #sound-dai-cells = <0>;
+ };
+};
+
+&ap_spi_fp {
+ status = "okay";
+};
+
+&backlight {
+ pwms = <&cros_ec_pwm 0>;
+};
+
+&camcc {
+ status = "okay";
+};
+
+&cros_ec {
+ cros_ec_proximity: proximity {
+ compatible = "google,cros-ec-mkbp-proximity";
+ label = "proximity-wifi";
+ };
+};
+
+ap_ts_pen_1v8: &i2c4 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ ap_ts: touchscreen@5d {
+ compatible = "goodix,gt7375p";
+ reg = <0x5d>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_int_l>, <&ts_reset_l>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+
+ reset-gpios = <&tlmm 8 GPIO_ACTIVE_LOW>;
+
+ vdd-supply = <&pp3300_ts>;
+ };
+};
+
+&i2c7 {
+ status = "disabled";
+};
+
+&i2c9 {
+ status = "disabled";
+};
+
+&panel {
+ compatible = "boe,nv110wtm-n61";
+};
+
+&pp3300_dx_edp {
+ gpio = <&tlmm 67 GPIO_ACTIVE_HIGH>;
+};
+
+&sdhc_2 {
+ status = "okay";
+};
+
+&sn65dsi86_out {
+ data-lanes = <0 1 2 3>;
+};
+
+&sound {
+ compatible = "google,sc7180-coachz";
+ model = "sc7180-adau7002-max98357a";
+ audio-routing = "PDM_DAT", "DMIC";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&dmic_clk_en>;
+};
+
+&sound_multimedia0_codec {
+ sound-dai = <&adau7002>;
+};
+
+/* PINCTRL - modifications to sc7180-trogdor.dtsi */
+
+&en_pp3300_dx_edp {
+ pinmux {
+ pins = "gpio67";
+ };
+
+ pinconf {
+ pins = "gpio67";
+ };
+};
+
+&ts_reset_l {
+ pinconf {
+ /*
+		 * We want the reset state by default, and it is up to the
+		 * driver to deassert the reset once it is ready.
+ */
+ output-low;
+ };
+};
+
+/* PINCTRL - board-specific pinctrl */
+
+&tlmm {
+ gpio-line-names = "HUB_RST_L",
+ "AP_RAM_ID0",
+ "AP_SKU_ID2",
+ "AP_RAM_ID1",
+ "FP_TO_AP_IRQ_L",
+ "AP_RAM_ID2",
+ "UF_CAM_EN",
+ "WF_CAM_EN",
+ "TS_RESET_L",
+ "TS_INT_L",
+ "FPMCU_BOOT0",
+ "EDP_BRIJ_IRQ",
+ "AP_EDP_BKLTEN",
+ "UF_CAM_MCLK",
+ "WF_CAM_CLK",
+ "EDP_BRIJ_I2C_SDA",
+ "EDP_BRIJ_I2C_SCL",
+ "UF_CAM_SDA",
+ "UF_CAM_SCL",
+ "WF_CAM_SDA",
+ "WF_CAM_SCL",
+ "WLC_IRQ",
+ "FP_RST_L",
+ "AMP_EN",
+ "WLC_NRST",
+ "AP_SAR_SENSOR_SDA",
+ "AP_SAR_SENSOR_SCL",
+ "",
+ "",
+ "WF_CAM_RST_L",
+ "UF_CAM_RST_L",
+ "AP_BRD_ID2",
+ "BRIJ_SUSPEND",
+ "AP_BRD_ID0",
+ "AP_H1_SPI_MISO",
+ "AP_H1_SPI_MOSI",
+ "AP_H1_SPI_CLK",
+ "AP_H1_SPI_CS_L",
+ "",
+ "",
+ "",
+ "",
+ "H1_AP_INT_ODL",
+ "",
+ "UART_AP_TX_DBG_RX",
+ "UART_DBG_TX_AP_RX",
+ "",
+ "",
+ "FORCED_USB_BOOT",
+ "AMP_BCLK",
+ "AMP_LRCLK",
+ "AMP_DIN",
+ "",
+ "HP_BCLK",
+ "HP_LRCLK",
+ "HP_DOUT",
+ "HP_DIN",
+ "HP_MCLK",
+ "AP_SKU_ID0",
+ "AP_EC_SPI_MISO",
+ "AP_EC_SPI_MOSI",
+ "AP_EC_SPI_CLK",
+ "AP_EC_SPI_CS_L",
+ "AP_SPI_CLK",
+ "AP_SPI_MOSI",
+ "AP_SPI_MISO",
+ /*
+			  * AP_FLASH_WP_L is part of the crossystem ABI. Schematics
+ * call it BIOS_FLASH_WP_L.
+ */
+ "AP_FLASH_WP_L",
+ "EN_PP3300_DX_EDP",
+ "AP_SPI_CS0_L",
+ "SD_CD_ODL",
+ "",
+ "",
+ "",
+ "",
+ "EN_FP_RAILS",
+ "UIM2_DATA",
+ "UIM2_CLK",
+ "UIM2_RST",
+ "UIM2_PRESENT_L",
+ "UIM1_DATA",
+ "UIM1_CLK",
+ "UIM1_RST",
+ "",
+ "",
+ "HUB_EN",
+ "",
+ "AP_SPI_FP_MISO",
+ "AP_SPI_FP_MOSI",
+ "AP_SPI_FP_CLK",
+ "AP_SPI_FP_CS_L",
+ "AP_SKU_ID1",
+ "AP_RST_REQ",
+ "",
+ "AP_BRD_ID1",
+ "AP_EC_INT_L",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "EDP_BRIJ_EN",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "AP_TS_PEN_I2C_SDA",
+ "AP_TS_PEN_I2C_SCL",
+ "DP_HOT_PLUG_DET",
+ "EC_IN_RW_ODL";
+
+ dmic_clk_en: dmic_clk_en {
+ pinmux {
+ pins = "gpio83";
+ function = "gpio";
+ };
+
+ pinconf {
+ pins = "gpio83";
+ drive-strength = <8>;
+ bias-pull-up;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts
new file mode 100644
index 000000000000..6ebde0828550
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots-r4.dts
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google Lazor Limozeen board device tree source
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+#include "sc7180-trogdor-lazor-limozeen-nots.dts"
+
+/ {
+ model = "Google Lazor Limozeen without Touchscreen (rev4)";
+ compatible = "google,lazor-rev4-sku5", "qcom,sc7180";
+};
+
+/*
+ * rev4-sku5 was built with a different trackpad.
+ */
+/delete-node/&trackpad;
+&ap_tp_i2c {
+ trackpad: trackpad@2c {
+ compatible = "hid-over-i2c";
+ reg = <0x2c>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&tp_int_odl>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <58 IRQ_TYPE_EDGE_FALLING>;
+
+ vcc-supply = <&pp3300_fp_tp>;
+ hid-descr-addr = <0x20>;
+
+ wakeup-source;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots.dts
new file mode 100644
index 000000000000..0456c7e05d00
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen-nots.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google Lazor Limozeen board device tree source
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+/dts-v1/;
+
+#include "sc7180-trogdor-lazor.dtsi"
+#include "sc7180-trogdor-lte-sku.dtsi"
+
+/ {
+ model = "Google Lazor Limozeen without Touchscreen";
+ compatible = "google,lazor-sku6", "google,lazor-sku5", "qcom,sc7180";
+};
+
+/delete-node/&ap_ts;
+
+&panel {
+ compatible = "innolux,n116bca-ea1", "innolux,n116bge";
+};
+
+&sdhc_2 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen.dts
new file mode 100644
index 000000000000..e6ad6dae4e60
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-limozeen.dts
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google Lazor Limozeen board device tree source
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+/dts-v1/;
+
+#include "sc7180-trogdor-lazor.dtsi"
+#include "sc7180-trogdor-lte-sku.dtsi"
+
+/ {
+ model = "Google Lazor Limozeen";
+ compatible = "google,lazor-sku4", "qcom,sc7180";
+};
+
+/delete-node/&ap_ts;
+
+&ap_ts_pen_1v8 {
+ ap_ts: touchscreen@10 {
+ compatible = "elan,ekth3500";
+ reg = <0x10>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_int_l>, <&ts_reset_l>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+
+ vcc33-supply = <&pp3300_ts>;
+
+ reset-gpios = <&tlmm 8 GPIO_ACTIVE_LOW>;
+ };
+};
+
+&panel {
+ compatible = "auo,b116xa01";
+};
+
+&sdhc_2 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r0.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r0.dts
index 30e3e769d2b4..5c997cd90069 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r0.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r0.dts
@@ -14,6 +14,15 @@
compatible = "google,lazor-rev0", "qcom,sc7180";
};
+/*
+ * Lazor is stuffed with a 47k NTC as its charger thermistor, which is not
+ * currently supported by the PM6150 ADC driver. Disable the charger thermal
+ * zone to avoid using bogus temperature values.
+ */
+&charger_thermal {
+ status = "disabled";
+};
+
&pp3300_hub {
/* pp3300_l7c is used to power the USB hub */
/delete-property/regulator-always-on;
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1.dts
index c2ef06367baf..d9fbcc7bc5bd 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r1.dts
@@ -14,6 +14,15 @@
compatible = "google,lazor-rev1", "google,lazor-rev2", "qcom,sc7180";
};
+/*
+ * Lazor is stuffed with a 47k NTC as its charger thermistor, which is not
+ * currently supported by the PM6150 ADC driver. Disable the charger thermal
+ * zone to avoid using bogus temperature values.
+ */
+&charger_thermal {
+ status = "disabled";
+};
+
&pp3300_hub {
/* pp3300_l7c is used to power the USB hub */
/delete-property/regulator-always-on;
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts
index 6985beb97e53..dcb41afdc82a 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-kb.dts
@@ -5,7 +5,10 @@
* Copyright 2020 Google LLC.
*/
-#include "sc7180-trogdor-lazor-r3.dts"
+/dts-v1/;
+
+#include "sc7180-trogdor-lazor.dtsi"
+#include "sc7180-lite.dtsi"
/ {
model = "Google Lazor (rev3+) with KB Backlight";
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts
index 0881f8dd02c9..be44900602d7 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3-lte.dts
@@ -5,7 +5,9 @@
* Copyright 2020 Google LLC.
*/
-#include "sc7180-trogdor-lazor-r3.dts"
+/dts-v1/;
+
+#include "sc7180-trogdor-lazor.dtsi"
#include "sc7180-trogdor-lte-sku.dtsi"
/ {
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3.dts
index 1b9d2f46359e..ea8c2ee09741 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor-r3.dts
@@ -8,8 +8,18 @@
/dts-v1/;
#include "sc7180-trogdor-lazor.dtsi"
+#include "sc7180-lite.dtsi"
/ {
model = "Google Lazor (rev3+)";
compatible = "google,lazor", "qcom,sc7180";
};
+
+/*
+ * Lazor is stuffed with a 47k NTC as its charger thermistor, which is not
+ * currently supported by the PM6150 ADC driver. Disable the charger thermal
+ * zone to avoid using bogus temperature values.
+ */
+&charger_thermal {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi
index 89e5cd29ec09..6b10b96173e8 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-lazor.dtsi
@@ -12,23 +12,6 @@ ap_h1_spi: &spi0 {};
#include "sc7180-trogdor.dtsi"
-/ {
- panel: panel {
- compatible = "boe,nv133fhm-n62";
- power-supply = <&pp3300_dx_edp>;
- backlight = <&backlight>;
- hpd-gpios = <&sn65dsi86_bridge 2 GPIO_ACTIVE_HIGH>;
-
- ports {
- port {
- panel_in_edp: endpoint {
- remote-endpoint = <&sn65dsi86_out>;
- };
- };
- };
- };
-};
-
&ap_sar_sensor {
semtech,cs0-ground;
semtech,combined-sensors = <3>;
@@ -58,8 +41,30 @@ ap_ts_pen_1v8: &i2c4 {
};
};
+&panel {
+ compatible = "boe,nv133fhm-n62";
+};
+
+&trackpad {
+ interrupts = <58 IRQ_TYPE_EDGE_FALLING>;
+};
+
+&wifi {
+ qcom,ath10k-calibration-variant = "GO_LAZOR";
+};
+
/* PINCTRL - modifications to sc7180-trogdor.dtsi */
+&trackpad_int_1v8_odl {
+ pinmux {
+ pins = "gpio58";
+ };
+
+ pinconf {
+ pins = "gpio58";
+ };
+};
+
&ts_reset_l {
pinconf {
/* This pin is not connected on -rev0, pull up to park. */
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1-lte.dts
new file mode 100644
index 000000000000..0202f03eafe6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1-lte.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google Pompom board device tree source
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+#include "sc7180-trogdor-pompom-r1.dts"
+#include "sc7180-trogdor-lte-sku.dtsi"
+
+/ {
+ model = "Google Pompom (rev1) with LTE";
+ compatible = "google,pompom-rev1-sku0", "qcom,sc7180";
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1.dts
new file mode 100644
index 000000000000..e720e7bd0d70
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r1.dts
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google Pompom board device tree source
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+/dts-v1/;
+
+#include "sc7180-trogdor-pompom.dtsi"
+
+/ {
+ model = "Google Pompom (rev1)";
+ compatible = "google,pompom-rev1", "qcom,sc7180";
+};
+
+&pp3300_hub {
+ /* pp3300_l7c is used to power the USB hub */
+ /delete-property/regulator-always-on;
+ /delete-property/regulator-boot-on;
+};
+
+&pp3300_l7c {
+ regulator-always-on;
+ regulator-boot-on;
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2-lte.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2-lte.dts
new file mode 100644
index 000000000000..791d496ad046
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2-lte.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google Pompom board device tree source
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+#include "sc7180-trogdor-pompom-r2.dts"
+#include "sc7180-trogdor-lte-sku.dtsi"
+
+/ {
+ model = "Google Pompom (rev2+) with LTE";
+ compatible = "google,pompom-sku0", "qcom,sc7180";
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2.dts
new file mode 100644
index 000000000000..984d7337da78
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom-r2.dts
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google Pompom board device tree source
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+/dts-v1/;
+
+#include "sc7180-trogdor-pompom.dtsi"
+
+/ {
+ model = "Google Pompom (rev2+)";
+ compatible = "google,pompom", "qcom,sc7180";
+};
+
+&keyboard_controller {
+ function-row-physmap = <
+ MATRIX_KEY(0x00, 0x02, 0) /* T1 */
+ MATRIX_KEY(0x03, 0x02, 0) /* T2 */
+ MATRIX_KEY(0x02, 0x02, 0) /* T3 */
+ MATRIX_KEY(0x01, 0x02, 0) /* T4 */
+ MATRIX_KEY(0x03, 0x04, 0) /* T5 */
+ MATRIX_KEY(0x02, 0x04, 0) /* T6 */
+ MATRIX_KEY(0x01, 0x04, 0) /* T7 */
+ MATRIX_KEY(0x02, 0x09, 0) /* T8 */
+ MATRIX_KEY(0x01, 0x09, 0) /* T9 */
+ MATRIX_KEY(0x00, 0x04, 0) /* T10 */
+ >;
+ linux,keymap = <
+ MATRIX_KEY(0x00, 0x02, KEY_BACK)
+ MATRIX_KEY(0x03, 0x02, KEY_REFRESH)
+ MATRIX_KEY(0x02, 0x02, KEY_ZOOM)
+ MATRIX_KEY(0x01, 0x02, KEY_SCALE)
+ MATRIX_KEY(0x03, 0x04, KEY_SYSRQ)
+ MATRIX_KEY(0x02, 0x04, KEY_BRIGHTNESSDOWN)
+ MATRIX_KEY(0x01, 0x04, KEY_BRIGHTNESSUP)
+ MATRIX_KEY(0x02, 0x09, KEY_MUTE)
+ MATRIX_KEY(0x01, 0x09, KEY_VOLUMEDOWN)
+ MATRIX_KEY(0x00, 0x04, KEY_VOLUMEUP)
+
+ CROS_STD_MAIN_KEYMAP
+ >;
+};
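For reference, MATRIX_KEY(row, col, code) from the dt-bindings input headers packs its arguments as ((row & 0xff) << 24) | ((col & 0xff) << 16) | (code & 0xffff), so MATRIX_KEY(0x00, 0x02, KEY_BACK) binds KEY_BACK to scan-matrix row 0, column 2. function-row-physmap reuses the same encoding with a zero keycode: it only records which matrix positions make up the physical top row, listed left to right.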
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
new file mode 100644
index 000000000000..622b5f1b88a2
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Google Pompom board device tree source
+ *
+ * Copyright 2020 Google LLC.
+ */
+
+#include "sc7180.dtsi"
+
+ap_ec_spi: &spi6 {};
+ap_h1_spi: &spi0 {};
+
+#include "sc7180-trogdor.dtsi"
+
+/ {
+ thermal-zones {
+ 5v-choke-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <250>;
+
+ thermal-sensors = <&pm6150_adc_tm 1>;
+
+ trips {
+ 5v-choke-crit {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+};
+
+&alc5682 {
+ realtek,dmic-clk-driving-high = "true";
+};
+
+&cpu6_alert0 {
+ temperature = <60000>;
+};
+
+&cpu6_alert1 {
+ temperature = <65000>;
+};
+
+&cpu6_thermal {
+ sustainable-power = <948>;
+};
+
+&cpu7_alert0 {
+ temperature = <60000>;
+};
+
+&cpu7_alert1 {
+ temperature = <65000>;
+};
+
+&cpu7_thermal {
+ sustainable-power = <948>;
+};
+
+&cpu8_alert0 {
+ temperature = <60000>;
+};
+
+&cpu8_alert1 {
+ temperature = <65000>;
+};
+
+&cpu8_thermal {
+ sustainable-power = <948>;
+};
+
+&cpu9_alert0 {
+ temperature = <60000>;
+};
+
+&cpu9_alert1 {
+ temperature = <65000>;
+};
+
+&cpu9_thermal {
+ sustainable-power = <948>;
+};
+
+&gpio_keys {
+ status = "okay";
+};
+
+ap_ts_pen_1v8: &i2c4 {
+ status = "okay";
+ clock-frequency = <400000>;
+
+ ap_ts: touchscreen@10 {
+ compatible = "hid-over-i2c";
+ reg = <0x10>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts_int_l>, <&ts_reset_l>;
+
+ interrupt-parent = <&tlmm>;
+ interrupts = <9 IRQ_TYPE_LEVEL_LOW>;
+
+ post-power-on-delay-ms = <20>;
+ hid-descr-addr = <0x0001>;
+
+ vdd-supply = <&pp3300_ts>;
+ };
+};
+
+&panel {
+ compatible = "kingdisplay,kd116n21-30nv-a010";
+};
+
+&pen_insert {
+ /* Insert = high, eject = low */
+ gpios = <&tlmm 52 GPIO_ACTIVE_HIGH>;
+};
+
+&pm6150_adc {
+ 5v-choke-thermistor@4e {
+ reg = <ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+};
+
+&pm6150_adc_tm {
+ status = "okay";
+
+ 5v-choke-thermistor@1 {
+ reg = <1>;
+ io-channels = <&pm6150_adc ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
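The choke thermistor threads through three nodes in this file — the ADC channel, the ADC thermal monitor, and the thermal zone defined at the top:

    pm6150_adc     5v-choke-thermistor@4e  samples ADC5_AMUX_THM2_100K_PU
    pm6150_adc_tm  5v-choke-thermistor@1   reg = <1>, io-channels points at that ADC channel
    thermal zone   5v-choke-thermal        thermal-sensors = <&pm6150_adc_tm 1>

The sensor cell in the thermal zone (1) must match the adc_tm child's reg, which is why both read <1> here.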
+
+&sdhc_2 {
+ status = "okay";
+};
+
+&sound {
+ model = "sc7180-rt5682-max98357a-2mic";
+ pinctrl-names = "default";
+ pinctrl-0 = <&dmic_sel>;
+ dmic-gpios = <&tlmm 86 GPIO_ACTIVE_HIGH>;
+};
+
+&usb_c1 {
+ status = "disabled";
+};
+
+&wifi {
+ qcom,ath10k-calibration-variant = "GO_POMPOM";
+};
+
+/* PINCTRL - board-specific pinctrl */
+
+&tlmm {
+ gpio-line-names = "TP_INT_ODL",
+ "AP_RAM_ID0",
+ "AP_SKU_ID2",
+ "AP_RAM_ID1",
+ "",
+ "AP_RAM_ID2",
+ "AP_TP_I2C_SDA",
+ "AP_TP_I2C_SCL",
+ "TS_RESET_L",
+ "TS_INT_L",
+ "",
+ "EDP_BRIJ_IRQ",
+ "AP_EDP_BKLTEN",
+ "",
+ "",
+ "EDP_BRIJ_I2C_SDA",
+ "EDP_BRIJ_I2C_SCL",
+ "HUB_RST_L",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "AMP_EN",
+ "P_SENSOR_INT_L",
+ "AP_SAR_SENSOR_SDA",
+ "AP_SAR_SENSOR_SCL",
+ "",
+ "HP_IRQ",
+ "",
+ "EN_PP3300_DX_EDP",
+ "AP_BRD_ID2",
+ "BRIJ_SUSPEND",
+ "AP_BRD_ID0",
+ "AP_H1_SPI_MISO",
+ "AP_H1_SPI_MOSI",
+ "AP_H1_SPI_CLK",
+ "AP_H1_SPI_CS_L",
+ "",
+ "",
+ "",
+ "",
+ "H1_AP_INT_ODL",
+ "",
+ "UART_AP_TX_DBG_RX",
+ "UART_DBG_TX_AP_RX",
+ "HP_I2C_SDA",
+ "HP_I2C_SCL",
+ "FORCED_USB_BOOT",
+ "AMP_BCLK",
+ "AMP_LRCLK",
+ "AMP_DIN",
+ "PEN_PDCT_L",
+ "HP_BCLK",
+ "HP_LRCLK",
+ "HP_DOUT",
+ "HP_DIN",
+ "HP_MCLK",
+ "AP_SKU_ID0",
+ "AP_EC_SPI_MISO",
+ "AP_EC_SPI_MOSI",
+ "AP_EC_SPI_CLK",
+ "AP_EC_SPI_CS_L",
+ "AP_SPI_CLK",
+ "AP_SPI_MOSI",
+ "AP_SPI_MISO",
+ /*
+			  * AP_FLASH_WP_L is part of the crossystem ABI. Schematics
+ * call it BIOS_FLASH_WP_L.
+ */
+ "AP_FLASH_WP_L",
+ "",
+ "AP_SPI_CS0_L",
+ "SD_CD_ODL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "UIM2_DATA",
+ "UIM2_CLK",
+ "UIM2_RST",
+ "UIM2_PRESENT",
+ "UIM1_DATA",
+ "UIM1_CLK",
+ "UIM1_RST",
+ "",
+ "EN_PP3300_CODEC",
+ "EN_PP3300_HUB",
+ "",
+ "DMIC_SEL",
+ "",
+ "",
+ "",
+ "AP_SKU_ID1",
+ "AP_RST_REQ",
+ "",
+ "AP_BRD_ID1",
+ "AP_EC_INT_R_L",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "EDP_BRIJ_EN",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "AP_TS_PEN_I2C_SDA",
+ "AP_TS_PEN_I2C_SCL",
+ "DP_HOT_PLUG_DET",
+ "EC_IN_RW_ODL";
+
+ dmic_sel: dmic-sel {
+ pinmux {
+ pins = "gpio86";
+ function = "gpio";
+ };
+
+ pinconf {
+ pins = "gpio86";
+ bias-pull-down;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-r1.dts b/arch/arm64/boot/dts/qcom/sc7180-trogdor-r1.dts
index 2cb522d6962e..2b522f9e0d8f 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-r1.dts
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-r1.dts
@@ -17,21 +17,6 @@ ap_h1_spi: &spi0 {};
/ {
model = "Google Trogdor (rev1+)";
compatible = "google,trogdor", "qcom,sc7180";
-
- panel: panel {
- compatible = "auo,b116xa01";
- power-supply = <&pp3300_dx_edp>;
- backlight = <&backlight>;
- hpd-gpios = <&sn65dsi86_bridge 2 GPIO_ACTIVE_HIGH>;
-
- ports {
- port {
- panel_in_edp: endpoint {
- remote-endpoint = <&sn65dsi86_out>;
- };
- };
- };
- };
};
ap_ts_pen_1v8: &i2c4 {
@@ -53,6 +38,10 @@ ap_ts_pen_1v8: &i2c4 {
};
};
+&panel {
+ compatible = "auo,b116xa01";
+};
+
&pp3300_hub {
/* pp3300_l7c is used to power the USB hub */
/delete-property/regulator-always-on;
@@ -68,6 +57,22 @@ ap_ts_pen_1v8: &i2c4 {
status = "okay";
};
+&trackpad {
+ interrupts = <58 IRQ_TYPE_EDGE_FALLING>;
+};
+
+/* PINCTRL - modifications to sc7180-trogdor.dtsi */
+
+&trackpad_int_1v8_odl {
+ pinmux {
+ pins = "gpio58";
+ };
+
+ pinconf {
+ pins = "gpio58";
+ };
+};
+
/* PINCTRL - board-specific pinctrl */
&tlmm {
diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
index 07c8b2c926c0..24d293ef56d7 100644
--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi
@@ -6,8 +6,10 @@
*/
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/gpio-keys.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/sound/sc7180-lpass.h>
/* PMICs depend on spmi_bus label and so must come after SoC */
#include "pm6150.dtsi"
@@ -15,16 +17,18 @@
/ {
thermal-zones {
- charger-thermal {
+ charger_thermal: charger-thermal {
polling-delay-passive = <0>;
polling-delay = <0>;
- thermal-sensors = <&pm6150_adc_tm 1>;
+ thermal-sensors = <&pm6150_adc_tm 0>;
trips {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "critical";
+ charger-crit {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
};
};
};
@@ -45,7 +49,7 @@
/* Increase the size from 2 MiB to 8 MiB */
&rmtfs_mem {
- reg = <0x0 0x84400000 0x0 0x800000>;
+ reg = <0x0 0x94600000 0x0 0x800000>;
};
/ {
@@ -243,6 +247,7 @@
gpios = <&tlmm 52 GPIO_ACTIVE_LOW>;
linux,code = <SW_PEN_INSERTED>;
linux,input-type = <EV_SW>;
+ wakeup-event-action = <EV_ACT_DEASSERTED>;
wakeup-source;
};
};
@@ -255,6 +260,21 @@
#sound-dai-cells = <0>;
};
+ panel: panel {
+ /* Compatible will be filled in per-board */
+ power-supply = <&pp3300_dx_edp>;
+ backlight = <&backlight>;
+ hpd-gpios = <&sn65dsi86_bridge 2 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ port {
+ panel_in_edp: endpoint {
+ remote-endpoint = <&sn65dsi86_out>;
+ };
+ };
+ };
+ };
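With the panel node hoisted into the shared dtsi, each board override shrinks to a single compatible assignment — exactly what the Lazor, Trogdor r1, CoachZ and Pompom changes elsewhere in this series now do, e.g.:

    &panel {
        compatible = "boe,nv133fhm-n62";
    };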
+
pwmleds {
compatible = "pwm-leds";
keyboard_backlight: keyboard-backlight {
@@ -264,6 +284,42 @@
max-brightness = <1023>;
};
};
+
+ sound: sound {
+ compatible = "google,sc7180-trogdor";
+ model = "sc7180-rt5682-max98357a-1mic";
+
+ audio-routing =
+ "Headphone Jack", "HPOL",
+ "Headphone Jack", "HPOR";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dai-link@0 {
+ link-name = "MultiMedia0";
+ reg = <MI2S_PRIMARY>;
+ cpu {
+ sound-dai = <&lpass_cpu MI2S_PRIMARY>;
+ };
+
+ sound_multimedia0_codec: codec {
+ sound-dai = <&alc5682 0 /* aif1 */>;
+ };
+ };
+
+ dai-link@1 {
+ link-name = "MultiMedia1";
+ reg = <MI2S_SECONDARY>;
+ cpu {
+ sound-dai = <&lpass_cpu MI2S_SECONDARY>;
+ };
+
+ sound_multimedia1_codec: codec {
+ sound-dai = <&max98357a>;
+ };
+ };
+ };
};
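The labels on the codec subnodes (sound_multimedia0_codec, sound_multimedia1_codec) are what let a variant swap codecs without redefining the whole sound graph; CoachZ, earlier in this series, retargets MultiMedia0 at its adau7002 DMIC frontend with just:

    &sound_multimedia0_codec {
        sound-dai = <&adau7002>;
    };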
&qfprom {
@@ -551,6 +607,10 @@
};
};
+&camcc {
+ status = "disabled";
+};
+
&dsi0 {
status = "okay";
vdda-supply = <&vdda_mipi_dsi0_1p2>;
@@ -642,14 +702,14 @@ ap_tp_i2c: &i2c7 {
status = "okay";
clock-frequency = <400000>;
- trackpad@15 {
+ trackpad: trackpad@15 {
compatible = "elan,ekth3000";
reg = <0x15>;
pinctrl-names = "default";
- pinctrl-0 = <&trackpad_int_1v8_odl>;
+ pinctrl-0 = <&tp_int_odl>;
interrupt-parent = <&tlmm>;
- interrupts = <58 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <0 IRQ_TYPE_EDGE_FALLING>;
vcc-supply = <&pp3300_fp_tp>;
@@ -697,6 +757,27 @@ hp_i2c: &i2c9 {
modem-init;
};
+&lpass_cpu {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&sec_mi2s_active>, <&pri_mi2s_active>, <&pri_mi2s_mclk_active>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mi2s@0 {
+ reg = <MI2S_PRIMARY>;
+ qcom,playback-sd-lines = <1>;
+ qcom,capture-sd-lines = <0>;
+ };
+
+ mi2s@1 {
+ reg = <MI2S_SECONDARY>;
+ qcom,playback-sd-lines = <0>;
+ };
+};
+
&mdp {
status = "okay";
};
@@ -716,8 +797,8 @@ hp_i2c: &i2c9 {
&pm6150_adc_tm {
status = "okay";
- charger-thermistor@1 {
- reg = <1>;
+ charger-thermistor@0 {
+ reg = <0>;
io-channels = <&pm6150_adc ADC5_AMUX_THM3_100K_PU>;
qcom,ratiometric;
qcom,hw-settle-time-us = <200>;
@@ -768,17 +849,17 @@ hp_i2c: &i2c9 {
};
&spi0 {
- pinctrl-0 = <&qup_spi0_cs_gpio>;
+ pinctrl-0 = <&qup_spi0_cs_gpio_init_high>, <&qup_spi0_cs_gpio>;
cs-gpios = <&tlmm 37 GPIO_ACTIVE_LOW>;
};
&spi6 {
- pinctrl-0 = <&qup_spi6_cs_gpio>;
+ pinctrl-0 = <&qup_spi6_cs_gpio_init_high>, <&qup_spi6_cs_gpio>;
cs-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>;
};
ap_spi_fp: &spi10 {
- pinctrl-0 = <&qup_spi10_cs_gpio>;
+ pinctrl-0 = <&qup_spi10_cs_gpio_init_high>, <&qup_spi10_cs_gpio>;
cs-gpios = <&tlmm 89 GPIO_ACTIVE_LOW>;
cros_ec_fp: ec@0 {
@@ -787,7 +868,7 @@ ap_spi_fp: &spi10 {
interrupt-parent = <&tlmm>;
interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
- pinctrl-0 = <&fp_to_ap_irq_l>, <&fp_rst_l>, <&fpmcu_boot0>, <&fpmcu_sel>;
+ pinctrl-0 = <&fp_to_ap_irq_l>;
spi-max-frequency = <3000000>;
};
};
@@ -812,7 +893,6 @@ ap_spi_fp: &spi10 {
vddrf-supply = <&pp1300_l2c>;
vddch0-supply = <&pp3300_l10c>;
max-speed = <3200000>;
- clocks = <&rpmhcc RPMH_RF_CLK2>;
};
};
@@ -875,6 +955,22 @@ ap_spi_fp: &spi10 {
};
};
+&pri_mi2s_active {
+ pinconf {
+ pins = "gpio53", "gpio54", "gpio55", "gpio56";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+};
+
+&pri_mi2s_mclk_active {
+ pinconf {
+ pins = "gpio57";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+};
+
&qspi_cs0 {
pinconf {
pins = "gpio68";
@@ -1015,6 +1111,14 @@ ap_spi_fp: &spi10 {
};
};
+&sec_mi2s_active {
+ pinconf {
+ pins = "gpio49", "gpio50", "gpio51";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+};
+
/* PINCTRL - board-specific pinctrl */
&pm6150_gpio {
@@ -1109,20 +1213,6 @@ ap_spi_fp: &spi10 {
};
};
- dp_hot_plug_det: dp-hot-plug-det {
- pinmux {
- pins = "gpio117";
- function = "dp_hot";
- };
-
- config {
- pins = "gpio117";
- bias-disable;
- input-enable;
- drive-strength = <2>;
- };
- };
-
edp_brij_en: edp-brij-en {
pinmux {
pins = "gpio104";
@@ -1188,48 +1278,6 @@ ap_spi_fp: &spi10 {
};
};
- fpmcu_boot0: fpmcu-boot0 {
- pinmux {
- pins = "gpio10";
- function = "gpio";
- };
-
- pinconf {
- pins = "gpio10";
- bias-disable;
- drive-strength = <2>;
- output-low;
- };
- };
-
- fpmcu_sel: fpmcu-sel {
- pinmux {
- pins = "gpio22";
- function = "gpio";
- };
-
- pinconf {
- pins = "gpio22";
- bias-disable;
- drive-strength = <2>;
- output-high;
- };
- };
-
- fp_rst_l: fp-rst-l {
- pinmux {
- pins = "gpio5";
- function = "gpio";
- };
-
- pinconf {
- pins = "gpio5";
- bias-disable;
- drive-strength = <2>;
- output-high;
- };
- };
-
fp_to_ap_irq_l: fp-to-ap-irq-l {
pinmux {
pins = "gpio4";
@@ -1245,7 +1293,6 @@ ap_spi_fp: &spi10 {
};
};
-
h1_ap_int_odl: h1-ap-int-odl {
pinmux {
pins = "gpio42";
@@ -1339,6 +1386,27 @@ ap_spi_fp: &spi10 {
};
};
+ qup_spi0_cs_gpio_init_high: qup-spi0-cs-gpio-init-high {
+ pinconf {
+ pins = "gpio37";
+ output-high;
+ };
+ };
+
+ qup_spi6_cs_gpio_init_high: qup-spi6-cs-gpio-init-high {
+ pinconf {
+ pins = "gpio62";
+ output-high;
+ };
+ };
+
+ qup_spi10_cs_gpio_init_high: qup-spi10-cs-gpio-init-high {
+ pinconf {
+ pins = "gpio89";
+ output-high;
+ };
+ };
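A note on the matching pinctrl-0 changes to &spi0, &spi6 and &ap_spi_fp above: listing the *_init_high entry before the normal CS entry makes pinctrl drive each chip-select line high (deasserted) as soon as the default state is applied at probe, presumably so the line never floats or glitches low before the SPI core claims it as a cs-gpio.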
+
qup_uart3_sleep: qup-uart3-sleep {
pinmux {
pins = "gpio38", "gpio39",
@@ -1386,14 +1454,16 @@ ap_spi_fp: &spi10 {
};
};
- trackpad_int_1v8_odl: trackpad-int-1v8-odl {
+ /* Named trackpad_int_1v8_odl on earlier revision schematics */
+ trackpad_int_1v8_odl:
+ tp_int_odl: tp-int-odl {
pinmux {
- pins = "gpio58";
+ pins = "gpio0";
function = "gpio";
};
pinconf {
- pins = "gpio58";
+ pins = "gpio0";
/* Has external pullup */
bias-disable;
diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi
index 1ea3344ab62c..6228ba2d8513 100644
--- a/arch/arm64/boot/dts/qcom/sc7180.dtsi
+++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi
@@ -110,9 +110,9 @@
no-map;
};
- rmtfs_mem: memory@84400000 {
+ rmtfs_mem: memory@94600000 {
compatible = "qcom,rmtfs-mem";
- reg = <0x0 0x84400000 0x0 0x200000>;
+ reg = <0x0 0x94600000 0x0 0x200000>;
no-map;
qcom,client-id = <1>;
@@ -1856,12 +1856,6 @@
pins = "gpio49", "gpio50", "gpio51";
function = "mi2s_1";
};
-
- pinconf {
- pins = "gpio49", "gpio50", "gpio51";
- drive-strength = <8>;
- bias-pull-up;
- };
};
pri_mi2s_active: pri-mi2s-active {
@@ -1869,12 +1863,6 @@
pins = "gpio53", "gpio54", "gpio55", "gpio56";
function = "mi2s_0";
};
-
- pinconf {
- pins = "gpio53", "gpio54", "gpio55", "gpio56";
- drive-strength = <8>;
- bias-pull-up;
- };
};
pri_mi2s_mclk_active: pri-mi2s-mclk-active {
@@ -1882,12 +1870,6 @@
pins = "gpio57";
function = "lpass_ext";
};
-
- pinconf {
- pins = "gpio57";
- drive-strength = <8>;
- bias-pull-up;
- };
};
sdc1_on: sdc1-on {
@@ -2770,12 +2752,11 @@
};
usb_1_qmpphy: phy-wrapper@88e9000 {
- compatible = "qcom,sc7180-qmp-usb3-phy";
+ compatible = "qcom,sc7180-qmp-usb3-dp-phy";
reg = <0 0x088e9000 0 0x18c>,
- <0 0x088e8000 0 0x38>;
- reg-names = "reg-base", "dp_com";
+ <0 0x088e8000 0 0x38>,
+ <0 0x088ea000 0 0x40>;
status = "disabled";
- #clock-cells = <1>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -2790,7 +2771,7 @@
<&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
reset-names = "phy", "common";
- usb_1_ssphy: phy@88e9200 {
+ usb_1_ssphy: usb3-phy@88e9200 {
reg = <0 0x088e9200 0 0x128>,
<0 0x088e9400 0 0x200>,
<0 0x088e9c00 0 0x218>,
@@ -2803,6 +2784,16 @@
clock-names = "pipe0";
clock-output-names = "usb3_phy_pipe_clk_src";
};
+
+ dp_phy: dp-phy@88ea200 {
+ reg = <0 0x088ea200 0 0x200>,
+ <0 0x088ea400 0 0x200>,
+ <0 0x088eaa00 0 0x200>,
+ <0 0x088ea600 0 0x200>,
+ <0 0x088ea800 0 0x200>;
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+ };
};
dc_noc: interconnect@9160000 {
@@ -2854,10 +2845,10 @@
<&gcc GCC_USB30_PRIM_MASTER_CLK>;
assigned-clock-rates = <19200000>, <150000000>;
- interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 488 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 489 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 8 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 9 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "hs_phy_irq", "ss_phy_irq",
"dm_hs_phy_irq", "dp_hs_phy_irq";
@@ -3166,8 +3157,8 @@
<&gcc GCC_DISP_GPLL0_CLK_SRC>,
<&dsi_phy 0>,
<&dsi_phy 1>,
- <0>,
- <0>;
+ <&dp_phy 0>,
+ <&dp_phy 1>;
clock-names = "bi_tcxo",
"gcc_disp_gpll0_clk_src",
"dsi0_phy_pll_out_byteclk",
@@ -3222,7 +3213,7 @@
#reset-cells = <1>;
};
- aoss_qmp: qmp@c300000 {
+ aoss_qmp: power-controller@c300000 {
compatible = "qcom,sc7180-aoss-qmp";
reg = <0 0x0c300000 0 0x100000>;
interrupts = <GIC_SPI 389 IRQ_TYPE_EDGE_RISING>;
@@ -3575,7 +3566,8 @@
reg = <0 0x62f00000 0 0x29000>;
reg-names = "lpass-lpaif";
- iommus = <&apps_smmu 0x1020 0>;
+ iommus = <&apps_smmu 0x1020 0>,
+ <&apps_smmu 0x1021 0>;
power-domains = <&lpass_hm LPASS_CORE_HM_GDSCR>;
diff --git a/arch/arm64/boot/dts/qcom/sc7280-idp.dts b/arch/arm64/boot/dts/qcom/sc7280-idp.dts
new file mode 100644
index 000000000000..54d2cb365b71
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7280-idp.dts
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * sc7280 IDP board device tree source
+ *
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "sc7280.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. sc7280 IDP platform";
+ compatible = "qcom,sc7280-idp", "qcom,sc7280";
+
+ aliases {
+ serial0 = &uart5;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+};
+
+&apps_rsc {
+ pm7325-regulators {
+ compatible = "qcom,pm7325-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vreg_s1b_1p8: smps1 {
+ regulator-min-microvolt = <1856000>;
+ regulator-max-microvolt = <2040000>;
+ };
+
+ vreg_s7b_0p9: smps7 {
+ regulator-min-microvolt = <535000>;
+ regulator-max-microvolt = <1120000>;
+ };
+
+ vreg_s8b_1p2: smps8 {
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1500000>;
+ };
+
+ vreg_l1b_0p8: ldo1 {
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <925000>;
+ };
+
+ vreg_l2b_3p0: ldo2 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3544000>;
+ };
+
+ vreg_l6b_1p2: ldo6 {
+ regulator-min-microvolt = <1140000>;
+ regulator-max-microvolt = <1260000>;
+ };
+
+ vreg_l7b_2p9: ldo7 {
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ };
+
+ vreg_l8b_0p9: ldo8 {
+ regulator-min-microvolt = <870000>;
+ regulator-max-microvolt = <970000>;
+ };
+
+ vreg_l9b_1p2: ldo9 {
+ regulator-min-microvolt = <1080000>;
+ regulator-max-microvolt = <1304000>;
+ };
+
+ vreg_l11b_1p7: ldo11 {
+ regulator-min-microvolt = <1504000>;
+ regulator-max-microvolt = <2000000>;
+ };
+
+ vreg_l12b_0p8: ldo12 {
+ regulator-min-microvolt = <751000>;
+ regulator-max-microvolt = <824000>;
+ };
+
+ vreg_l13b_0p8: ldo13 {
+ regulator-min-microvolt = <530000>;
+ regulator-max-microvolt = <824000>;
+ };
+
+ vreg_l14b_1p2: ldo14 {
+ regulator-min-microvolt = <1080000>;
+ regulator-max-microvolt = <1304000>;
+ };
+
+ vreg_l15b_0p8: ldo15 {
+ regulator-min-microvolt = <765000>;
+ regulator-max-microvolt = <1020000>;
+ };
+
+ vreg_l16b_1p2: ldo16 {
+ regulator-min-microvolt = <1100000>;
+ regulator-max-microvolt = <1300000>;
+ };
+
+ vreg_l17b_1p8: ldo17 {
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ };
+
+ vreg_l18b_1p8: ldo18 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2000000>;
+ };
+
+ vreg_l19b_1p8: ldo19 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+
+ pm8350c-regulators {
+ compatible = "qcom,pm8350c-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vreg_s1c_2p2: smps1 {
+ regulator-min-microvolt = <2190000>;
+ regulator-max-microvolt = <2210000>;
+ };
+
+ vreg_s9c_1p0: smps9 {
+ regulator-min-microvolt = <1010000>;
+ regulator-max-microvolt = <1170000>;
+ };
+
+ vreg_l1c_1p8: ldo1 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1980000>;
+ };
+
+ vreg_l2c_1p8: ldo2 {
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <1980000>;
+ };
+
+ vreg_l3c_3p0: ldo3 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3540000>;
+ };
+
+ vreg_l4c_1p8: ldo4 {
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vreg_l5c_1p8: ldo5 {
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vreg_l6c_2p9: ldo6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+
+ vreg_l7c_3p0: ldo7 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3544000>;
+ };
+
+ vreg_l8c_1p8: ldo8 {
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <2000000>;
+ };
+
+ vreg_l9c_2p9: ldo9 {
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <2960000>;
+ };
+
+ vreg_l10c_0p8: ldo10 {
+ regulator-min-microvolt = <720000>;
+ regulator-max-microvolt = <1050000>;
+ };
+
+ vreg_l11c_2p8: ldo11 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <3544000>;
+ };
+
+ vreg_l12c_1p8: ldo12 {
+ regulator-min-microvolt = <1650000>;
+ regulator-max-microvolt = <2000000>;
+ };
+
+ vreg_l13c_3p0: ldo13 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <3544000>;
+ };
+
+ vreg_bob: bob {
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3960000>;
+ };
+ };
+
+ pmr735a-regulators {
+ compatible = "qcom,pmr735a-rpmh-regulators";
+ qcom,pmic-id = "e";
+
+ vreg_l2e_1p2: ldo2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+
+ vreg_l3e_0p9: ldo3 {
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <1020000>;
+ };
+
+ vreg_l4e_1p7: ldo4 {
+ regulator-min-microvolt = <1776000>;
+ regulator-max-microvolt = <1890000>;
+ };
+
+ vreg_l5e_0p8: ldo5 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+
+ vreg_l6e_0p8: ldo6 {
+ regulator-min-microvolt = <480000>;
+ regulator-max-microvolt = <904000>;
+ };
+ };
+};
+
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&uart5 {
+ status = "okay";
+};
+
+/* PINCTRL - additions to nodes defined in sc7280.dtsi */
+
+&qup_uart5_default {
+ tx {
+ pins = "gpio46";
+ drive-strength = <2>;
+ bias-disable;
+ };
+
+ rx {
+ pins = "gpio47";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
new file mode 100644
index 000000000000..2cc478553935
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
@@ -0,0 +1,1128 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * sc7280 SoC device tree source
+ *
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <dt-bindings/clock/qcom,gcc-sc7280.h>
+#include <dt-bindings/clock/qcom,rpmh.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/mailbox/qcom-ipcc.h>
+#include <dt-bindings/power/qcom-aoss-qmp.h>
+#include <dt-bindings/power/qcom-rpmpd.h>
+#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+
+/ {
+ interrupt-parent = <&intc>;
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ clocks {
+ xo_board: xo-board {
+ compatible = "fixed-clock";
+ clock-frequency = <76800000>;
+ #clock-cells = <0>;
+ };
+
+ sleep_clk: sleep-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <32000>;
+ #clock-cells = <0>;
+ };
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ aop_mem: memory@80800000 {
+ reg = <0x0 0x80800000 0x0 0x60000>;
+ no-map;
+ };
+
+ aop_cmd_db_mem: memory@80860000 {
+ reg = <0x0 0x80860000 0x0 0x20000>;
+ compatible = "qcom,cmd-db";
+ no-map;
+ };
+
+ cpucp_mem: memory@80b00000 {
+ no-map;
+ reg = <0x0 0x80b00000 0x0 0x100000>;
+ };
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ CPU0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,kryo";
+ reg = <0x0 0x0>;
+ enable-method = "psci";
+ cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+ &LITTLE_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
+ next-level-cache = <&L2_0>;
+ L2_0: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ L3_0: l3-cache {
+ compatible = "cache";
+ };
+ };
+ };
+
+ CPU1: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,kryo";
+ reg = <0x0 0x100>;
+ enable-method = "psci";
+ cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+ &LITTLE_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
+ next-level-cache = <&L2_100>;
+ L2_100: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU2: cpu@200 {
+ device_type = "cpu";
+ compatible = "arm,kryo";
+ reg = <0x0 0x200>;
+ enable-method = "psci";
+ cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+ &LITTLE_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
+ next-level-cache = <&L2_200>;
+ L2_200: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU3: cpu@300 {
+ device_type = "cpu";
+ compatible = "arm,kryo";
+ reg = <0x0 0x300>;
+ enable-method = "psci";
+ cpu-idle-states = <&LITTLE_CPU_SLEEP_0
+ &LITTLE_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
+ next-level-cache = <&L2_300>;
+ L2_300: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU4: cpu@400 {
+ device_type = "cpu";
+ compatible = "arm,kryo";
+ reg = <0x0 0x400>;
+ enable-method = "psci";
+ cpu-idle-states = <&BIG_CPU_SLEEP_0
+ &BIG_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
+ next-level-cache = <&L2_400>;
+ L2_400: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU5: cpu@500 {
+ device_type = "cpu";
+ compatible = "arm,kryo";
+ reg = <0x0 0x500>;
+ enable-method = "psci";
+ cpu-idle-states = <&BIG_CPU_SLEEP_0
+ &BIG_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
+ next-level-cache = <&L2_500>;
+ L2_500: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU6: cpu@600 {
+ device_type = "cpu";
+ compatible = "arm,kryo";
+ reg = <0x0 0x600>;
+ enable-method = "psci";
+ cpu-idle-states = <&BIG_CPU_SLEEP_0
+ &BIG_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
+ next-level-cache = <&L2_600>;
+ L2_600: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ CPU7: cpu@700 {
+ device_type = "cpu";
+ compatible = "arm,kryo";
+ reg = <0x0 0x700>;
+ enable-method = "psci";
+ cpu-idle-states = <&BIG_CPU_SLEEP_0
+ &BIG_CPU_SLEEP_1
+ &CLUSTER_SLEEP_0>;
+ next-level-cache = <&L2_700>;
+ L2_700: l2-cache {
+ compatible = "cache";
+ next-level-cache = <&L3_0>;
+ };
+ };
+
+ idle-states {
+ entry-method = "psci";
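+ /* arm,psci-suspend-param is the power_state value passed to PSCI CPU_SUSPEND. */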
+
+ LITTLE_CPU_SLEEP_0: cpu-sleep-0-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "little-power-down";
+ arm,psci-suspend-param = <0x40000003>;
+ entry-latency-us = <549>;
+ exit-latency-us = <901>;
+ min-residency-us = <1774>;
+ local-timer-stop;
+ };
+
+ LITTLE_CPU_SLEEP_1: cpu-sleep-0-1 {
+ compatible = "arm,idle-state";
+ idle-state-name = "little-rail-power-down";
+ arm,psci-suspend-param = <0x40000004>;
+ entry-latency-us = <702>;
+ exit-latency-us = <915>;
+ min-residency-us = <4001>;
+ local-timer-stop;
+ };
+
+ BIG_CPU_SLEEP_0: cpu-sleep-1-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "big-power-down";
+ arm,psci-suspend-param = <0x40000003>;
+ entry-latency-us = <523>;
+ exit-latency-us = <1244>;
+ min-residency-us = <2207>;
+ local-timer-stop;
+ };
+
+ BIG_CPU_SLEEP_1: cpu-sleep-1-1 {
+ compatible = "arm,idle-state";
+ idle-state-name = "big-rail-power-down";
+ arm,psci-suspend-param = <0x40000004>;
+ entry-latency-us = <526>;
+ exit-latency-us = <1854>;
+ min-residency-us = <5555>;
+ local-timer-stop;
+ };
+
+ CLUSTER_SLEEP_0: cluster-sleep-0 {
+ compatible = "arm,idle-state";
+ idle-state-name = "cluster-power-down";
+ arm,psci-suspend-param = <0x40003444>;
+ entry-latency-us = <3263>;
+ exit-latency-us = <6562>;
+ min-residency-us = <9926>;
+ local-timer-stop;
+ };
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* We expect the bootloader to fill in the size */
+ reg = <0 0x80000000 0 0>;
+ };
+
+ firmware {
+ scm {
+ compatible = "qcom,scm-sc7280", "qcom,scm";
+ };
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+
+ soc: soc@0 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0 0 0 0 0x10 0>;
+ dma-ranges = <0 0 0 0 0x10 0>;
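+ /* Identity map of the first 64 GiB of the address space for MMIO and DMA. */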
+ compatible = "simple-bus";
+
+ gcc: clock-controller@100000 {
+ compatible = "qcom,gcc-sc7280";
+ reg = <0 0x00100000 0 0x1f0000>;
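+ /* The <0> clock entries are placeholders for PHY pipe/symbol clocks not yet described here. */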
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>, <&sleep_clk>,
+ <0>, <0>, <0>, <0>, <0>, <0>;
+ clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk",
+ "pcie_0_pipe_clk", "pcie_1_pipe-clk",
+ "ufs_phy_rx_symbol_0_clk", "ufs_phy_rx_symbol_1_clk",
+ "ufs_phy_tx_symbol_0_clk",
+ "usb3_phy_wrapper_gcc_usb30_pipe_clk";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
+ ipcc: mailbox@408000 {
+ compatible = "qcom,sc7280-ipcc", "qcom,ipcc";
+ reg = <0 0x00408000 0 0x1000>;
+ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ #mbox-cells = <2>;
+ };
+
+ qupv3_id_0: geniqup@9c0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0 0x009c0000 0 0x2000>;
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ uart5: serial@994000 {
+ compatible = "qcom,geni-debug-uart";
+ reg = <0 0x00994000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_uart5_default>;
+ interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+ };
+
+ stm@6002000 {
+ compatible = "arm,coresight-stm", "arm,primecell";
+ reg = <0 0x06002000 0 0x1000>,
+ <0 0x16280000 0 0x180000>;
+ reg-names = "stm-base", "stm-stimulus-base";
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ stm_out: endpoint {
+ remote-endpoint = <&funnel0_in7>;
+ };
+ };
+ };
+ };
+
+ funnel@6041000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0 0x06041000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ funnel0_out: endpoint {
+ remote-endpoint = <&merge_funnel_in0>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@7 {
+ reg = <7>;
+ funnel0_in7: endpoint {
+ remote-endpoint = <&stm_out>;
+ };
+ };
+ };
+ };
+
+ funnel@6042000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0 0x06042000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ funnel1_out: endpoint {
+ remote-endpoint = <&merge_funnel_in1>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@4 {
+ reg = <4>;
+ funnel1_in4: endpoint {
+ remote-endpoint = <&apss_merge_funnel_out>;
+ };
+ };
+ };
+ };
+
+ funnel@6045000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0 0x06045000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ merge_funnel_out: endpoint {
+ remote-endpoint = <&swao_funnel_in>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ merge_funnel_in0: endpoint {
+ remote-endpoint = <&funnel0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ merge_funnel_in1: endpoint {
+ remote-endpoint = <&funnel1_out>;
+ };
+ };
+ };
+ };
+
+ replicator@6046000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0 0x06046000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ replicator_out: endpoint {
+ remote-endpoint = <&etr_in>;
+ };
+ };
+ };
+
+ in-ports {
+ port {
+ replicator_in: endpoint {
+ remote-endpoint = <&swao_replicator_out>;
+ };
+ };
+ };
+ };
+
+ etr@6048000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0 0x06048000 0 0x1000>;
+ iommus = <&apps_smmu 0x04c0 0>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,scatter-gather;
+
+ in-ports {
+ port {
+ etr_in: endpoint {
+ remote-endpoint = <&replicator_out>;
+ };
+ };
+ };
+ };
+
+ funnel@6b04000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0 0x06b04000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ swao_funnel_out: endpoint {
+ remote-endpoint = <&etf_in>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@7 {
+ reg = <7>;
+ swao_funnel_in: endpoint {
+ remote-endpoint = <&merge_funnel_out>;
+ };
+ };
+ };
+ };
+
+ etf@6b05000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0 0x06b05000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ etf_out: endpoint {
+ remote-endpoint = <&swao_replicator_in>;
+ };
+ };
+ };
+
+ in-ports {
+ port {
+ etf_in: endpoint {
+ remote-endpoint = <&swao_funnel_out>;
+ };
+ };
+ };
+ };
+
+ replicator@6b06000 {
+ compatible = "arm,coresight-dynamic-replicator", "arm,primecell";
+ reg = <0 0x06b06000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ qcom,replicator-loses-context;
+
+ out-ports {
+ port {
+ swao_replicator_out: endpoint {
+ remote-endpoint = <&replicator_in>;
+ };
+ };
+ };
+
+ in-ports {
+ port {
+ swao_replicator_in: endpoint {
+ remote-endpoint = <&etf_out>;
+ };
+ };
+ };
+ };
+
+ etm@7040000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07040000 0 0x1000>;
+
+ cpu = <&CPU0>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm0_out: endpoint {
+ remote-endpoint = <&apss_funnel_in0>;
+ };
+ };
+ };
+ };
+
+ etm@7140000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07140000 0 0x1000>;
+
+ cpu = <&CPU1>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm1_out: endpoint {
+ remote-endpoint = <&apss_funnel_in1>;
+ };
+ };
+ };
+ };
+
+ etm@7240000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07240000 0 0x1000>;
+
+ cpu = <&CPU2>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm2_out: endpoint {
+ remote-endpoint = <&apss_funnel_in2>;
+ };
+ };
+ };
+ };
+
+ etm@7340000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07340000 0 0x1000>;
+
+ cpu = <&CPU3>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm3_out: endpoint {
+ remote-endpoint = <&apss_funnel_in3>;
+ };
+ };
+ };
+ };
+
+ etm@7440000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07440000 0 0x1000>;
+
+ cpu = <&CPU4>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm4_out: endpoint {
+ remote-endpoint = <&apss_funnel_in4>;
+ };
+ };
+ };
+ };
+
+ etm@7540000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07540000 0 0x1000>;
+
+ cpu = <&CPU5>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm5_out: endpoint {
+ remote-endpoint = <&apss_funnel_in5>;
+ };
+ };
+ };
+ };
+
+ etm@7640000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07640000 0 0x1000>;
+
+ cpu = <&CPU6>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm6_out: endpoint {
+ remote-endpoint = <&apss_funnel_in6>;
+ };
+ };
+ };
+ };
+
+ etm@7740000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x07740000 0 0x1000>;
+
+ cpu = <&CPU7>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+ arm,coresight-loses-context-with-cpu;
+ qcom,skip-power-up;
+
+ out-ports {
+ port {
+ etm7_out: endpoint {
+ remote-endpoint = <&apss_funnel_in7>;
+ };
+ };
+ };
+ };
+
+ funnel@7800000 { /* APSS Funnel */
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0 0x07800000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ apss_funnel_out: endpoint {
+ remote-endpoint = <&apss_merge_funnel_in>;
+ };
+ };
+ };
+
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ apss_funnel_in0: endpoint {
+ remote-endpoint = <&etm0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ apss_funnel_in1: endpoint {
+ remote-endpoint = <&etm1_out>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ apss_funnel_in2: endpoint {
+ remote-endpoint = <&etm2_out>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ apss_funnel_in3: endpoint {
+ remote-endpoint = <&etm3_out>;
+ };
+ };
+
+ port@4 {
+ reg = <4>;
+ apss_funnel_in4: endpoint {
+ remote-endpoint = <&etm4_out>;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+ apss_funnel_in5: endpoint {
+ remote-endpoint = <&etm5_out>;
+ };
+ };
+
+ port@6 {
+ reg = <6>;
+ apss_funnel_in6: endpoint {
+ remote-endpoint = <&etm6_out>;
+ };
+ };
+
+ port@7 {
+ reg = <7>;
+ apss_funnel_in7: endpoint {
+ remote-endpoint = <&etm7_out>;
+ };
+ };
+ };
+ };
+
+ funnel@7810000 {
+ compatible = "arm,coresight-dynamic-funnel", "arm,primecell";
+ reg = <0 0x07810000 0 0x1000>;
+
+ clocks = <&aoss_qmp>;
+ clock-names = "apb_pclk";
+
+ out-ports {
+ port {
+ apss_merge_funnel_out: endpoint {
+ remote-endpoint = <&funnel1_in4>;
+ };
+ };
+ };
+
+ in-ports {
+ port {
+ apss_merge_funnel_in: endpoint {
+ remote-endpoint = <&apss_funnel_out>;
+ };
+ };
+ };
+ };
+
+ system-cache-controller@9200000 {
+ compatible = "qcom,sc7280-llcc";
+ reg = <0 0x09200000 0 0xd0000>, <0 0x09600000 0 0x50000>;
+ reg-names = "llcc_base", "llcc_broadcast_base";
+ interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ pdc: interrupt-controller@b220000 {
+ compatible = "qcom,sc7280-pdc", "qcom,pdc";
+ reg = <0 0x0b220000 0 0x30000>;
+ qcom,pdc-ranges = <0 480 40>, <40 140 14>, <54 263 1>,
+ <55 306 4>, <59 312 3>, <62 374 2>,
+ <64 434 2>, <66 438 3>, <69 86 1>,
+ <70 520 54>, <124 609 31>, <155 63 1>,
+ <156 716 12>;
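+ /* Each triplet maps <PDC pin base, GIC SPI base, pin count>. */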
+ #interrupt-cells = <2>;
+ interrupt-parent = <&intc>;
+ interrupt-controller;
+ };
+
+ aoss_qmp: power-controller@c300000 {
+ compatible = "qcom,sc7280-aoss-qmp";
+ reg = <0 0x0c300000 0 0x100000>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_AOP
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_AOP
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ #clock-cells = <0>;
+ #power-domain-cells = <1>;
+ };
+
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0 0x0c440000 0 0x1100>,
+ <0 0x0c600000 0 0x2000000>,
+ <0 0x0e600000 0 0x100000>,
+ <0 0x0e700000 0 0xa0000>,
+ <0 0x0c40a000 0 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
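+ /* Arbiter core, command channels, observer, interrupt and configuration regions. */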
+ interrupt-names = "periph_irq";
+ interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-controller;
+ #interrupt-cells = <4>;
+ };
+
+ tlmm: pinctrl@f100000 {
+ compatible = "qcom,sc7280-pinctrl";
+ reg = <0 0x0f100000 0 0x300000>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 175>;
+ wakeup-parent = <&pdc>;
+
+ qup_uart5_default: qup-uart5-default {
+ pins = "gpio46", "gpio47";
+ function = "qup13";
+ };
+ };
+
+ apps_smmu: iommu@15000000 {
+ compatible = "qcom,sc7280-smmu-500", "arm,mmu-500";
+ reg = <0 0x15000000 0 0x100000>;
+ #iommu-cells = <2>;
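+ /* Two iommu cells per master: stream ID and SMR mask. */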
+ #global-interrupts = <1>;
+ dma-coherent;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ intc: interrupt-controller@17a00000 {
+ compatible = "arm,gic-v3";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0 0x17a00000 0 0x10000>, /* GICD */
+ <0 0x17a60000 0 0x100000>; /* GICR * 8 */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW>;
+
+ gic-its@17a40000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ #msi-cells = <1>;
+ reg = <0 0x17a40000 0 0x20000>;
+ status = "disabled";
+ };
+ };
+
+ watchdog@17c10000 {
+ compatible = "qcom,apss-wdt-sc7280", "qcom,kpss-wdt";
+ reg = <0 0x17c10000 0 0x1000>;
+ clocks = <&sleep_clk>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ timer@17c20000 {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ compatible = "arm,armv7-timer-mem";
+ reg = <0 0x17c20000 0 0x1000>;
+
+ frame@17c21000 {
+ frame-number = <0>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0 0x17c21000 0 0x1000>,
+ <0 0x17c22000 0 0x1000>;
+ };
+
+ frame@17c23000 {
+ frame-number = <1>;
+ interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0 0x17c23000 0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c25000 {
+ frame-number = <2>;
+ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0 0x17c25000 0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c27000 {
+ frame-number = <3>;
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0 0x17c27000 0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c29000 {
+ frame-number = <4>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0 0x17c29000 0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c2b000 {
+ frame-number = <5>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0 0x17c2b000 0 0x1000>;
+ status = "disabled";
+ };
+
+ frame@17c2d000 {
+ frame-number = <6>;
+ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ reg = <0 0x17c2d000 0 0x1000>;
+ status = "disabled";
+ };
+ };
+
+ apps_rsc: rsc@18200000 {
+ compatible = "qcom,rpmh-rsc";
+ reg = <0 0x18200000 0 0x10000>,
+ <0 0x18210000 0 0x10000>,
+ <0 0x18220000 0 0x10000>;
+ reg-names = "drv-0", "drv-1", "drv-2";
+ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,tcs-offset = <0xd00>;
+ qcom,drv-id = <2>;
+ qcom,tcs-config = <ACTIVE_TCS 2>,
+ <SLEEP_TCS 3>,
+ <WAKE_TCS 3>,
+ <CONTROL_TCS 1>;
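+ /* <TCS type, count>: trigger command sets owned by this DRV. */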
+
+ rpmhpd: power-controller {
+ compatible = "qcom,sc7280-rpmhpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmhpd_opp_table>;
+
+ rpmhpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
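+ /* opp-level values are abstract RPMh performance corners, not frequencies. */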
+
+ rpmhpd_opp_ret: opp1 {
+ opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
+ };
+
+ rpmhpd_opp_low_svs: opp2 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+
+ rpmhpd_opp_svs: opp3 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+
+ rpmhpd_opp_svs_l1: opp4 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ };
+
+ rpmhpd_opp_svs_l2: opp5 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L2>;
+ };
+
+ rpmhpd_opp_nom: opp6 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ };
+
+ rpmhpd_opp_nom_l1: opp7 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ };
+
+ rpmhpd_opp_turbo: opp8 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ };
+
+ rpmhpd_opp_turbo_l1: opp9 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ };
+ };
+ };
+
+ rpmhcc: clock-controller {
+ compatible = "qcom,sc7280-rpmh-clk";
+ clocks = <&xo_board>;
+ clock-names = "xo";
+ #clock-cells = <1>;
+ };
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index c4ac6f5dc008..2d5533dd4ec2 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -244,7 +244,7 @@
&adsp_pas {
status = "okay";
- firmware-name = "qcom/sdm845/adsp.mdt";
+ firmware-name = "qcom/sdm845/adsp.mbn";
};
&apps_rsc {
@@ -390,7 +390,7 @@
&cdsp_pas {
status = "okay";
- firmware-name = "qcom/sdm845/cdsp.mdt";
+ firmware-name = "qcom/sdm845/cdsp.mbn";
};
&dsi0 {
@@ -1015,7 +1015,7 @@
left_spkr: wsa8810-left{
compatible = "sdw10217201000";
reg = <0 1>;
- powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+ powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
#thermal-sensor-cells = <0>;
sound-name-prefix = "SpkrLeft";
#sound-dai-cells = <0>;
@@ -1023,7 +1023,7 @@
right_spkr: wsa8810-right{
compatible = "sdw10217201000";
- powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
+ powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
reg = <0 2>;
#thermal-sensor-cells = <0>;
sound-name-prefix = "SpkrRight";
@@ -1108,6 +1108,25 @@
status = "okay";
};
+&camss {
+ vdda-supply = <&vreg_l1a_0p875>;
+
+ status = "okay";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ csiphy0_ep: endpoint {
+ clock-lanes = <7>;
+ data-lanes = <0 1 2 3>;
+ remote-endpoint = <&ov8856_ep>;
+ };
+ };
+ };
+};
+
&cci_i2c0 {
camera@10 {
compatible = "ovti,ov8856";
@@ -1137,7 +1156,7 @@
avdd-supply = <&cam0_avdd_2v8>;
dvdd-supply = <&cam0_dvdd_1v2>;
- status = "disable";
+ status = "okay";
port {
ov8856_ep: endpoint {
@@ -1145,7 +1164,7 @@
link-frequencies = /bits/ 64
<360000000 180000000>;
data-lanes = <1 2 3 4>;
-// remote-endpoint = <&csiphy0_ep>;
+ remote-endpoint = <&csiphy0_ep>;
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts
index 86cbae63eaf7..7d029425336e 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-xiaomi-beryllium.dts
@@ -157,6 +157,14 @@
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
+ vreg_l14a_1p8: ldo14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
vreg_l17a_1p3: ldo17 {
regulator-min-microvolt = <1304000>;
regulator-max-microvolt = <1304000>;
@@ -191,6 +199,7 @@
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-boot-on;
};
};
};
@@ -200,6 +209,43 @@
firmware-name = "qcom/sdm845/cdsp.mdt";
};
+&dsi0 {
+ status = "okay";
+ vdda-supply = <&vreg_l26a_1p2>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "tianma,fhd-video";
+ reg = <0>;
+ vddi0-supply = <&vreg_l14a_1p8>;
+ vddpos-supply = <&lab>;
+ vddneg-supply = <&ibb>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_LOW>;
+
+ port {
+ tianma_nt36672a_in_0: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+};
+
+&dsi0_out {
+ remote-endpoint = <&tianma_nt36672a_in_0>;
+ data-lanes = <0 1 2 3>;
+};
+
+&dsi0_phy {
+ status = "okay";
+ vdds-supply = <&vreg_l1a_0p875>;
+};
+
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
@@ -215,6 +261,31 @@
};
};
+&ibb {
+ regulator-min-microvolt = <4600000>;
+ regulator-max-microvolt = <6000000>;
+ regulator-over-current-protection;
+ regulator-pull-down;
+ regulator-soft-start;
+ qcom,discharge-resistor-kohms = <300>;
+};
+
+&lab {
+ regulator-min-microvolt = <4600000>;
+ regulator-max-microvolt = <6000000>;
+ regulator-over-current-protection;
+ regulator-pull-down;
+ regulator-soft-start;
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_mdp {
+ status = "okay";
+};
+
&mss_pil {
status = "okay";
firmware-name = "qcom/sdm845/mba.mbn", "qcom/sdm845/modem.mdt";
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 454f794af547..0a86fe71a66d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -1061,6 +1061,16 @@
gcc: clock-controller@100000 {
compatible = "qcom,gcc-sdm845";
reg = <0 0x00100000 0 0x1f0000>;
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>,
+ <&sleep_clk>,
+ <&pcie0_lane>,
+ <&pcie1_lane>;
+ clock-names = "bi_tcxo",
+ "bi_tcxo_ao",
+ "sleep_clk",
+ "pcie_0_pipe_clk",
+ "pcie_1_pipe_clk";
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
@@ -2062,6 +2072,7 @@
clocks = <&gcc GCC_PCIE_0_PIPE_CLK>;
clock-names = "pipe0";
+ #clock-cells = <0>;
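+ /* Register the single pipe clock output consumed by the GCC's "pcie_0_pipe_clk". */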
#phy-cells = <0>;
clock-output-names = "pcie_0_pipe_clk";
};
@@ -2170,6 +2181,7 @@
clocks = <&gcc GCC_PCIE_1_PIPE_CLK>;
clock-names = "pipe0";
+ #clock-cells = <0>;
#phy-cells = <0>;
clock-output-names = "pcie_1_pipe_clk";
};
@@ -2382,7 +2394,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
- gpio-ranges = <&tlmm 0 0 150>;
+ gpio-ranges = <&tlmm 0 0 151>;
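+ /* 150 GPIOs plus the dedicated UFS_RESET pin. */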
wakeup-parent = <&pdc_intc>;
cci0_default: cci0-default {
@@ -3673,7 +3685,6 @@
<0 0x088e8000 0 0x10>;
reg-names = "reg-base", "dp_com";
status = "disabled";
- #clock-cells = <1>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -3695,6 +3706,7 @@
<0 0x088e9600 0 0x128>,
<0 0x088e9800 0 0x200>,
<0 0x088e9a00 0 0x100>;
+ #clock-cells = <0>;
#phy-cells = <0>;
clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
clock-names = "pipe0";
@@ -3706,7 +3718,6 @@
compatible = "qcom,sdm845-qmp-usb3-uni-phy";
reg = <0 0x088eb000 0 0x18c>;
status = "disabled";
- #clock-cells = <1>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -3726,6 +3737,7 @@
<0 0x088eb400 0 0x1fc>,
<0 0x088eb800 0 0x218>,
<0 0x088eb600 0 0x70>;
+ #clock-cells = <0>;
#phy-cells = <0>;
clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
clock-names = "pipe0";
@@ -3909,6 +3921,141 @@
#reset-cells = <1>;
};
+ camss: camss@a00000 {
+ compatible = "qcom,sdm845-camss";
+
+ reg = <0 0xacb3000 0 0x1000>,
+ <0 0xacba000 0 0x1000>,
+ <0 0xacc8000 0 0x1000>,
+ <0 0xac65000 0 0x1000>,
+ <0 0xac66000 0 0x1000>,
+ <0 0xac67000 0 0x1000>,
+ <0 0xac68000 0 0x1000>,
+ <0 0xacaf000 0 0x4000>,
+ <0 0xacb6000 0 0x4000>,
+ <0 0xacc4000 0 0x4000>;
+ reg-names = "csid0",
+ "csid1",
+ "csid2",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "csiphy3",
+ "vfe0",
+ "vfe1",
+ "vfe_lite";
+
+ interrupts = <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 479 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "csid0",
+ "csid1",
+ "csid2",
+ "csiphy0",
+ "csiphy1",
+ "csiphy2",
+ "csiphy3",
+ "vfe0",
+ "vfe1",
+ "vfe_lite";
+
+ power-domains = <&clock_camcc IFE_0_GDSC>,
+ <&clock_camcc IFE_1_GDSC>,
+ <&clock_camcc TITAN_TOP_GDSC>;
+
+ clocks = <&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+ <&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+ <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_0_CSID_CLK>,
+ <&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_1_CSID_CLK>,
+ <&clock_camcc CAM_CC_IFE_1_CSID_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_LITE_CSID_CLK>,
+ <&clock_camcc CAM_CC_IFE_LITE_CSID_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSIPHY0_CLK>,
+ <&clock_camcc CAM_CC_CSI0PHYTIMER_CLK>,
+ <&clock_camcc CAM_CC_CSI0PHYTIMER_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSIPHY1_CLK>,
+ <&clock_camcc CAM_CC_CSI1PHYTIMER_CLK>,
+ <&clock_camcc CAM_CC_CSI1PHYTIMER_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSIPHY2_CLK>,
+ <&clock_camcc CAM_CC_CSI2PHYTIMER_CLK>,
+ <&clock_camcc CAM_CC_CSI2PHYTIMER_CLK_SRC>,
+ <&clock_camcc CAM_CC_CSIPHY3_CLK>,
+ <&clock_camcc CAM_CC_CSI3PHYTIMER_CLK>,
+ <&clock_camcc CAM_CC_CSI3PHYTIMER_CLK_SRC>,
+ <&gcc GCC_CAMERA_AHB_CLK>,
+ <&gcc GCC_CAMERA_AXI_CLK>,
+ <&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+ <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+ <&clock_camcc CAM_CC_IFE_0_AXI_CLK>,
+ <&clock_camcc CAM_CC_IFE_0_CLK>,
+ <&clock_camcc CAM_CC_IFE_0_CPHY_RX_CLK>,
+ <&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_1_AXI_CLK>,
+ <&clock_camcc CAM_CC_IFE_1_CLK>,
+ <&clock_camcc CAM_CC_IFE_1_CPHY_RX_CLK>,
+ <&clock_camcc CAM_CC_IFE_1_CLK_SRC>,
+ <&clock_camcc CAM_CC_IFE_LITE_CLK>,
+ <&clock_camcc CAM_CC_IFE_LITE_CPHY_RX_CLK>,
+ <&clock_camcc CAM_CC_IFE_LITE_CLK_SRC>;
+ clock-names = "camnoc_axi",
+ "cpas_ahb",
+ "cphy_rx_src",
+ "csi0",
+ "csi0_src",
+ "csi1",
+ "csi1_src",
+ "csi2",
+ "csi2_src",
+ "csiphy0",
+ "csiphy0_timer",
+ "csiphy0_timer_src",
+ "csiphy1",
+ "csiphy1_timer",
+ "csiphy1_timer_src",
+ "csiphy2",
+ "csiphy2_timer",
+ "csiphy2_timer_src",
+ "csiphy3",
+ "csiphy3_timer",
+ "csiphy3_timer_src",
+ "gcc_camera_ahb",
+ "gcc_camera_axi",
+ "slow_ahb_src",
+ "soc_ahb",
+ "vfe0_axi",
+ "vfe0",
+ "vfe0_cphy_rx",
+ "vfe0_src",
+ "vfe1_axi",
+ "vfe1",
+ "vfe1_cphy_rx",
+ "vfe1_src",
+ "vfe_lite",
+ "vfe_lite_cphy_rx",
+ "vfe_lite_src";
+
+ iommus = <&apps_smmu 0x0808 0x0>,
+ <&apps_smmu 0x0810 0x8>,
+ <&apps_smmu 0x0c08 0x0>,
+ <&apps_smmu 0x0c10 0x8>;
+
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+ };
+
cci: cci@ac4a000 {
compatible = "qcom,sdm845-cci";
#address-cells = <1>;
@@ -4428,7 +4575,7 @@
#reset-cells = <1>;
};
- aoss_qmp: qmp@c300000 {
+ aoss_qmp: power-controller@c300000 {
compatible = "qcom,sdm845-aoss-qmp";
reg = <0 0x0c300000 0 0x100000>;
interrupts = <GIC_SPI 389 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
index e5bb17bc2f46..51235a9521c2 100644
--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
@@ -577,17 +577,188 @@
<&sleep_clk>;
};
+ qupv3_id_0: geniqup@8c0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x0 0x008c0000 0x0 0x6000>;
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ iommus = <&apps_smmu 0xc3 0x0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ i2c0: i2c@880000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00880000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c0_default>;
+ interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@884000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00884000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c1_default>;
+ interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@888000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00888000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c2_default>;
+ interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@88c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x0088c000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c3_default>;
+ interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@890000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00890000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c4_default>;
+ interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@894000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00894000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c5_default>;
+ interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c6: i2c@898000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00898000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c6_default>;
+ interrupts = <GIC_SPI 607 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c7: i2c@89c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x0089c000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c7_default>;
+ interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+
qupv3_id_1: geniqup@ac0000 {
compatible = "qcom,geni-se-qup";
reg = <0x0 0x00ac0000 0x0 0x6000>;
clock-names = "m-ahb", "s-ahb";
clocks = <&gcc GCC_QUPV3_WRAP_1_M_AHB_CLK>,
<&gcc GCC_QUPV3_WRAP_1_S_AHB_CLK>;
+ iommus = <&apps_smmu 0x603 0x0>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
status = "disabled";
+ i2c8: i2c@a80000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00a80000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c8_default>;
+ interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c9: i2c@a84000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00a84000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c9_default>;
+ interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c10: i2c@a88000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00a88000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c10_default>;
+ interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c11: i2c@a8c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00a8c000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c11_default>;
+ interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
uart2: serial@a90000 {
compatible = "qcom,geni-debug-uart";
reg = <0x0 0x00a90000 0x0 0x4000>;
@@ -596,6 +767,124 @@
interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
status = "disabled";
};
+
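+ /* i2c12 shares SE4 (register block and interrupt) with uart2; enable only one. */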
+ i2c12: i2c@a90000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00a90000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c12_default>;
+ interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c16: i2c@a94000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00a94000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c16_default>;
+ interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ };
+
+ qupv3_id_2: geniqup@cc0000 {
+ compatible = "qcom,geni-se-qup";
+ reg = <0x0 0x00cc0000 0x0 0x6000>;
+
+ clock-names = "m-ahb", "s-ahb";
+ clocks = <&gcc GCC_QUPV3_WRAP_2_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_2_S_AHB_CLK>;
+ iommus = <&apps_smmu 0x7a3 0x0>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ status = "disabled";
+
+ i2c17: i2c@c80000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00c80000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP2_S0_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c17_default>;
+ interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c18: i2c@c84000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00c84000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP2_S1_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c18_default>;
+ interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c19: i2c@c88000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00c88000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP2_S2_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c19_default>;
+ interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c13: i2c@c8c000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00c8c000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP2_S3_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c13_default>;
+ interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c14: i2c@c90000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00c90000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP2_S4_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c14_default>;
+ interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c15: i2c@c94000 {
+ compatible = "qcom,geni-i2c";
+ reg = <0 0x00c94000 0 0x4000>;
+ clock-names = "se";
+ clocks = <&gcc GCC_QUPV3_WRAP2_S5_CLK>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&qup_i2c15_default>;
+ interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
};
config_noc: interconnect@1500000 {
@@ -914,11 +1203,271 @@
<0x0 0x03D00000 0x0 0x300000>;
reg-names = "west", "east", "north", "south";
interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
- gpio-ranges = <&tlmm 0 0 175>;
+ gpio-ranges = <&tlmm 0 0 176>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+
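+ /* Each group pairs a mux node (function select) with a config node (drive strength and bias). */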
+ qup_i2c0_default: qup-i2c0-default {
+ mux {
+ pins = "gpio0", "gpio1";
+ function = "qup0";
+ };
+
+ config {
+ pins = "gpio0", "gpio1";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c1_default: qup-i2c1-default {
+ mux {
+ pins = "gpio114", "gpio115";
+ function = "qup1";
+ };
+
+ config {
+ pins = "gpio114", "gpio115";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c2_default: qup-i2c2-default {
+ mux {
+ pins = "gpio126", "gpio127";
+ function = "qup2";
+ };
+
+ config {
+ pins = "gpio126", "gpio127";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c3_default: qup-i2c3-default {
+ mux {
+ pins = "gpio144", "gpio145";
+ function = "qup3";
+ };
+
+ config {
+ pins = "gpio144", "gpio145";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c4_default: qup-i2c4-default {
+ mux {
+ pins = "gpio51", "gpio52";
+ function = "qup4";
+ };
+
+ config {
+ pins = "gpio51", "gpio52";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c5_default: qup-i2c5-default {
+ mux {
+ pins = "gpio121", "gpio122";
+ function = "qup5";
+ };
+
+ config {
+ pins = "gpio121", "gpio122";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c6_default: qup-i2c6-default {
+ mux {
+ pins = "gpio6", "gpio7";
+ function = "qup6";
+ };
+
+ config {
+ pins = "gpio6", "gpio7";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c7_default: qup-i2c7-default {
+ mux {
+ pins = "gpio98", "gpio99";
+ function = "qup7";
+ };
+
+ config {
+ pins = "gpio98", "gpio99";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c8_default: qup-i2c8-default {
+ mux {
+ pins = "gpio88", "gpio89";
+ function = "qup8";
+ };
+
+ config {
+ pins = "gpio88", "gpio89";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c9_default: qup-i2c9-default {
+ mux {
+ pins = "gpio39", "gpio40";
+ function = "qup9";
+ };
+
+ config {
+ pins = "gpio39", "gpio40";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c10_default: qup-i2c10-default {
+ mux {
+ pins = "gpio9", "gpio10";
+ function = "qup10";
+ };
+
+ config {
+ pins = "gpio9", "gpio10";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c11_default: qup-i2c11-default {
+ mux {
+ pins = "gpio94", "gpio95";
+ function = "qup11";
+ };
+
+ config {
+ pins = "gpio94", "gpio95";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c12_default: qup-i2c12-default {
+ mux {
+ pins = "gpio83", "gpio84";
+ function = "qup12";
+ };
+
+ config {
+ pins = "gpio83", "gpio84";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c13_default: qup-i2c13-default {
+ mux {
+ pins = "gpio43", "gpio44";
+ function = "qup13";
+ };
+
+ config {
+ pins = "gpio43", "gpio44";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c14_default: qup-i2c14-default {
+ mux {
+ pins = "gpio47", "gpio48";
+ function = "qup14";
+ };
+
+ config {
+ pins = "gpio47", "gpio48";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c15_default: qup-i2c15-default {
+ mux {
+ pins = "gpio27", "gpio28";
+ function = "qup15";
+ };
+
+ config {
+ pins = "gpio27", "gpio28";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c16_default: qup-i2c16-default {
+ mux {
+ pins = "gpio86", "gpio85";
+ function = "qup16";
+ };
+
+ config {
+ pins = "gpio86", "gpio85";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c17_default: qup-i2c17-default {
+ mux {
+ pins = "gpio55", "gpio56";
+ function = "qup17";
+ };
+
+ config {
+ pins = "gpio55", "gpio56";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c18_default: qup-i2c18-default {
+ mux {
+ pins = "gpio23", "gpio24";
+ function = "qup18";
+ };
+
+ config {
+ pins = "gpio23", "gpio24";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
+
+ qup_i2c19_default: qup-i2c19-default {
+ mux {
+ pins = "gpio57", "gpio58";
+ function = "qup19";
+ };
+
+ config {
+ pins = "gpio57", "gpio58";
+ drive-strength = <0x02>;
+ bias-disable;
+ };
+ };
};
remoteproc_mpss: remoteproc@4080000 {
@@ -1612,7 +2161,6 @@
<0 0x088e8000 0 0x10>;
reg-names = "reg-base", "dp_com";
status = "disabled";
- #clock-cells = <1>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -1634,6 +2182,7 @@
<0 0x088e9600 0 0x200>,
<0 0x088e9800 0 0x200>,
<0 0x088e9a00 0 0x100>;
+ #clock-cells = <0>;
#phy-cells = <0>;
clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
clock-names = "pipe0";
@@ -1659,7 +2208,6 @@
compatible = "qcom,sm8150-qmp-usb3-uni-phy";
reg = <0 0x088eb000 0 0x200>;
status = "disabled";
- #clock-cells = <1>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -1679,6 +2227,7 @@
<0 0x088eb400 0 0x200>,
<0 0x088eb800 0 0x800>,
<0 0x088eb600 0 0x200>;
+ #clock-cells = <0>;
#phy-cells = <0>;
clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
clock-names = "pipe0";
diff --git a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
index 5b4c5b08434c..cfc4d1febe0f 100644
--- a/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8250-mtp.dts
@@ -601,10 +601,6 @@
};
};
-&pm8150_rtc {
- status = "okay";
-};
-
&qupv3_id_0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
index 947e1accae3a..4c0de12aaba6 100644
--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
@@ -17,6 +17,7 @@
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
#include <dt-bindings/sound/qcom,q6afe.h>
#include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/clock/qcom,videocc-sm8250.h>
/ {
interrupt-parent = <&intc>;
@@ -279,7 +280,7 @@
pmu {
compatible = "arm,armv8-pmuv3";
- interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
};
psci {
@@ -548,8 +549,6 @@
reg = <0 0x00880000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP2_S0_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi14_default>;
interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -576,8 +575,6 @@
reg = <0 0x00884000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP2_S1_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi15_default>;
interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -604,8 +601,6 @@
reg = <0 0x00888000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP2_S2_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi16_default>;
interrupts = <GIC_SPI 584 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -632,8 +627,6 @@
reg = <0 0x0088c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP2_S3_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi17_default>;
interrupts = <GIC_SPI 585 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -673,8 +666,6 @@
reg = <0 0x00890000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP2_S4_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi18_default>;
interrupts = <GIC_SPI 586 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -714,8 +705,6 @@
reg = <0 0x00894000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP2_S5_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi19_default>;
interrupts = <GIC_SPI 587 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -755,8 +744,6 @@
reg = <0 0x00980000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S0_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi0_default>;
interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -783,8 +770,6 @@
reg = <0 0x00984000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S1_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi1_default>;
interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -811,8 +796,6 @@
reg = <0 0x00988000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S2_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi2_default>;
interrupts = <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -852,8 +835,6 @@
reg = <0 0x0098c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi3_default>;
interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -880,8 +861,6 @@
reg = <0 0x00990000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S4_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi4_default>;
interrupts = <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -908,8 +887,6 @@
reg = <0 0x00994000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S5_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi5_default>;
interrupts = <GIC_SPI 606 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -936,8 +913,6 @@
reg = <0 0x00998000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S6_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi6_default>;
interrupts = <GIC_SPI 607 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -977,8 +952,6 @@
reg = <0 0x0099c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP0_S7_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi7_default>;
interrupts = <GIC_SPI 608 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1018,8 +991,6 @@
reg = <0 0x00a80000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S0_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi8_default>;
interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1046,8 +1017,6 @@
reg = <0 0x00a84000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S1_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi9_default>;
interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1074,8 +1043,6 @@
reg = <0 0x00a88000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S2_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi10_default>;
interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1102,8 +1069,6 @@
reg = <0 0x00a8c000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S3_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi11_default>;
interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1130,8 +1095,6 @@
reg = <0 0x00a90000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S4_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi12_default>;
interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -1171,8 +1134,6 @@
reg = <0 0x00a94000 0 0x4000>;
clock-names = "se";
clocks = <&gcc GCC_QUPV3_WRAP1_S5_CLK>;
- pinctrl-names = "default";
- pinctrl-0 = <&qup_spi13_default>;
interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
@@ -2097,12 +2058,11 @@
};
usb_1_qmpphy: phy@88e9000 {
- compatible = "qcom,sm8250-qmp-usb3-phy";
+ compatible = "qcom,sm8250-qmp-usb3-dp-phy";
reg = <0 0x088e9000 0 0x200>,
- <0 0x088e8000 0 0x20>;
- reg-names = "reg-base", "dp_com";
+ <0 0x088e8000 0 0x40>,
+ <0 0x088ea000 0 0x200>;
status = "disabled";
- #clock-cells = <1>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -2116,25 +2076,39 @@
<&gcc GCC_USB3_PHY_PRIM_BCR>;
reset-names = "phy", "common";
- usb_1_ssphy: lanes@88e9200 {
+ usb_1_ssphy: usb3-phy@88e9200 {
reg = <0 0x088e9200 0 0x200>,
<0 0x088e9400 0 0x200>,
<0 0x088e9c00 0 0x400>,
<0 0x088e9600 0 0x200>,
<0 0x088e9800 0 0x200>,
<0 0x088e9a00 0 0x100>;
+ #clock-cells = <0>;
#phy-cells = <0>;
clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
clock-names = "pipe0";
clock-output-names = "usb3_phy_pipe_clk_src";
};
+
+ dp_phy: dp-phy@88ea200 {
+ reg = <0 0x088ea200 0 0x200>,
+ <0 0x088ea400 0 0x200>,
+ <0 0x088eac00 0 0x400>,
+ <0 0x088ea600 0 0x200>,
+ <0 0x088ea800 0 0x200>,
+ <0 0x088eaa00 0 0x100>;
+ #phy-cells = <0>;
+ #clock-cells = <1>;
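+ /* Two outputs: <&dp_phy 0> link clock and <&dp_phy 1> VCO divider clock, used by dispcc. */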
+ clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+ clock-names = "pipe0";
+ clock-output-names = "usb3_phy_pipe_clk_src";
+ };
};
usb_2_qmpphy: phy@88eb000 {
compatible = "qcom,sm8250-qmp-usb3-uni-phy";
reg = <0 0x088eb000 0 0x200>;
status = "disabled";
- #clock-cells = <1>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -2153,6 +2127,7 @@
reg = <0 0x088eb200 0 0x200>,
<0 0x088eb400 0 0x200>,
<0 0x088eb800 0 0x800>;
+ #clock-cells = <0>;
#phy-cells = <0>;
clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
clock-names = "pipe0";
@@ -2322,15 +2297,86 @@
};
};
+ venus: video-codec@aa00000 {
+ compatible = "qcom,sm8250-venus";
+ reg = <0 0x0aa00000 0 0x100000>;
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&videocc MVS0C_GDSC>,
+ <&videocc MVS0_GDSC>,
+ <&rpmhpd SM8250_MX>;
+ power-domain-names = "venus", "vcodec0", "mx";
+ operating-points-v2 = <&venus_opp_table>;
+
+ clocks = <&gcc GCC_VIDEO_AXI0_CLK>,
+ <&videocc VIDEO_CC_MVS0C_CLK>,
+ <&videocc VIDEO_CC_MVS0_CLK>;
+ clock-names = "iface", "core", "vcodec0_core";
+
+ interconnects = <&gem_noc MASTER_AMPSS_M0 &config_noc SLAVE_VENUS_CFG>,
+ <&mmss_noc MASTER_VIDEO_P0 &mc_virt SLAVE_EBI_CH0>;
+ interconnect-names = "cpu-cfg", "video-mem";
+
+ iommus = <&apps_smmu 0x2100 0x0400>;
+ memory-region = <&video_mem>;
+
+ resets = <&gcc GCC_VIDEO_AXI0_CLK_ARES>,
+ <&videocc VIDEO_CC_MVS0C_CLK_ARES>;
+ reset-names = "bus", "core";
+
+ video-decoder {
+ compatible = "venus-decoder";
+ };
+
+ video-encoder {
+ compatible = "venus-encoder";
+ };
+
+ venus_opp_table: venus-opp-table {
+ compatible = "operating-points-v2";
+
+ opp-720000000 {
+ opp-hz = /bits/ 64 <720000000>;
+ required-opps = <&rpmhpd_opp_low_svs>;
+ };
+
+ opp-1014000000 {
+ opp-hz = /bits/ 64 <1014000000>;
+ required-opps = <&rpmhpd_opp_svs>;
+ };
+
+ opp-1098000000 {
+ opp-hz = /bits/ 64 <1098000000>;
+ required-opps = <&rpmhpd_opp_svs_l1>;
+ };
+
+ opp-1332000000 {
+ opp-hz = /bits/ 64 <1332000000>;
+ required-opps = <&rpmhpd_opp_nom>;
+ };
+ };
+ };
+
+ videocc: clock-controller@abf0000 {
+ compatible = "qcom,sm8250-videocc";
+ reg = <0 0x0abf0000 0 0x10000>;
+ clocks = <&gcc GCC_VIDEO_AHB_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&rpmhcc RPMH_CXO_CLK_A>;
+ mmcx-supply = <&mmcx_reg>;
+ clock-names = "iface", "bi_tcxo", "bi_tcxo_ao";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ };
+
mdss: mdss@ae00000 {
compatible = "qcom,sdm845-mdss";
reg = <0 0x0ae00000 0 0x1000>;
reg-names = "mdss";
- interconnects = <&gem_noc MASTER_AMPSS_M0 &config_noc SLAVE_DISPLAY_CFG>,
- <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>,
+ interconnects = <&mmss_noc MASTER_MDP_PORT0 &mc_virt SLAVE_EBI_CH0>,
<&mmss_noc MASTER_MDP_PORT1 &mc_virt SLAVE_EBI_CH0>;
- interconnect-names = "notused", "mdp0-mem", "mdp1-mem";
+ interconnect-names = "mdp0-mem", "mdp1-mem";
power-domains = <&dispcc MDSS_GDSC>;
@@ -2580,36 +2626,22 @@
dispcc: clock-controller@af00000 {
compatible = "qcom,sm8250-dispcc";
- reg = <0 0x0af00000 0 0x20000>;
+ reg = <0 0x0af00000 0 0x10000>;
mmcx-supply = <&mmcx_reg>;
clocks = <&rpmhcc RPMH_CXO_CLK>,
<&dsi0_phy 0>,
<&dsi0_phy 1>,
<&dsi1_phy 0>,
<&dsi1_phy 1>,
- <0>,
- <0>,
- <0>,
- <0>,
- <0>,
- <0>,
- <0>,
- <0>,
- <&sleep_clk>;
+ <&dp_phy 0>,
+ <&dp_phy 1>;
clock-names = "bi_tcxo",
"dsi0_phy_pll_out_byteclk",
"dsi0_phy_pll_out_dsiclk",
"dsi1_phy_pll_out_byteclk",
"dsi1_phy_pll_out_dsiclk",
- "dp_link_clk_divsel_ten",
- "dp_vco_divided_clk_src_mux",
- "dptx1_phy_pll_link_clk",
- "dptx1_phy_pll_vco_div_clk",
- "dptx2_phy_pll_link_clk",
- "dptx2_phy_pll_vco_div_clk",
- "edp_phy_pll_link_clk",
- "edp_phy_pll_vco_div_clk",
- "sleep_clk";
+ "dp_phy_pll_link_clk",
+ "dp_phy_pll_vco_div_clk";
#clock-cells = <1>;
#reset-cells = <1>;
#power-domain-cells = <1>;
@@ -2647,7 +2679,7 @@
#thermal-sensor-cells = <1>;
};
- aoss_qmp: qmp@c300000 {
+ aoss_qmp: power-controller@c300000 {
compatible = "qcom,sm8250-aoss-qmp";
reg = <0 0x0c300000 0 0x100000>;
interrupts-extended = <&ipcc IPCC_CLIENT_AOP
@@ -2689,7 +2721,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
- gpio-ranges = <&tlmm 0 0 180>;
+ gpio-ranges = <&tlmm 0 0 181>;
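+ /* 180 GPIOs plus the ufs_reset pin */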
wakeup-parent = <&pdc>;
pri_mi2s_active: pri-mi2s-active {
@@ -2983,304 +3015,324 @@
};
};
- qup_spi0_default: qup-spi0-default {
- mux {
- pins = "gpio28", "gpio29",
- "gpio30", "gpio31";
- function = "qup0";
- };
+ qup_spi0_cs: qup-spi0-cs {
+ pins = "gpio31";
+ function = "qup0";
+ };
- config {
- pins = "gpio28", "gpio29",
- "gpio30", "gpio31";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi0_cs_gpio: qup-spi0-cs-gpio {
+ pins = "gpio31";
+ function = "gpio";
};
- qup_spi1_default: qup-spi1-default {
- mux {
- pins = "gpio4", "gpio5",
- "gpio6", "gpio7";
- function = "qup1";
- };
+ qup_spi0_data_clk: qup-spi0-data-clk {
+ pins = "gpio28", "gpio29",
+ "gpio30";
+ function = "qup0";
+ };
- config {
- pins = "gpio4", "gpio5",
- "gpio6", "gpio7";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi1_cs: qup-spi1-cs {
+ pins = "gpio7";
+ function = "qup1";
};
- qup_spi2_default: qup-spi2-default {
- mux {
- pins = "gpio115", "gpio116",
- "gpio117", "gpio118";
- function = "qup2";
- };
+ qup_spi1_cs_gpio: qup-spi1-cs-gpio {
+ pins = "gpio7";
+ function = "gpio";
+ };
- config {
- pins = "gpio115", "gpio116",
- "gpio117", "gpio118";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi1_data_clk: qup-spi1-data-clk {
+ pins = "gpio4", "gpio5",
+ "gpio6";
+ function = "qup1";
};
- qup_spi3_default: qup-spi3-default {
- mux {
- pins = "gpio119", "gpio120",
- "gpio121", "gpio122";
- function = "qup3";
- };
+ qup_spi2_cs: qup-spi2-cs {
+ pins = "gpio118";
+ function = "qup2";
+ };
- config {
- pins = "gpio119", "gpio120",
- "gpio121", "gpio122";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi2_cs_gpio: qup-spi2-cs-gpio {
+ pins = "gpio118";
+ function = "gpio";
};
- qup_spi4_default: qup-spi4-default {
- mux {
- pins = "gpio8", "gpio9",
- "gpio10", "gpio11";
- function = "qup4";
- };
+ qup_spi2_data_clk: qup-spi2-data-clk {
+ pins = "gpio115", "gpio116",
+ "gpio117";
+ function = "qup2";
+ };
- config {
- pins = "gpio8", "gpio9",
- "gpio10", "gpio11";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi3_cs: qup-spi3-cs {
+ pins = "gpio122";
+ function = "qup3";
};
- qup_spi5_default: qup-spi5-default {
- mux {
- pins = "gpio12", "gpio13",
- "gpio14", "gpio15";
- function = "qup5";
- };
+ qup_spi3_cs_gpio: qup-spi3-cs-gpio {
+ pins = "gpio122";
+ function = "gpio";
+ };
- config {
- pins = "gpio12", "gpio13",
- "gpio14", "gpio15";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi3_data_clk: qup-spi3-data-clk {
+ pins = "gpio119", "gpio120",
+ "gpio121";
+ function = "qup3";
};
- qup_spi6_default: qup-spi6-default {
- mux {
- pins = "gpio16", "gpio17",
- "gpio18", "gpio19";
- function = "qup6";
- };
+ qup_spi4_cs: qup-spi4-cs {
+ pins = "gpio11";
+ function = "qup4";
+ };
- config {
- pins = "gpio16", "gpio17",
- "gpio18", "gpio19";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi4_cs_gpio: qup-spi4-cs-gpio {
+ pins = "gpio11";
+ function = "gpio";
};
- qup_spi7_default: qup-spi7-default {
- mux {
- pins = "gpio20", "gpio21",
- "gpio22", "gpio23";
- function = "qup7";
- };
+ qup_spi4_data_clk: qup-spi4-data-clk {
+ pins = "gpio8", "gpio9",
+ "gpio10";
+ function = "qup4";
+ };
- config {
- pins = "gpio20", "gpio21",
- "gpio22", "gpio23";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi5_cs: qup-spi5-cs {
+ pins = "gpio15";
+ function = "qup5";
};
- qup_spi8_default: qup-spi8-default {
- mux {
- pins = "gpio24", "gpio25",
- "gpio26", "gpio27";
- function = "qup8";
- };
+ qup_spi5_cs_gpio: qup-spi5-cs-gpio {
+ pins = "gpio15";
+ function = "gpio";
+ };
- config {
- pins = "gpio24", "gpio25",
- "gpio26", "gpio27";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi5_data_clk: qup-spi5-data-clk {
+ pins = "gpio12", "gpio13",
+ "gpio14";
+ function = "qup5";
};
- qup_spi9_default: qup-spi9-default {
- mux {
- pins = "gpio125", "gpio126",
- "gpio127", "gpio128";
- function = "qup9";
- };
+ qup_spi6_cs: qup-spi6-cs {
+ pins = "gpio19";
+ function = "qup6";
+ };
- config {
- pins = "gpio125", "gpio126",
- "gpio127", "gpio128";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi6_cs_gpio: qup-spi6-cs-gpio {
+ pins = "gpio19";
+ function = "gpio";
};
- qup_spi10_default: qup-spi10-default {
- mux {
- pins = "gpio129", "gpio130",
- "gpio131", "gpio132";
- function = "qup10";
- };
+ qup_spi6_data_clk: qup-spi6-data-clk {
+ pins = "gpio16", "gpio17",
+ "gpio18";
+ function = "qup6";
+ };
- config {
- pins = "gpio129", "gpio130",
- "gpio131", "gpio132";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi7_cs: qup-spi7-cs {
+ pins = "gpio23";
+ function = "qup7";
};
- qup_spi11_default: qup-spi11-default {
- mux {
- pins = "gpio60", "gpio61",
- "gpio62", "gpio63";
- function = "qup11";
- };
+ qup_spi7_cs_gpio: qup-spi7-cs-gpio {
+ pins = "gpio23";
+ function = "gpio";
+ };
- config {
- pins = "gpio60", "gpio61",
- "gpio62", "gpio63";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi7_data_clk: qup-spi7-data-clk {
+ pins = "gpio20", "gpio21",
+ "gpio22";
+ function = "qup7";
};
- qup_spi12_default: qup-spi12-default {
- mux {
- pins = "gpio32", "gpio33",
- "gpio34", "gpio35";
- function = "qup12";
- };
+ qup_spi8_cs: qup-spi8-cs {
+ pins = "gpio27";
+ function = "qup8";
+ };
- config {
- pins = "gpio32", "gpio33",
- "gpio34", "gpio35";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi8_cs_gpio: qup-spi8-cs-gpio {
+ pins = "gpio27";
+ function = "gpio";
};
- qup_spi13_default: qup-spi13-default {
- mux {
- pins = "gpio36", "gpio37",
- "gpio38", "gpio39";
- function = "qup13";
- };
+ qup_spi8_data_clk: qup-spi8-data-clk {
+ pins = "gpio24", "gpio25",
+ "gpio26";
+ function = "qup8";
+ };
- config {
- pins = "gpio36", "gpio37",
- "gpio38", "gpio39";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi9_cs: qup-spi9-cs {
+ pins = "gpio128";
+ function = "qup9";
};
- qup_spi14_default: qup-spi14-default {
- mux {
- pins = "gpio40", "gpio41",
- "gpio42", "gpio43";
- function = "qup14";
- };
+ qup_spi9_cs_gpio: qup-spi9-cs-gpio {
+ pins = "gpio128";
+ function = "gpio";
+ };
- config {
- pins = "gpio40", "gpio41",
- "gpio42", "gpio43";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi9_data_clk: qup-spi9-data-clk {
+ pins = "gpio125", "gpio126",
+ "gpio127";
+ function = "qup9";
};
- qup_spi15_default: qup-spi15-default {
- mux {
- pins = "gpio44", "gpio45",
- "gpio46", "gpio47";
- function = "qup15";
- };
+ qup_spi10_cs: qup-spi10-cs {
+ pins = "gpio132";
+ function = "qup10";
+ };
- config {
- pins = "gpio44", "gpio45",
- "gpio46", "gpio47";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi10_cs_gpio: qup-spi10-cs-gpio {
+ pins = "gpio132";
+ function = "gpio";
};
- qup_spi16_default: qup-spi16-default {
- mux {
- pins = "gpio48", "gpio49",
- "gpio50", "gpio51";
- function = "qup16";
- };
+ qup_spi10_data_clk: qup-spi10-data-clk {
+ pins = "gpio129", "gpio130",
+ "gpio131";
+ function = "qup10";
+ };
- config {
- pins = "gpio48", "gpio49",
- "gpio50", "gpio51";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi11_cs: qup-spi11-cs {
+ pins = "gpio63";
+ function = "qup11";
};
- qup_spi17_default: qup-spi17-default {
- mux {
- pins = "gpio52", "gpio53",
- "gpio54", "gpio55";
- function = "qup17";
- };
+ qup_spi11_cs_gpio: qup-spi11-cs-gpio {
+ pins = "gpio63";
+ function = "gpio";
+ };
- config {
- pins = "gpio52", "gpio53",
- "gpio54", "gpio55";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi11_data_clk: qup-spi11-data-clk {
+ pins = "gpio60", "gpio61",
+ "gpio62";
+ function = "qup11";
};
- qup_spi18_default: qup-spi18-default {
- mux {
- pins = "gpio56", "gpio57",
- "gpio58", "gpio59";
- function = "qup18";
- };
+ qup_spi12_cs: qup-spi12-cs {
+ pins = "gpio35";
+ function = "qup12";
+ };
- config {
- pins = "gpio56", "gpio57",
- "gpio58", "gpio59";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi12_cs_gpio: qup-spi12-cs-gpio {
+ pins = "gpio35";
+ function = "gpio";
};
- qup_spi19_default: qup-spi19-default {
- mux {
- pins = "gpio0", "gpio1",
- "gpio2", "gpio3";
- function = "qup19";
- };
+ qup_spi12_data_clk: qup-spi12-data-clk {
+ pins = "gpio32", "gpio33",
+ "gpio34";
+ function = "qup12";
+ };
- config {
- pins = "gpio0", "gpio1",
- "gpio2", "gpio3";
- drive-strength = <6>;
- bias-disable;
- };
+ qup_spi13_cs: qup-spi13-cs {
+ pins = "gpio39";
+ function = "qup13";
+ };
+
+ qup_spi13_cs_gpio: qup-spi13-cs-gpio {
+ pins = "gpio39";
+ function = "gpio";
+ };
+
+ qup_spi13_data_clk: qup-spi13-data-clk {
+ pins = "gpio36", "gpio37",
+ "gpio38";
+ function = "qup13";
+ };
+
+ qup_spi14_cs: qup-spi14-cs {
+ pins = "gpio43";
+ function = "qup14";
+ };
+
+ qup_spi14_cs_gpio: qup-spi14-cs-gpio {
+ pins = "gpio43";
+ function = "gpio";
+ };
+
+ qup_spi14_data_clk: qup-spi14-data-clk {
+ pins = "gpio40", "gpio41",
+ "gpio42";
+ function = "qup14";
+ };
+
+ qup_spi15_cs: qup-spi15-cs {
+ pins = "gpio47";
+ function = "qup15";
+ };
+
+ qup_spi15_cs_gpio: qup-spi15-cs-gpio {
+ pins = "gpio47";
+ function = "gpio";
+ };
+
+ qup_spi15_data_clk: qup-spi15-data-clk {
+ pins = "gpio44", "gpio45",
+ "gpio46";
+ function = "qup15";
+ };
+
+ qup_spi16_cs: qup-spi16-cs {
+ pins = "gpio51";
+ function = "qup16";
+ };
+
+ qup_spi16_cs_gpio: qup-spi16-cs-gpio {
+ pins = "gpio51";
+ function = "gpio";
+ };
+
+ qup_spi16_data_clk: qup-spi16-data-clk {
+ pins = "gpio48", "gpio49",
+ "gpio50";
+ function = "qup16";
+ };
+
+ qup_spi17_cs: qup-spi17-cs {
+ pins = "gpio55";
+ function = "qup17";
+ };
+
+ qup_spi17_cs_gpio: qup-spi17-cs-gpio {
+ pins = "gpio55";
+ function = "gpio";
+ };
+
+ qup_spi17_data_clk: qup-spi17-data-clk {
+ pins = "gpio52", "gpio53",
+ "gpio54";
+ function = "qup17";
+ };
+
+ qup_spi18_cs: qup-spi18-cs {
+ pins = "gpio59";
+ function = "qup18";
+ };
+
+ qup_spi18_cs_gpio: qup-spi18-cs-gpio {
+ pins = "gpio59";
+ function = "gpio";
+ };
+
+ qup_spi18_data_clk: qup-spi18-data-clk {
+ pins = "gpio56", "gpio57",
+ "gpio58";
+ function = "qup18";
+ };
+
+ qup_spi19_cs: qup-spi19-cs {
+ pins = "gpio3";
+ function = "qup19";
+ };
+
+ qup_spi19_cs_gpio: qup-spi19-cs-gpio {
+ pins = "gpio3";
+ function = "gpio";
+ };
+
+ qup_spi19_data_clk: qup-spi19-data-clk {
+ pins = "gpio0", "gpio1",
+ "gpio2";
+ function = "qup19";
};
qup_uart2_default: qup-uart2-default {
@@ -3754,7 +3806,7 @@
(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
<GIC_PPI 11
(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 12
+ <GIC_PPI 10
(GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
};
diff --git a/arch/arm64/boot/dts/qcom/sm8350-hdk.dts b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts
new file mode 100644
index 000000000000..f23a0cf3f7b7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sm8350-hdk.dts
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020-2021, Linaro Limited
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include "sm8350.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SM8350 HDK";
+ compatible = "qcom,sm8350-hdk", "qcom,sm8350";
+
+ aliases {
+ serial0 = &uart2;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&adsp {
+ status = "okay";
+ firmware-name = "qcom/sm8350/adsp.mbn";
+};
+
+&apps_rsc {
+ pm8350-rpmh-regulators {
+ compatible = "qcom,pm8350-rpmh-regulators";
+ qcom,pmic-id = "b";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-s9-supply = <&vph_pwr>;
+ vdd-s10-supply = <&vph_pwr>;
+ vdd-s11-supply = <&vph_pwr>;
+ vdd-s12-supply = <&vph_pwr>;
+
+ vdd-l1-l4-supply = <&vreg_s11b_0p95>;
+ vdd-l2-l7-supply = <&vreg_bob>;
+ vdd-l3-l5-supply = <&vreg_bob>;
+ vdd-l6-l9-l10-supply = <&vreg_s11b_0p95>;
+
+ vreg_s10b_1p8: smps10 {
+ regulator-name = "vreg_s10b_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s11b_0p95: smps11 {
+ regulator-name = "vreg_s11b_0p95";
+ regulator-min-microvolt = <952000>;
+ regulator-max-microvolt = <952000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s12b_1p25: smps12 {
+ regulator-name = "vreg_s12b_1p25";
+ regulator-min-microvolt = <1256000>;
+ regulator-max-microvolt = <1256000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1b_0p88: ldo1 {
+ regulator-name = "vreg_l1b_0p88";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2b_3p07: ldo2 {
+ regulator-name = "vreg_l2b_3p07";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3b_0p9: ldo3 {
+ regulator-name = "vreg_l3b_0p9";
+ regulator-min-microvolt = <904000>;
+ regulator-max-microvolt = <904000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5b_0p88: ldo5 {
+ regulator-name = "vreg_l5b_0p88";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <888000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l6b_1p2: ldo6 {
+ regulator-name = "vreg_l6b_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1208000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l7b_2p96: ldo7 {
+ regulator-name = "vreg_l7b_2p96";
+ regulator-min-microvolt = <2504000>;
+ regulator-max-microvolt = <2504000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ };
+
+ vreg_l9b_1p2: ldo9 {
+ regulator-name = "vreg_l9b_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ regulator-allow-set-load;
+ };
+ };
+
+ pm8350c-rpmh-regulators {
+ compatible = "qcom,pm8350c-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-s9-supply = <&vph_pwr>;
+ vdd-s10-supply = <&vph_pwr>;
+
+ vdd-l1-l12-supply = <&vreg_s1c_1p86>;
+ vdd-l2-l8-supply = <&vreg_s1c_1p86>;
+ vdd-l3-l4-l5-l7-l13-supply = <&vreg_bob>;
+ vdd-l6-l9-l11-supply = <&vreg_bob>;
+ vdd-l10-supply = <&vreg_s12b_1p25>;
+
+ vdd-bob-supply = <&vph_pwr>;
+
+ vreg_s1c_1p86: smps1 {
+ regulator-name = "vreg_s1c_1p86";
+ regulator-min-microvolt = <1856000>;
+ regulator-max-microvolt = <1880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_bob: bob {
+ regulator-name = "vreg_bob";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ };
+
+ vreg_l1c_1p8: ldo1 {
+ regulator-name = "vreg_l1c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c_1p8: ldo2 {
+ regulator-name = "vreg_l2c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6c_1p8: ldo6 {
+ regulator-name = "vreg_l6c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9c_2p96: ldo9 {
+ regulator-name = "vreg_l9c_2p96";
+ regulator-min-microvolt = <2960000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10c_1p2: ldo10 {
+ regulator-name = "vreg_l10c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&cdsp {
+ status = "okay";
+ firmware-name = "qcom/sm8350/cdsp.mbn";
+};
+
+&mpss {
+ status = "okay";
+ firmware-name = "qcom/sm8350/modem.mbn";
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&slpi {
+ status = "okay";
+ firmware-name = "qcom/sm8350/slpi.mbn";
+};
+
+&tlmm {
+ gpio-reserved-ranges = <52 8>;
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&ufs_mem_hc {
+ status = "okay";
+
+ reset-gpios = <&tlmm 203 GPIO_ACTIVE_LOW>;
+
+ vcc-supply = <&vreg_l7b_2p96>;
+ vcc-max-microamp = <800000>;
+ vccq-supply = <&vreg_l9b_1p2>;
+ vccq-max-microamp = <900000>;
+};
+
+&ufs_mem_phy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l5b_0p88>;
+ vdda-max-microamp = <91600>;
+ vdda-pll-supply = <&vreg_l6b_1p2>;
+ vdda-pll-max-microamp = <19000>;
+};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ /* TODO: Define USB-C connector properly */
+ dr_mode = "peripheral";
+};
+
+&usb_1_hsphy {
+ status = "okay";
+
+ vdda-pll-supply = <&vreg_l5b_0p88>;
+ vdda18-supply = <&vreg_l1c_1p8>;
+ vdda33-supply = <&vreg_l2b_3p07>;
+};
+
+&usb_1_qmpphy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l6b_1p2>;
+ vdda-pll-supply = <&vreg_l1b_0p88>;
+};
+
+&usb_2 {
+ status = "okay";
+};
+
+&usb_2_dwc3 {
+ dr_mode = "host";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_hub_enabled_state>;
+};
+
+&usb_2_hsphy {
+ status = "okay";
+
+ vdda-pll-supply = <&vreg_l5b_0p88>;
+ vdda18-supply = <&vreg_l1c_1p8>;
+ vdda33-supply = <&vreg_l2b_3p07>;
+};
+
+&usb_2_qmpphy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l6b_1p2>;
+ vdda-pll-supply = <&vreg_l5b_0p88>;
+};
+
+/* PINCTRL - additions to nodes defined in sm8350.dtsi */
+
+&tlmm {
+ usb_hub_enabled_state: usb-hub-enabled-state {
+ pins = "gpio42";
+ function = "gpio";
+
+ drive-strength = <2>;
+ output-low;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8350-mtp.dts b/arch/arm64/boot/dts/qcom/sm8350-mtp.dts
index 8923657579fb..6ca638b4e321 100644
--- a/arch/arm64/boot/dts/qcom/sm8350-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/sm8350-mtp.dts
@@ -5,8 +5,15 @@
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
#include "sm8350.dtsi"
+#include "pm8350.dtsi"
+#include "pm8350b.dtsi"
+#include "pm8350c.dtsi"
+#include "pmk8350.dtsi"
+#include "pmr735a.dtsi"
+#include "pmr735b.dtsi"
/ {
model = "Qualcomm Technologies, Inc. sm8350 MTP";
@@ -31,6 +38,11 @@
};
};
+&adsp {
+ status = "okay";
+ firmware-name = "qcom/sm8350/adsp.mbn";
+};
+
&apps_rsc {
pm8350-rpmh-regulators {
compatible = "qcom,pm8350-rpmh-regulators";
@@ -56,57 +68,67 @@
vdd-l8-supply = <&vreg_s2c_0p8>;
vreg_s10b_1p8: smps10 {
+ regulator-name = "vreg_s10b_1p8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
};
vreg_s11b_0p95: smps11 {
+ regulator-name = "vreg_s11b_0p95";
regulator-min-microvolt = <752000>;
regulator-max-microvolt = <1000000>;
};
vreg_s12b_1p25: smps12 {
+ regulator-name = "vreg_s12b_1p25";
regulator-min-microvolt = <1224000>;
regulator-max-microvolt = <1360000>;
};
vreg_l1b_0p88: ldo1 {
+ regulator-name = "vreg_l1b_0p88";
regulator-min-microvolt = <912000>;
regulator-max-microvolt = <920000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l2b_3p07: ldo2 {
+ regulator-name = "vreg_l2b_3p07";
regulator-min-microvolt = <3072000>;
regulator-max-microvolt = <3072000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l3b_0p9: ldo3 {
+ regulator-name = "vreg_l3b_0p9";
regulator-min-microvolt = <904000>;
regulator-max-microvolt = <904000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l5b_0p88: ldo5 {
+ regulator-name = "vreg_l3b_0p9";
regulator-min-microvolt = <880000>;
regulator-max-microvolt = <888000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l6b_1p2: ldo6 {
+ regulator-name = "vreg_l6b_1p2";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1208000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7b_2p96: ldo7 {
+ regulator-name = "vreg_l7b_2p96";
regulator-min-microvolt = <2400000>;
regulator-max-microvolt = <3008000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l9b_1p2: ldo9 {
+ regulator-name = "vreg_l9b_1p2";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
@@ -137,99 +159,116 @@
vdd-bob-supply = <&vph_pwr>;
vreg_s1c_1p86: smps1 {
+ regulator-name = "vreg_s1c_1p86";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1952000>;
};
vreg_s2c_0p8: smps2 {
+ regulator-name = "vreg_s2c_0p8";
regulator-min-microvolt = <640000>;
regulator-max-microvolt = <1000000>;
};
vreg_s10c_1p05: smps10 {
+ regulator-name = "vreg_s10c_1p05";
regulator-min-microvolt = <1048000>;
regulator-max-microvolt = <1128000>;
};
vreg_bob: bob {
+ regulator-name = "vreg_bob";
regulator-min-microvolt = <3008000>;
regulator-max-microvolt = <3960000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
};
vreg_l1c_1p8: ldo1 {
+ regulator-name = "vreg_l1c_1p8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l2c_1p8: ldo2 {
+ regulator-name = "vreg_l2c_1p8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l3c_3p0: ldo3 {
+ regulator-name = "vreg_l3c_3p0";
regulator-min-microvolt = <3008000>;
regulator-max-microvolt = <3008000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l4c_uim1: ldo4 {
+ regulator-name = "vreg_l4c_uim1";
regulator-min-microvolt = <1704000>;
regulator-max-microvolt = <3000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l5c_uim2: ldo5 {
+ regulator-name = "vreg_l5c_uim2";
regulator-min-microvolt = <1704000>;
regulator-max-microvolt = <3000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l6c_1p8: ldo6 {
+ regulator-name = "vreg_l6c_1p8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2960000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l7c_3p0: ldo7 {
+ regulator-name = "vreg_l7c_3p0";
regulator-min-microvolt = <3008000>;
regulator-max-microvolt = <3008000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l8c_1p8: ldo8 {
+ regulator-name = "vreg_l8c_1p8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l9c_2p96: ldo9 {
+ regulator-name = "vreg_l9c_2p96";
regulator-min-microvolt = <2960000>;
regulator-max-microvolt = <3008000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l10c_1p2: ldo10 {
+ regulator-name = "vreg_l10c_1p2";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l11c_2p96: ldo11 {
+ regulator-name = "vreg_l11c_2p96";
regulator-min-microvolt = <2400000>;
regulator-max-microvolt = <3008000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l12c_1p8: ldo12 {
+ regulator-name = "vreg_l12c_1p8";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <2000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
};
vreg_l13c_3p0: ldo13 {
+ regulator-name = "vreg_l13c_3p0";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <3000000>;
regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
@@ -237,10 +276,25 @@
};
};
+&cdsp {
+ status = "okay";
+ firmware-name = "qcom/sm8350/cdsp.mbn";
+};
+
+&mpss {
+ status = "okay";
+ firmware-name = "qcom/sm8350/modem.mbn";
+};
+
&qupv3_id_1 {
status = "okay";
};
+&slpi {
+ status = "okay";
+ firmware-name = "qcom/sm8350/slpi.mbn";
+};
+
&tlmm {
gpio-reserved-ranges = <52 8>;
};
@@ -248,3 +302,65 @@
&uart2 {
status = "okay";
};
+
+&ufs_mem_hc {
+ status = "okay";
+
+ reset-gpios = <&tlmm 203 GPIO_ACTIVE_LOW>;
+
+ vcc-supply = <&vreg_l7b_2p96>;
+ vcc-max-microamp = <800000>;
+ vccq-supply = <&vreg_l9b_1p2>;
+ vccq-max-microamp = <900000>;
+};
+
+&ufs_mem_phy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l5b_0p88>;
+ vdda-max-microamp = <91600>;
+ vdda-pll-supply = <&vreg_l6b_1p2>;
+ vdda-pll-max-microamp = <19000>;
+};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "peripheral";
+};
+
+&usb_1_hsphy {
+ status = "okay";
+
+ vdda-pll-supply = <&vreg_l5b_0p88>;
+ vdda18-supply = <&vreg_l1c_1p8>;
+ vdda33-supply = <&vreg_l2b_3p07>;
+};
+
+&usb_1_qmpphy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l6b_1p2>;
+ vdda-pll-supply = <&vreg_l1b_0p88>;
+};
+
+&usb_2 {
+ status = "okay";
+};
+
+&usb_2_hsphy {
+ status = "okay";
+
+ vdda-pll-supply = <&vreg_l5b_0p88>;
+ vdda18-supply = <&vreg_l1c_1p8>;
+ vdda33-supply = <&vreg_l2b_3p07>;
+};
+
+&usb_2_qmpphy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l6b_1p2>;
+ vdda-pll-supply = <&vreg_l5b_0p88>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
index 5ef460458f5c..ed0b51bc03ea 100644
--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
@@ -1,14 +1,16 @@
// SPDX-License-Identifier: BSD-3-Clause
/*
- * Copyright (c) 2020, Linaro Limaited
+ * Copyright (c) 2020, Linaro Limited
*/
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-sm8350.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/mailbox/qcom-ipcc.h>
#include <dt-bindings/power/qcom-aoss-qmp.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/soc/qcom,rpmh-rsc.h>
+#include <dt-bindings/thermal/thermal.h>
/ {
interrupt-parent = <&intc>;
@@ -43,6 +45,8 @@
reg = <0x0 0x0>;
enable-method = "psci";
next-level-cache = <&L2_0>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
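+ /* the two cooling cells encode the minimum and maximum cooling state */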
+ #cooling-cells = <2>;
L2_0: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -58,6 +62,8 @@
reg = <0x0 0x100>;
enable-method = "psci";
next-level-cache = <&L2_100>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ #cooling-cells = <2>;
L2_100: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -70,6 +76,8 @@
reg = <0x0 0x200>;
enable-method = "psci";
next-level-cache = <&L2_200>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ #cooling-cells = <2>;
L2_200: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -82,6 +90,8 @@
reg = <0x0 0x300>;
enable-method = "psci";
next-level-cache = <&L2_300>;
+ qcom,freq-domain = <&cpufreq_hw 0>;
+ #cooling-cells = <2>;
L2_300: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -94,6 +104,8 @@
reg = <0x0 0x400>;
enable-method = "psci";
next-level-cache = <&L2_400>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ #cooling-cells = <2>;
L2_400: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -106,6 +118,8 @@
reg = <0x0 0x500>;
enable-method = "psci";
next-level-cache = <&L2_500>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ #cooling-cells = <2>;
L2_500: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -119,6 +133,8 @@
reg = <0x0 0x600>;
enable-method = "psci";
next-level-cache = <&L2_600>;
+ qcom,freq-domain = <&cpufreq_hw 1>;
+ #cooling-cells = <2>;
L2_600: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -131,6 +147,8 @@
reg = <0x0 0x700>;
enable-method = "psci";
next-level-cache = <&L2_700>;
+ qcom,freq-domain = <&cpufreq_hw 2>;
+ #cooling-cells = <2>;
L2_700: l2-cache {
compatible = "cache";
next-level-cache = <&L3_0>;
@@ -153,7 +171,7 @@
pmu {
compatible = "arm,armv8-pmuv3";
- interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
};
psci {
@@ -257,6 +275,15 @@
no-map;
};
+ rmtfs_mem: memory@9b800000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0x0 0x9b800000 0x0 0x280000>;
+ no-map;
+
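+ /* vmid 15 grants the modem VM (QCOM_SCM_VMID_MSS_MSA) access */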
+ qcom,client-id = <1>;
+ qcom,vmid = <15>;
+ };
+
hyp_reserved_mem: memory@d0000000 {
reg = <0x0 0xd0000000 0x0 0x800000>;
no-map;
@@ -294,6 +321,102 @@
hwlocks = <&tcsr_mutex 3>;
};
+ smp2p-adsp {
+ compatible = "qcom,smp2p";
+ qcom,smem = <443>, <429>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <2>;
+
+ smp2p_adsp_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_adsp_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-cdsp {
+ compatible = "qcom,smp2p";
+ qcom,smem = <94>, <432>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <5>;
+
+ smp2p_cdsp_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_cdsp_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-modem {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ smp2p_modem_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_modem_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ smp2p-slpi {
+ compatible = "qcom,smp2p";
+ qcom,smem = <481>, <430>;
+ interrupts-extended = <&ipcc IPCC_CLIENT_SLPI
+ IPCC_MPROC_SIGNAL_SMP2P
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_SLPI
+ IPCC_MPROC_SIGNAL_SMP2P>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <3>;
+
+ smp2p_slpi_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ #qcom,smem-state-cells = <1>;
+ };
+
+ smp2p_slpi_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
soc: soc@0 {
#address-cells = <2>;
#size-cells = <2>;
@@ -324,8 +447,8 @@
compatible = "qcom,geni-se-qup";
reg = <0x0 0x009c0000 0x0 0x6000>;
clock-names = "m-ahb", "s-ahb";
- clocks = <&gcc 121>,
- <&gcc 122>;
+ clocks = <&gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
#address-cells = <2>;
#size-cells = <2>;
ranges;
@@ -335,7 +458,7 @@
compatible = "qcom,geni-debug-uart";
reg = <0 0x0098c000 0 0x4000>;
clock-names = "se";
- clocks = <&gcc 83>;
+ clocks = <&gcc GCC_QUPV3_WRAP0_S3_CLK>;
pinctrl-names = "default";
pinctrl-0 = <&qup_uart3_default_state>;
interrupts = <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>;
@@ -345,12 +468,215 @@
};
};
+ apps_smmu: iommu@15000000 {
+ compatible = "qcom,sm8350-smmu-500", "arm,mmu-500";
+ reg = <0 0x15000000 0 0x100000>;
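+ /* each iommus specifier carries a stream id and a mask */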
+ #iommu-cells = <2>;
+ #global-interrupts = <2>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 396 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 398 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 399 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 400 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 401 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 402 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 403 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 404 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 409 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 412 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 690 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 691 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 692 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 693 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 694 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 695 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 696 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 707 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ config_noc: interconnect@1500000 {
+ compatible = "qcom,sm8350-config-noc";
+ reg = <0 0x01500000 0 0xa580>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mc_virt: interconnect@1580000 {
+ compatible = "qcom,sm8350-mc-virt";
+ reg = <0 0x01580000 0 0x1000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ system_noc: interconnect@1680000 {
+ compatible = "qcom,sm8350-system-noc";
+ reg = <0 0x01680000 0 0x1c200>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ aggre1_noc: interconnect@16e0000 {
+ compatible = "qcom,sm8350-aggre1-noc";
+ reg = <0 0x016e0000 0 0x1f180>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ aggre2_noc: interconnect@1700000 {
+ compatible = "qcom,sm8350-aggre2-noc";
+ reg = <0 0x01700000 0 0x33000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ mmss_noc: interconnect@1740000 {
+ compatible = "qcom,sm8350-mmss-noc";
+ reg = <0 0x01740000 0 0x1f080>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ lpass_ag_noc: interconnect@3c40000 {
+ compatible = "qcom,sm8350-lpass-ag-noc";
+ reg = <0 0x03c40000 0 0xf080>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ compute_noc: interconnect@a0c0000 {
+ compatible = "qcom,sm8350-compute-noc";
+ reg = <0 0x0a0c0000 0 0xa180>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
tcsr_mutex: hwlock@1f40000 {
compatible = "qcom,tcsr-mutex";
reg = <0x0 0x01f40000 0x0 0x40000>;
#hwlock-cells = <1>;
};
+ mpss: remoteproc@4080000 {
+ compatible = "qcom,sm8350-mpss-pas";
+ reg = <0x0 0x04080000 0x0 0x4040>;
+
+ interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>,
+ <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 3 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_modem_in 7 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready", "handover",
+ "stop-ack", "shutdown-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
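+ /* rpmhpd indices 0 and 12 are the "cx" and "mss" domains named below */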
+ power-domains = <&aoss_qmp AOSS_QMP_LS_MODEM>,
+ <&rpmhpd 0>,
+ <&rpmhpd 12>;
+ power-domain-names = "load_state", "cx", "mss";
+
+ interconnects = <&mc_virt 0 &mc_virt 1>;
+
+ memory-region = <&pil_modem_mem>;
+
+ qcom,smem-states = <&smp2p_modem_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_MPSS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+ interrupts = <GIC_SPI 449 IRQ_TYPE_EDGE_RISING>;
+ label = "modem";
+ qcom,remote-pid = <1>;
+ };
+ };
+
pdc: interrupt-controller@b220000 {
compatible = "qcom,sm8350-pdc", "qcom,pdc";
reg = <0 0x0b220000 0 0x30000>, <0 0x17c000f0 0 0x60>;
@@ -363,7 +689,29 @@
interrupt-controller;
};
- aoss_qmp: qmp@c300000 {
+ tsens0: thermal-sensor@c263000 {
+ compatible = "qcom,sm8350-tsens", "qcom,tsens-v2";
+ reg = <0 0x0c263000 0 0x1ff>, /* TM */
+ <0 0x0c222000 0 0x8>; /* SROT */
+ #qcom,sensors = <15>;
+ interrupts-extended = <&pdc 26 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 28 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow", "critical";
+ #thermal-sensor-cells = <1>;
+ };
+
+ tsens1: thermal-sensor@c265000 {
+ compatible = "qcom,sm8350-tsens", "qcom,tsens-v2";
+ reg = <0 0x0c265000 0 0x1ff>, /* TM */
+ <0 0x0c223000 0 0x8>; /* SROT */
+ #qcom,sensors = <14>;
+ interrupts-extended = <&pdc 27 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 29 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "uplow", "critical";
+ #thermal-sensor-cells = <1>;
+ };
+
+ aoss_qmp: power-controller@c300000 {
compatible = "qcom,sm8350-aoss-qmp";
reg = <0 0x0c300000 0 0x100000>;
interrupts-extended = <&ipcc IPCC_CLIENT_AOP IPCC_MPROC_SIGNAL_GLINK_QMP
@@ -374,6 +722,24 @@
#power-domain-cells = <1>;
};
+ spmi_bus: spmi@c440000 {
+ compatible = "qcom,spmi-pmic-arb";
+ reg = <0x0 0xc440000 0x0 0x1100>,
+ <0x0 0xc600000 0x0 0x2000000>,
+ <0x0 0xe600000 0x0 0x100000>,
+ <0x0 0xe700000 0x0 0xa0000>,
+ <0x0 0xc40a000 0x0 0x26000>;
+ reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+ interrupt-names = "periph_irq";
+ interrupts-extended = <&pdc 1 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,ee = <0>;
+ qcom,channel = <0>;
+ #address-cells = <2>;
+ #size-cells = <0>;
+ interrupt-controller;
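+ /* interrupt cells: USID, peripheral id, irq number, flags */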
+ #interrupt-cells = <4>;
+ };
+
tlmm: pinctrl@f100000 {
compatible = "qcom,sm8350-tlmm";
reg = <0 0x0f100000 0 0x300000>;
@@ -382,7 +748,7 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
- gpio-ranges = <&tlmm 0 0 203>;
+ gpio-ranges = <&tlmm 0 0 204>;
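+ /* 203 GPIOs plus the ufs_reset pin */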
qup_uart3_default_state: qup-uart3-default-state {
rx {
@@ -396,6 +762,13 @@
};
};
+ rng: rng@10d3000 {
+ compatible = "qcom,prng-ee";
+ reg = <0 0x010d3000 0 0x1000>;
+ clocks = <&rpmhcc RPMH_HWKM_CLK>;
+ clock-names = "core";
+ };
+
intc: interrupt-controller@17a00000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
@@ -486,6 +859,1256 @@
clocks = <&xo_board>;
};
+ rpmhpd: power-controller {
+ compatible = "qcom,sm8350-rpmhpd";
+ #power-domain-cells = <1>;
+ operating-points-v2 = <&rpmhpd_opp_table>;
+
+ rpmhpd_opp_table: opp-table {
+ compatible = "operating-points-v2";
+
+ rpmhpd_opp_ret: opp1 {
+ opp-level = <RPMH_REGULATOR_LEVEL_RETENTION>;
+ };
+
+ rpmhpd_opp_min_svs: opp2 {
+ opp-level = <RPMH_REGULATOR_LEVEL_MIN_SVS>;
+ };
+
+ rpmhpd_opp_low_svs: opp3 {
+ opp-level = <RPMH_REGULATOR_LEVEL_LOW_SVS>;
+ };
+
+ rpmhpd_opp_svs: opp4 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS>;
+ };
+
+ rpmhpd_opp_svs_l1: opp5 {
+ opp-level = <RPMH_REGULATOR_LEVEL_SVS_L1>;
+ };
+
+ rpmhpd_opp_nom: opp6 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM>;
+ };
+
+ rpmhpd_opp_nom_l1: opp7 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L1>;
+ };
+
+ rpmhpd_opp_nom_l2: opp8 {
+ opp-level = <RPMH_REGULATOR_LEVEL_NOM_L2>;
+ };
+
+ rpmhpd_opp_turbo: opp9 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+ };
+
+ rpmhpd_opp_turbo_l1: opp10 {
+ opp-level = <RPMH_REGULATOR_LEVEL_TURBO_L1>;
+ };
+ };
+ };
+
+ apps_bcm_voter: bcm_voter {
+ compatible = "qcom,bcm-voter";
+ };
+ };
+
+ cpufreq_hw: cpufreq@18591000 {
+ compatible = "qcom,sm8350-cpufreq-epss", "qcom,cpufreq-epss";
+ reg = <0 0x18591000 0 0x1000>,
+ <0 0x18592000 0 0x1000>,
+ <0 0x18593000 0 0x1000>;
+ reg-names = "freq-domain0", "freq-domain1", "freq-domain2";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
+ clock-names = "xo", "alternate";
+
+ #freq-domain-cells = <1>;
+ };
+
+ ufs_mem_hc: ufshc@1d84000 {
+ compatible = "qcom,sm8350-ufshc", "qcom,ufshc",
+ "jedec,ufs-2.0";
+ reg = <0 0x01d84000 0 0x3000>;
+ interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
+ phys = <&ufs_mem_phy_lanes>;
+ phy-names = "ufsphy";
+ lanes-per-direction = <2>;
+ #reset-cells = <1>;
+ resets = <&gcc GCC_UFS_PHY_BCR>;
+ reset-names = "rst";
+
+ power-domains = <&gcc UFS_PHY_GDSC>;
+
+ iommus = <&apps_smmu 0xe0 0x0>;
+
+ clock-names =
+ "ref_clk",
+ "core_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk",
+ "rx_lane1_sync_clk";
+ clocks =
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&gcc GCC_UFS_PHY_AHB_CLK>,
+ <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
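+ /* <min max> rates in Hz, one pair per entry in clock-names */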
+ freq-table-hz =
+ <75000000 300000000>,
+ <75000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <75000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <75000000 300000000>,
+ <75000000 300000000>;
+ status = "disabled";
+ };
+
+ ufs_mem_phy: phy@1d87000 {
+ compatible = "qcom,sm8350-qmp-ufs-phy";
+ reg = <0 0x01d87000 0 0xe10>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ #clock-cells = <1>;
+ ranges;
+ clock-names = "ref",
+ "ref_aux";
+ clocks = <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+
+ resets = <&ufs_mem_hc 0>;
+ reset-names = "ufsphy";
+ status = "disabled";
+
+ ufs_mem_phy_lanes: lanes@1d87400 {
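+ /* presumably the tx, rx, pcs, tx2 and rx2 register regions */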
+ reg = <0 0x01d87400 0 0x108>,
+ <0 0x01d87600 0 0x1e0>,
+ <0 0x01d87c00 0 0x1dc>,
+ <0 0x01d87800 0 0x108>,
+ <0 0x01d87a00 0 0x1e0>;
+ #phy-cells = <0>;
+ #clock-cells = <0>;
+ };
+ };
+
+ slpi: remoteproc@5c00000 {
+ compatible = "qcom,sm8350-slpi-pas";
+ reg = <0 0x05c00000 0 0x4000>;
+
+ interrupts-extended = <&pdc 9 IRQ_TYPE_LEVEL_HIGH>,
+ <&smp2p_slpi_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_slpi_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_slpi_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_slpi_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&aoss_qmp AOSS_QMP_LS_SLPI>,
+ <&rpmhpd 4>,
+ <&rpmhpd 5>;
+ power-domain-names = "load_state", "lcx", "lmx";
+
+ memory-region = <&pil_slpi_mem>;
+
+ qcom,smem-states = <&smp2p_slpi_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_SLPI
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_SLPI
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "slpi";
+ qcom,remote-pid = <3>;
+ };
+ };
+
+ cdsp: remoteproc@98900000 {
+ compatible = "qcom,sm8350-cdsp-pas";
+ reg = <0 0x98900000 0 0x1400000>;
+
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+ <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_cdsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&aoss_qmp AOSS_QMP_LS_CDSP>,
+ <&rpmhpd 0>,
+ <&rpmhpd 10>;
+ power-domain-names = "load_state", "cx", "mxc";
+
+ interconnects = <&compute_noc 1 &mc_virt 1>;
+
+ memory-region = <&pil_cdsp_mem>;
+
+ qcom,smem-states = <&smp2p_cdsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_CDSP
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "cdsp";
+ qcom,remote-pid = <5>;
+ };
+ };
+
+ usb_1_hsphy: phy@88e3000 {
+ compatible = "qcom,sm8350-usb-hs-phy",
+ "qcom,usb-snps-hs-7nm-phy";
+ reg = <0 0x088e3000 0 0x400>;
+ status = "disabled";
+ #phy-cells = <0>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "ref";
+
+ resets = <&gcc GCC_QUSB2PHY_PRIM_BCR>;
+ };
+
+ usb_2_hsphy: phy@88e4000 {
+ compatible = "qcom,sm8250-usb-hs-phy",
+ "qcom,usb-snps-hs-7nm-phy";
+ reg = <0 0x088e4000 0 0x400>;
+ status = "disabled";
+ #phy-cells = <0>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "ref";
+
+ resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
+ };
+
+ usb_1_qmpphy: phy-wrapper@88e9000 {
+ compatible = "qcom,sm8350-qmp-usb3-phy";
+ reg = <0 0x088e9000 0 0x200>,
+ <0 0x088e8000 0 0x20>;
+ reg-names = "reg-base", "dp_com";
+ status = "disabled";
+ #clock-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
+ clock-names = "aux", "ref_clk_src", "com_aux";
+
+ resets = <&gcc GCC_USB3_DP_PHY_PRIM_BCR>,
+ <&gcc GCC_USB3_PHY_PRIM_BCR>;
+ reset-names = "phy", "common";
+
+ usb_1_ssphy: phy@88e9200 {
+ reg = <0 0x088e9200 0 0x200>,
+ <0 0x088e9400 0 0x200>,
+ <0 0x088e9c00 0 0x400>,
+ <0 0x088e9600 0 0x200>,
+ <0 0x088e9800 0 0x200>,
+ <0 0x088e9a00 0 0x100>;
+ #phy-cells = <0>;
+ #clock-cells = <1>;
+ clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+ clock-names = "pipe0";
+ clock-output-names = "usb3_phy_pipe_clk_src";
+ };
+ };
+
+ usb_2_qmpphy: phy-wrapper@88eb000 {
+ compatible = "qcom,sm8350-qmp-usb3-uni-phy";
+ reg = <0 0x088eb000 0 0x200>;
+ status = "disabled";
+ #clock-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>,
+ <&rpmhcc RPMH_CXO_CLK>,
+ <&gcc GCC_USB3_SEC_CLKREF_EN>,
+ <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>;
+ clock-names = "aux", "ref_clk_src", "ref", "com_aux";
+
+ resets = <&gcc GCC_USB3PHY_PHY_SEC_BCR>,
+ <&gcc GCC_USB3_PHY_SEC_BCR>;
+ reset-names = "phy", "common";
+
+ usb_2_ssphy: phy@88ebe00 {
+ reg = <0 0x088ebe00 0 0x200>,
+ <0 0x088ec000 0 0x200>,
+ <0 0x088eb200 0 0x1100>;
+ #phy-cells = <0>;
+ #clock-cells = <1>;
+ clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
+ clock-names = "pipe0";
+ clock-output-names = "usb3_uni_phy_pipe_clk_src";
+ };
+ };
+
+ dc_noc: interconnect@90e0000 {
+ compatible = "qcom,sm8350-dc-noc";
+ reg = <0 0x090c0000 0 0x4200>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ gem_noc: interconnect@9100000 {
+ compatible = "qcom,sm8350-gem-noc";
+ reg = <0 0x09100000 0 0xb4000>;
+ #interconnect-cells = <1>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ usb_1: usb@a6f8800 {
+ compatible = "qcom,sm8350-dwc3", "qcom,dwc3";
+ reg = <0 0x0a6f8800 0 0x400>;
+ status = "disabled";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks = <&gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>,
+ <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
+ <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_PRIM_SLEEP_CLK>;
+ clock-names = "cfg_noc", "core", "iface", "mock_utmi",
+ "sleep";
+
+ assigned-clocks = <&gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_PRIM_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 14 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 15 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 17 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hs_phy_irq", "dp_hs_phy_irq",
+ "dm_hs_phy_irq", "ss_phy_irq";
+
+ power-domains = <&gcc USB30_PRIM_GDSC>;
+
+ resets = <&gcc GCC_USB30_PRIM_BCR>;
+
+ usb_1_dwc3: dwc3@a600000 {
+ compatible = "snps,dwc3";
+ reg = <0 0x0a600000 0 0xcd00>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x0 0x0>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+
+ usb_2: usb@a8f8800 {
+ compatible = "qcom,sm8350-dwc3", "qcom,dwc3";
+ reg = <0 0x0a8f8800 0 0x400>;
+ status = "disabled";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks = <&gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
+ <&gcc GCC_USB30_SEC_MASTER_CLK>,
+ <&gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
+ <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_SEC_SLEEP_CLK>,
+ <&gcc GCC_USB3_SEC_CLKREF_EN>;
+ clock-names = "cfg_noc", "core", "iface", "mock_utmi",
+ "sleep", "xo";
+
+ assigned-clocks = <&gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
+ <&gcc GCC_USB30_SEC_MASTER_CLK>;
+ assigned-clock-rates = <19200000>, <200000000>;
+
+ interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 12 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 13 IRQ_TYPE_EDGE_BOTH>,
+ <&pdc 16 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "hs_phy_irq", "dp_hs_phy_irq",
+ "dm_hs_phy_irq", "ss_phy_irq";
+
+ power-domains = <&gcc USB30_SEC_GDSC>;
+
+ resets = <&gcc GCC_USB30_SEC_BCR>;
+
+ usb_2_dwc3: dwc3@a800000 {
+ compatible = "snps,dwc3";
+ reg = <0 0x0a800000 0 0xcd00>;
+ interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ iommus = <&apps_smmu 0x20 0x0>;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_enblslpm_quirk;
+ phys = <&usb_2_hsphy>, <&usb_2_ssphy>;
+ phy-names = "usb2-phy", "usb3-phy";
+ };
+ };
+
+ adsp: remoteproc@17300000 {
+ compatible = "qcom,sm8350-adsp-pas";
+ reg = <0 0x17300000 0 0x100>;
+
+ interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>,
+ <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+ <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "wdog", "fatal", "ready",
+ "handover", "stop-ack";
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+
+ power-domains = <&aoss_qmp AOSS_QMP_LS_LPASS>,
+ <&rpmhpd 4>,
+ <&rpmhpd 5>;
+ power-domain-names = "load_state", "lcx", "lmx";
+
+ memory-region = <&pil_adsp_mem>;
+
+ qcom,smem-states = <&smp2p_adsp_out 0>;
+ qcom,smem-state-names = "stop";
+
+ status = "disabled";
+
+ glink-edge {
+ interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP
+ IRQ_TYPE_EDGE_RISING>;
+ mboxes = <&ipcc IPCC_CLIENT_LPASS
+ IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+ label = "lpass";
+ qcom,remote-pid = <2>;
+ };
+ };
+ };
+
+ thermal-zones {
+ cpu0-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 1>;
+
+ trips {
+ cpu0_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu0_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu0_crit: cpu_crit {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu0_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu0_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu1-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 2>;
+
+ trips {
+ cpu1_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu1_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu1_crit: cpu_crit {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu1_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu1_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu2-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 3>;
+
+ trips {
+ cpu2_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu2_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu2_crit: cpu_crit {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu2_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu2_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu3-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 4>;
+
+ trips {
+ cpu3_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu3_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu3_crit: cpu_crit {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu3_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu3_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu4-top-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 7>;
+
+ trips {
+ cpu4_top_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu4_top_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu4_top_crit: cpu_crit {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu4_top_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu4_top_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu5-top-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 8>;
+
+ trips {
+ cpu5_top_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu5_top_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu5_top_crit: cpu_crit {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu5_top_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu5_top_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu6-top-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 9>;
+
+ trips {
+ cpu6_top_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu6_top_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu6_top_crit: cpu_crit {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu6_top_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu6_top_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu7-top-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 10>;
+
+ trips {
+ cpu7_top_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu7_top_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu7_top_crit: cpu_crit {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu7_top_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu7_top_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu4-bottom-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 11>;
+
+ trips {
+ cpu4_bottom_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu4_bottom_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu4_bottom_crit: cpu_crit {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu4_bottom_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu4_bottom_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu5-bottom-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 12>;
+
+ trips {
+ cpu5_bottom_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu5_bottom_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu5_bottom_crit: cpu_crit {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu5_bottom_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu5_bottom_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu6-bottom-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 13>;
+
+ trips {
+ cpu6_bottom_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu6_bottom_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu6_bottom_crit: cpu_crit {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu6_bottom_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu6_bottom_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu7-bottom-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 14>;
+
+ trips {
+ cpu7_bottom_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu7_bottom_alert1: trip-point1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ cpu7_bottom_crit: cpu_crit {
+ temperature = <110000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu7_bottom_alert0>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ map1 {
+ trip = <&cpu7_bottom_alert1>;
+ cooling-device = <&CPU4 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU5 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU6 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+ <&CPU7 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ aoss0-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 0>;
+
+ trips {
+ aoss0_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ cluster0-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 5>;
+
+ trips {
+ cluster0_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ cluster0_crit: cluster0_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cluster1-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens0 6>;
+
+ trips {
+ cluster1_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ cluster1_crit: cluster1_crit {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ aoss1-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 0>;
+
+ trips {
+ aoss1_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ gpu-thermal-top {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 1>;
+
+ trips {
+ gpu1_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+ };
+ };
+
+ gpu-thermal-bottom {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 2>;
+
+ trips {
+ gpu2_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+ };
+ };
+
+ nspss1-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 3>;
+
+ trips {
+ nspss1_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+ };
+ };
+
+ nspss2-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 4>;
+
+ trips {
+ nspss2_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+ };
+ };
+
+ nspss3-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 5>;
+
+ trips {
+ nspss3_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <1000>;
+ type = "hot";
+ };
+ };
+ };
+
+ video-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 6>;
+
+ trips {
+ video_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ mem-thermal {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 7>;
+
+ trips {
+ mem_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ modem1-thermal-top {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 8>;
+
+ trips {
+ modem1_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ modem2-thermal-top {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 9>;
+
+ trips {
+ modem2_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ modem3-thermal-top {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 10>;
+
+ trips {
+ modem3_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ modem4-thermal-top {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 11>;
+
+ trips {
+ modem4_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ camera-thermal-top {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 12>;
+
+ trips {
+ camera1_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
+ };
+
+ camera-thermal-bottom {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens1 13>;
+
+ trips {
+ camera2_alert0: trip-point0 {
+ temperature = <90000>;
+ hysteresis = <2000>;
+ type = "hot";
+ };
+ };
};
};
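
Note: every zone above follows one pattern: thermal-sensors = <&tsensN M> selects channel M of a TSENS block, the two passive trips drive the CPU cooling maps, and the critical trip requests a shutdown. The trips are labelled (cpu0_alert0 and so on) precisely so a board .dts can retune a threshold without restating the whole zone. A minimal sketch of such an override; the 85000 (85 C) value is illustrative, not from this patch:

	/* board-level retune of a labelled trip; value is hypothetical */
	&cpu0_alert0 {
		temperature = <85000>;
	};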
diff --git a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
index 30c169b08536..d8046fedf9c1 100644
--- a/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
+++ b/arch/arm64/boot/dts/renesas/beacon-renesom-baseboard.dtsi
@@ -615,7 +615,7 @@
};
&rcar_sound {
- pinctrl-0 = <&sound_pins &sound_clk_pins>;
+ pinctrl-0 = <&sound_pins>, <&sound_clk_pins>;
pinctrl-names = "default";
/* Single DAI */
@@ -639,7 +639,7 @@
bitclock-master = <&rsnd_endpoint0>;
frame-master = <&rsnd_endpoint0>;
- playback = <&ssi1 &dvc1 &src1>;
+ playback = <&ssi1>, <&dvc1>, <&src1>;
capture = <&ssi0>;
};
};
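
Note: the <&sound_pins &sound_clk_pins> to <&sound_pins>, <&sound_clk_pins> rewrites repeated throughout this series compile to the same cells, since dtc flattens both forms identically; the comma style simply groups one phandle (plus any argument cells) per bracket pair, which is what the json-schema bindings expect, so dtbs_check stops warning about these properties. A sketch of the preferred style, labels illustrative:

	/* one phandle group per <...>, comma-separated (labels are hypothetical) */
	pinctrl-0 = <&uart_pins>, <&uart_clk_pins>;
	pinctrl-names = "default";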
diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
index 7a3da9b06f67..0c7e6f790590 100644
--- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
@@ -12,6 +12,9 @@
aliases {
serial0 = &scif2;
serial1 = &hscif0;
+ mmc0 = &sdhi3;
+ mmc1 = &sdhi0;
+ mmc2 = &sdhi2;
};
chosen {
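
Note: the mmc0/mmc1/mmc2 aliases being added across these board files pin the mmcblk indices the MMC core hands out, so /dev/mmcblk0 no longer depends on probe order and root=/dev/mmcblkNpM command lines or fstab entries stay valid. The eMMC (sdhi3 here) deliberately takes mmc0 so fixed storage keeps the lowest index. The idiom, with illustrative labels:

	aliases {
		mmc0 = &emmc_ctrl;	/* hypothetical label: non-removable device first */
		mmc1 = &sd_slot;	/* hypothetical label: removable slot after it */
	};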
diff --git a/arch/arm64/boot/dts/renesas/hihope-rev4.dtsi b/arch/arm64/boot/dts/renesas/hihope-rev4.dtsi
index 929f4a1d3f90..7fc0339a3ac9 100644
--- a/arch/arm64/boot/dts/renesas/hihope-rev4.dtsi
+++ b/arch/arm64/boot/dts/renesas/hihope-rev4.dtsi
@@ -80,7 +80,7 @@
};
&rcar_sound {
- pinctrl-0 = <&sound_pins &sound_clk_pins>;
+ pinctrl-0 = <&sound_pins>, <&sound_clk_pins>;
pinctrl-names = "default";
status = "okay";
diff --git a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi
index c62ddb9b2ba5..3771144a2ce4 100644
--- a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi
+++ b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex-aistarvision-mipi-adapter-2.1.dtsi
@@ -14,7 +14,6 @@
ports {
port@0 {
- reg = <0>;
csi20_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
@@ -29,7 +28,6 @@
ports {
port@0 {
- reg = <0>;
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
index 501cb05da228..3cf2e076940f 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
+++ b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
@@ -21,6 +21,9 @@
serial4 = &hscif2;
serial5 = &scif5;
ethernet0 = &avb;
+ mmc0 = &sdhi3;
+ mmc1 = &sdhi0;
+ mmc2 = &sdhi2;
};
chosen {
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
index d64fb8b1b86c..46f8dbf68904 100644
--- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi
@@ -2573,6 +2573,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2628,6 +2632,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
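
Note: the empty port@0 { reg = <0>; } stubs added to the CSI-2 receiver nodes are a binding fix: the parent ports node declares #address-cells/#size-cells, so every child must be port@N with a matching reg, and input port 0 must exist even when the SoC dtsi leaves its endpoint for the board to fill in. The matching board-side edits (port { } becoming port@0 { }, duplicate reg lines dropped) keep the two halves merging into one clean node. A sketch of a board fragment completing the stub; the camera endpoint is hypothetical:

	&csi40 {
		status = "okay";
		ports {
			port@0 {
				csi40_in: endpoint {
					clock-lanes = <0>;
					data-lanes = <1 2>;
					/* hypothetical sensor label */
					remote-endpoint = <&hypothetical_cam_out>;
				};
			};
		};
	};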
diff --git a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
index 71763f4402a7..3c0d59def8ee 100644
--- a/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
+++ b/arch/arm64/boot/dts/renesas/r8a774b1-beacon-rzg2n-kit.dts
@@ -22,6 +22,9 @@
serial5 = &scif5;
serial6 = &scif4;
ethernet0 = &avb;
+ mmc0 = &sdhi3;
+ mmc1 = &sdhi0;
+ mmc2 = &sdhi2;
};
chosen {
diff --git a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
index 5b05474dc272..d16a4be5ef77 100644
--- a/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774b1.dtsi
@@ -2419,6 +2419,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2474,6 +2478,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
index ea87cb5a459c..4e72e4f2bab0 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
+++ b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
@@ -17,6 +17,8 @@
aliases {
serial0 = &scif2;
serial1 = &hscif2;
+ mmc0 = &sdhi0;
+ mmc1 = &sdhi3;
};
chosen {
@@ -351,7 +353,7 @@
};
&rcar_sound {
- pinctrl-0 = <&sound_pins &sound_clk_pins>;
+ pinctrl-0 = <&sound_pins>, <&sound_clk_pins>;
pinctrl-names = "default";
/* Single DAI */
@@ -365,7 +367,7 @@
rcar_sound,dai {
dai0 {
- playback = <&ssi0 &src0 &dvc0>;
+ playback = <&ssi0>, <&src0>, <&dvc0>;
};
};
};
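
Note: in rcar_sound,dai descriptions the playback and capture properties list the in-SoC components that make up the DAI's stream (serial interface, sample-rate converter, digital volume unit here), so the comma-separated rewrite changes bracket style only; the set of components and their order are untouched. A sketch mirroring the pattern, unit numbers illustrative:

	dai0 {
		/* same components as before, one phandle per group */
		playback = <&ssi0>, <&src0>, <&dvc0>;
		capture = <&ssi1>, <&src1>, <&dvc1>;
	};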
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts b/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
index e7b4a929bb17..2e3d1981cac4 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
+++ b/arch/arm64/boot/dts/renesas/r8a774c0-ek874-mipi-2.1.dts
@@ -33,7 +33,7 @@
status = "okay";
ports {
- port {
+ port@0 {
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
index 20fa3caa050e..1aef34447abd 100644
--- a/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774c0.dtsi
@@ -1823,6 +1823,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
index 273f062f2909..7b6649a3ded0 100644
--- a/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
+++ b/arch/arm64/boot/dts/renesas/r8a774e1-beacon-rzg2h-kit.dts
@@ -22,6 +22,9 @@
serial5 = &scif5;
serial6 = &scif4;
ethernet0 = &avb;
+ mmc0 = &sdhi3;
+ mmc1 = &sdhi0;
+ mmc2 = &sdhi2;
};
chosen {
diff --git a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
index 8eb006cbd9af..1f51237ab0a6 100644
--- a/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a774e1.dtsi
@@ -2709,6 +2709,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2764,6 +2768,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77950-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a77950-salvator-x.dts
index 2438825c9b22..3e3b954a4a9d 100644
--- a/arch/arm64/boot/dts/renesas/r8a77950-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77950-salvator-x.dts
@@ -52,29 +52,6 @@
status = "okay";
};
-&hdmi0 {
- status = "okay";
-
- ports {
- port@1 {
- reg = <1>;
- rcar_dw_hdmi0_out: endpoint {
- remote-endpoint = <&hdmi0_con>;
- };
- };
- port@2 {
- reg = <2>;
- dw_hdmi0_snd_in: endpoint {
- remote-endpoint = <&rsnd_endpoint1>;
- };
- };
- };
-};
-
-&hdmi0_con {
- remote-endpoint = <&rcar_dw_hdmi0_out>;
-};
-
&hdmi1 {
status = "okay";
@@ -111,19 +88,7 @@
&rcar_sound {
ports {
- /* rsnd_port0 is on salvator-common */
- rsnd_port1: port@1 {
- reg = <1>;
- rsnd_endpoint1: endpoint {
- remote-endpoint = <&dw_hdmi0_snd_in>;
-
- dai-format = "i2s";
- bitclock-master = <&rsnd_endpoint1>;
- frame-master = <&rsnd_endpoint1>;
-
- playback = <&ssi2>;
- };
- };
+ /* rsnd_port0/1 are described in salvator-common.dtsi */
rsnd_port2: port@2 {
reg = <2>;
rsnd_endpoint2: endpoint {
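
Note: the three Salvator-X/XS board files lose identical hdmi0, hdmi0_con, rsnd_port1 and sound_card blocks because that plumbing now lives once in salvator-common.dtsi (its diff appears later in this patch); the per-board files only delete lines, and the surviving comment points readers at the shared file. The consolidated audio-graph card ends up shaped like this, a sketch condensed from the common file:

	sound_card: sound {
		compatible = "audio-graph-card";
		label = "rcar-sound";
		dais = <&rsnd_port0	/* ak4613 */
			&rsnd_port1>;	/* HDMI0 */
	};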
diff --git a/arch/arm64/boot/dts/renesas/r8a77950.dtsi b/arch/arm64/boot/dts/renesas/r8a77950.dtsi
index d716c4386ae9..b643d3079db1 100644
--- a/arch/arm64/boot/dts/renesas/r8a77950.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77950.dtsi
@@ -29,6 +29,11 @@
<&ipmmu_mp1 30>, <&ipmmu_mp1 31>;
};
+&cluster0_opp {
+ /delete-node/ opp-1600000000;
+ /delete-node/ opp-1700000000;
+};
+
&du {
renesas,vsps = <&vspd0 0>, <&vspd1 0>, <&vspd2 0>, <&vspd3 0>;
};
@@ -187,6 +192,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77951-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a77951-salvator-x.dts
index a402a2fb6e3c..cf2165bdf625 100644
--- a/arch/arm64/boot/dts/renesas/r8a77951-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77951-salvator-x.dts
@@ -52,29 +52,6 @@
status = "okay";
};
-&hdmi0 {
- status = "okay";
-
- ports {
- port@1 {
- reg = <1>;
- rcar_dw_hdmi0_out: endpoint {
- remote-endpoint = <&hdmi0_con>;
- };
- };
- port@2 {
- reg = <2>;
- dw_hdmi0_snd_in: endpoint {
- remote-endpoint = <&rsnd_endpoint1>;
- };
- };
- };
-};
-
-&hdmi0_con {
- remote-endpoint = <&rcar_dw_hdmi0_out>;
-};
-
&hdmi1 {
status = "okay";
@@ -111,19 +88,7 @@
&rcar_sound {
ports {
- /* rsnd_port0 is on salvator-common */
- rsnd_port1: port@1 {
- reg = <1>;
- rsnd_endpoint1: endpoint {
- remote-endpoint = <&dw_hdmi0_snd_in>;
-
- dai-format = "i2s";
- bitclock-master = <&rsnd_endpoint1>;
- frame-master = <&rsnd_endpoint1>;
-
- playback = <&ssi2>;
- };
- };
+ /* rsnd_port0/1 are described in salvator-common.dtsi */
rsnd_port2: port@2 {
reg = <2>;
rsnd_endpoint2: endpoint {
diff --git a/arch/arm64/boot/dts/renesas/r8a77951-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a77951-salvator-xs.dts
index e5922329a4b8..37202fcdc35b 100644
--- a/arch/arm64/boot/dts/renesas/r8a77951-salvator-xs.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77951-salvator-xs.dts
@@ -57,29 +57,6 @@
status = "okay";
};
-&hdmi0 {
- status = "okay";
-
- ports {
- port@1 {
- reg = <1>;
- rcar_dw_hdmi0_out: endpoint {
- remote-endpoint = <&hdmi0_con>;
- };
- };
- port@2 {
- reg = <2>;
- dw_hdmi0_snd_in: endpoint {
- remote-endpoint = <&rsnd_endpoint1>;
- };
- };
- };
-};
-
-&hdmi0_con {
- remote-endpoint = <&rcar_dw_hdmi0_out>;
-};
-
&hdmi1 {
status = "okay";
@@ -152,19 +129,7 @@
&rcar_sound {
ports {
- /* rsnd_port0 is on salvator-common */
- rsnd_port1: port@1 {
- reg = <1>;
- rsnd_endpoint1: endpoint {
- remote-endpoint = <&dw_hdmi0_snd_in>;
-
- dai-format = "i2s";
- bitclock-master = <&rsnd_endpoint1>;
- frame-master = <&rsnd_endpoint1>;
-
- playback = <&ssi2>;
- };
- };
+ /* rsnd_port0/1 are described in salvator-common.dtsi */
rsnd_port2: port@2 {
reg = <2>;
rsnd_endpoint2: endpoint {
diff --git a/arch/arm64/boot/dts/renesas/r8a77951.dtsi b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
index 5c39152e4570..85d66d15465a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77951.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77951.dtsi
@@ -3097,6 +3097,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -3152,6 +3156,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -3191,6 +3199,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77960-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a77960-salvator-x.dts
index ecfbeafeaf36..d5543f26c472 100644
--- a/arch/arm64/boot/dts/renesas/r8a77960-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77960-salvator-x.dts
@@ -35,49 +35,3 @@
clock-names = "du.0", "du.1", "du.2",
"dclkin.0", "dclkin.1", "dclkin.2";
};
-
-&hdmi0 {
- status = "okay";
-
- ports {
- port@1 {
- reg = <1>;
- rcar_dw_hdmi0_out: endpoint {
- remote-endpoint = <&hdmi0_con>;
- };
- };
- port@2 {
- reg = <2>;
- dw_hdmi0_snd_in: endpoint {
- remote-endpoint = <&rsnd_endpoint1>;
- };
- };
- };
-};
-
-&hdmi0_con {
- remote-endpoint = <&rcar_dw_hdmi0_out>;
-};
-
-&rcar_sound {
- ports {
- /* rsnd_port0 is on salvator-common */
- rsnd_port1: port@1 {
- reg = <1>;
- rsnd_endpoint1: endpoint {
- remote-endpoint = <&dw_hdmi0_snd_in>;
-
- dai-format = "i2s";
- bitclock-master = <&rsnd_endpoint1>;
- frame-master = <&rsnd_endpoint1>;
-
- playback = <&ssi2>;
- };
- };
- };
-};
-
-&sound_card {
- dais = <&rsnd_port0 /* ak4613 */
- &rsnd_port1>; /* HDMI0 */
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a77960-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a77960-salvator-xs.dts
index 249896a38fdc..9ebb47b6bf2d 100644
--- a/arch/arm64/boot/dts/renesas/r8a77960-salvator-xs.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77960-salvator-xs.dts
@@ -35,49 +35,3 @@
clock-names = "du.0", "du.1", "du.2",
"dclkin.0", "dclkin.1", "dclkin.2";
};
-
-&hdmi0 {
- status = "okay";
-
- ports {
- port@1 {
- reg = <1>;
- rcar_dw_hdmi0_out: endpoint {
- remote-endpoint = <&hdmi0_con>;
- };
- };
- port@2 {
- reg = <2>;
- dw_hdmi0_snd_in: endpoint {
- remote-endpoint = <&rsnd_endpoint1>;
- };
- };
- };
-};
-
-&hdmi0_con {
- remote-endpoint = <&rcar_dw_hdmi0_out>;
-};
-
-&rcar_sound {
- ports {
- /* rsnd_port0 is on salvator-common */
- rsnd_port1: port@1 {
- reg = <1>;
- rsnd_endpoint1: endpoint {
- remote-endpoint = <&dw_hdmi0_snd_in>;
-
- dai-format = "i2s";
- bitclock-master = <&rsnd_endpoint1>;
- frame-master = <&rsnd_endpoint1>;
-
- playback = <&ssi2>;
- };
- };
- };
-};
-
-&sound_card {
- dais = <&rsnd_port0 /* ak4613 */
- &rsnd_port1>; /* HDMI0 */
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a77960.dtsi b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
index 25d947a81b29..12476e354d74 100644
--- a/arch/arm64/boot/dts/renesas/r8a77960.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77960.dtsi
@@ -2761,6 +2761,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2816,6 +2820,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77961-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a77961-salvator-xs.dts
index 1e7603365106..c7f14177f7b9 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961-salvator-xs.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77961-salvator-xs.dts
@@ -40,49 +40,3 @@
clock-names = "du.0", "du.1", "du.2",
"dclkin.0", "dclkin.1", "dclkin.2";
};
-
-&hdmi0 {
- status = "okay";
-
- ports {
- port@1 {
- reg = <1>;
- rcar_dw_hdmi0_out: endpoint {
- remote-endpoint = <&hdmi0_con>;
- };
- };
- port@2 {
- reg = <2>;
- dw_hdmi0_snd_in: endpoint {
- remote-endpoint = <&rsnd_endpoint1>;
- };
- };
- };
-};
-
-&hdmi0_con {
- remote-endpoint = <&rcar_dw_hdmi0_out>;
-};
-
-&rcar_sound {
- ports {
- /* rsnd_port0 is on salvator-common */
- rsnd_port1: port@1 {
- reg = <1>;
- rsnd_endpoint1: endpoint {
- remote-endpoint = <&dw_hdmi0_snd_in>;
-
- dai-format = "i2s";
- bitclock-master = <&rsnd_endpoint1>;
- frame-master = <&rsnd_endpoint1>;
-
- playback = <&ssi2>;
- };
- };
- };
-};
-
-&sound_card {
- dais = <&rsnd_port0 /* ak4613 */
- &rsnd_port1>; /* HDMI0 */
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a77961-ulcb.dts b/arch/arm64/boot/dts/renesas/r8a77961-ulcb.dts
index 7c6e60f6f32d..294a055f117e 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961-ulcb.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77961-ulcb.dts
@@ -30,3 +30,14 @@
reg = <0x6 0x00000000 0x1 0x00000000>;
};
};
+
+&du {
+ clocks = <&cpg CPG_MOD 724>,
+ <&cpg CPG_MOD 723>,
+ <&cpg CPG_MOD 722>,
+ <&versaclock5 1>,
+ <&versaclock5 3>,
+ <&versaclock5 2>;
+ clock-names = "du.0", "du.1", "du.2",
+ "dclkin.0", "dclkin.1", "dclkin.2";
+};
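
Note: the &du override added for the ULCB keeps the three display-unit module clocks on the CPG but reroutes the dot clocks (dclkin.0/1/2) to outputs 1, 3 and 2 of the board's VersaClock 5 programmable clock generator. clock-names pairs positionally with clocks, so the two lists must stay in lockstep; a sketch trimmed to a single channel:

	&du {
		clocks = <&cpg CPG_MOD 724>,	/* module clock, from the CPG */
			 <&versaclock5 1>;	/* programmable dot clock */
		clock-names = "du.0", "dclkin.0";
	};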
diff --git a/arch/arm64/boot/dts/renesas/r8a77961.dtsi b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
index e8c31ebec097..d9804768425a 100644
--- a/arch/arm64/boot/dts/renesas/r8a77961.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77961.dtsi
@@ -1155,13 +1155,35 @@
};
can0: can@e6c30000 {
+ compatible = "renesas,can-r8a77961",
+ "renesas,rcar-gen3-can";
reg = <0 0xe6c30000 0 0x1000>;
- /* placeholder */
+ interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 916>,
+ <&cpg CPG_CORE R8A77961_CLK_CANFD>,
+ <&can_clk>;
+ clock-names = "clkp1", "clkp2", "can_clk";
+ assigned-clocks = <&cpg CPG_CORE R8A77961_CLK_CANFD>;
+ assigned-clock-rates = <40000000>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 916>;
+ status = "disabled";
};
can1: can@e6c38000 {
+ compatible = "renesas,can-r8a77961",
+ "renesas,rcar-gen3-can";
reg = <0 0xe6c38000 0 0x1000>;
- /* placeholder */
+ interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 915>,
+ <&cpg CPG_CORE R8A77961_CLK_CANFD>,
+ <&can_clk>;
+ clock-names = "clkp1", "clkp2", "can_clk";
+ assigned-clocks = <&cpg CPG_CORE R8A77961_CLK_CANFD>;
+ assigned-clock-rates = <40000000>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 915>;
+ status = "disabled";
};
pwm0: pwm@e6e30000 {
@@ -1397,43 +1419,259 @@
};
vin0: video@e6ef0000 {
+ compatible = "renesas,vin-r8a77961";
reg = <0 0xe6ef0000 0 0x1000>;
- /* placeholder */
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 811>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 811>;
+ renesas,id = <0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <1>;
+
+ vin0csi20: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&csi20vin0>;
+ };
+ vin0csi40: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&csi40vin0>;
+ };
+ };
+ };
};
vin1: video@e6ef1000 {
+ compatible = "renesas,vin-r8a77961";
reg = <0 0xe6ef1000 0 0x1000>;
- /* placeholder */
+ interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 810>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 810>;
+ renesas,id = <1>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <1>;
+
+ vin1csi20: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&csi20vin1>;
+ };
+ vin1csi40: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&csi40vin1>;
+ };
+ };
+ };
};
vin2: video@e6ef2000 {
+ compatible = "renesas,vin-r8a77961";
reg = <0 0xe6ef2000 0 0x1000>;
- /* placeholder */
+ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 809>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 809>;
+ renesas,id = <2>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <1>;
+
+ vin2csi20: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&csi20vin2>;
+ };
+ vin2csi40: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&csi40vin2>;
+ };
+ };
+ };
};
vin3: video@e6ef3000 {
+ compatible = "renesas,vin-r8a77961";
reg = <0 0xe6ef3000 0 0x1000>;
- /* placeholder */
+ interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 808>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 808>;
+ renesas,id = <3>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <1>;
+
+ vin3csi20: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&csi20vin3>;
+ };
+ vin3csi40: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&csi40vin3>;
+ };
+ };
+ };
};
vin4: video@e6ef4000 {
+ compatible = "renesas,vin-r8a77961";
reg = <0 0xe6ef4000 0 0x1000>;
- /* placeholder */
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 807>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 807>;
+ renesas,id = <4>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <1>;
+
+ vin4csi20: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&csi20vin4>;
+ };
+ vin4csi40: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&csi40vin4>;
+ };
+ };
+ };
};
vin5: video@e6ef5000 {
+ compatible = "renesas,vin-r8a77961";
reg = <0 0xe6ef5000 0 0x1000>;
- /* placeholder */
+ interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 806>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 806>;
+ renesas,id = <5>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <1>;
+
+ vin5csi20: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&csi20vin5>;
+ };
+ vin5csi40: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&csi40vin5>;
+ };
+ };
+ };
};
vin6: video@e6ef6000 {
+ compatible = "renesas,vin-r8a77961";
reg = <0 0xe6ef6000 0 0x1000>;
- /* placeholder */
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 805>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 805>;
+ renesas,id = <6>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <1>;
+
+ vin6csi20: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&csi20vin6>;
+ };
+ vin6csi40: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&csi40vin6>;
+ };
+ };
+ };
};
vin7: video@e6ef7000 {
+ compatible = "renesas,vin-r8a77961";
reg = <0 0xe6ef7000 0 0x1000>;
- /* placeholder */
+ interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 804>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 804>;
+ renesas,id = <7>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <1>;
+
+ vin7csi20: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&csi20vin7>;
+ };
+ vin7csi40: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&csi40vin7>;
+ };
+ };
+ };
};
rcar_sound: sound@ec500000 {
@@ -2249,35 +2487,121 @@
};
csi20: csi2@fea80000 {
+ compatible = "renesas,r8a77961-csi2";
reg = <0 0xfea80000 0 0x10000>;
- /* placeholder */
+ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 714>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 714>;
+ status = "disabled";
ports {
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
+
reg = <1>;
+
+ csi20vin0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vin0csi20>;
+ };
+ csi20vin1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vin1csi20>;
+ };
+ csi20vin2: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&vin2csi20>;
+ };
+ csi20vin3: endpoint@3 {
+ reg = <3>;
+ remote-endpoint = <&vin3csi20>;
+ };
+ csi20vin4: endpoint@4 {
+ reg = <4>;
+ remote-endpoint = <&vin4csi20>;
+ };
+ csi20vin5: endpoint@5 {
+ reg = <5>;
+ remote-endpoint = <&vin5csi20>;
+ };
+ csi20vin6: endpoint@6 {
+ reg = <6>;
+ remote-endpoint = <&vin6csi20>;
+ };
+ csi20vin7: endpoint@7 {
+ reg = <7>;
+ remote-endpoint = <&vin7csi20>;
+ };
};
};
};
csi40: csi2@feaa0000 {
+ compatible = "renesas,r8a77961-csi2";
reg = <0 0xfeaa0000 0 0x10000>;
- /* placeholder */
+ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 716>;
+ power-domains = <&sysc R8A77961_PD_ALWAYS_ON>;
+ resets = <&cpg 716>;
+ status = "disabled";
ports {
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
reg = <1>;
+
+ csi40vin0: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vin0csi40>;
+ };
+ csi40vin1: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vin1csi40>;
+ };
+ csi40vin2: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&vin2csi40>;
+ };
+ csi40vin3: endpoint@3 {
+ reg = <3>;
+ remote-endpoint = <&vin3csi40>;
+ };
+ csi40vin4: endpoint@4 {
+ reg = <4>;
+ remote-endpoint = <&vin4csi40>;
+ };
+ csi40vin5: endpoint@5 {
+ reg = <5>;
+ remote-endpoint = <&vin5csi40>;
+ };
+ csi40vin6: endpoint@6 {
+ reg = <6>;
+ remote-endpoint = <&vin6csi40>;
+ };
+ csi40vin7: endpoint@7 {
+ reg = <7>;
+ remote-endpoint = <&vin7csi40>;
+ };
};
};
};
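
Note: this hunk retires the last /* placeholder */ stubs in r8a77961.dtsi. CAN0/1, VIN0-7 and both CSI-2 receivers are now fully described, and every vinNcsiM endpoint is paired with a csiMvinN endpoint so the media graph closes inside the SoC file. All of it stays status = "disabled"; a board switches on only what it wires up. A minimal sketch of a board enabling CAN0; the pin group label is hypothetical:

	&can0 {
		pinctrl-0 = <&can0_pins>;	/* hypothetical board pin group */
		pinctrl-names = "default";
		status = "okay";
	};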
diff --git a/arch/arm64/boot/dts/renesas/r8a77965-salvator-x.dts b/arch/arm64/boot/dts/renesas/r8a77965-salvator-x.dts
index 660a0240eec5..f84c64ed4df7 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965-salvator-x.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77965-salvator-x.dts
@@ -30,48 +30,3 @@
clock-names = "du.0", "du.1", "du.3",
"dclkin.0", "dclkin.1", "dclkin.3";
};
-
-&hdmi0 {
- status = "okay";
-
- ports {
- port@1 {
- reg = <1>;
- rcar_dw_hdmi0_out: endpoint {
- remote-endpoint = <&hdmi0_con>;
- };
- };
- port@2 {
- reg = <2>;
- dw_hdmi0_snd_in: endpoint {
- remote-endpoint = <&rsnd_endpoint1>;
- };
- };
- };
-};
-
-&hdmi0_con {
- remote-endpoint = <&rcar_dw_hdmi0_out>;
-};
-
-&rcar_sound {
- ports {
- rsnd_port1: port@1 {
- reg = <1>;
- rsnd_endpoint1: endpoint {
- remote-endpoint = <&dw_hdmi0_snd_in>;
-
- dai-format = "i2s";
- bitclock-master = <&rsnd_endpoint1>;
- frame-master = <&rsnd_endpoint1>;
-
- playback = <&ssi2>;
- };
- };
- };
-};
-
-&sound_card {
- dais = <&rsnd_port0 /* ak4613 */
- &rsnd_port1>; /* HDMI0 */
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a77965-salvator-xs.dts b/arch/arm64/boot/dts/renesas/r8a77965-salvator-xs.dts
index d7e621101af7..729756c24c74 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965-salvator-xs.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77965-salvator-xs.dts
@@ -31,29 +31,6 @@
"dclkin.0", "dclkin.1", "dclkin.3";
};
-&hdmi0 {
- status = "okay";
-
- ports {
- port@1 {
- reg = <1>;
- rcar_dw_hdmi0_out: endpoint {
- remote-endpoint = <&hdmi0_con>;
- };
- };
- port@2 {
- reg = <2>;
- dw_hdmi0_snd_in: endpoint {
- remote-endpoint = <&rsnd_endpoint1>;
- };
- };
- };
-};
-
-&hdmi0_con {
- remote-endpoint = <&rcar_dw_hdmi0_out>;
-};
-
&pca9654 {
pcie-sata-switch-hog {
gpio-hog;
@@ -63,29 +40,7 @@
};
};
-&rcar_sound {
- ports {
- rsnd_port1: port@1 {
- reg = <1>;
- rsnd_endpoint1: endpoint {
- remote-endpoint = <&dw_hdmi0_snd_in>;
-
- dai-format = "i2s";
- bitclock-master = <&rsnd_endpoint1>;
- frame-master = <&rsnd_endpoint1>;
-
- playback = <&ssi2>;
- };
- };
- };
-};
-
/* SW12-7 must be set 'Off' (MD12 set to 1) which is not the default! */
&sata {
status = "okay";
};
-
-&sound_card {
- dais = <&rsnd_port0 /* ak4613 */
- &rsnd_port1>; /* HDMI0 */
-};
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index 657b20d3533b..dcb9df861d74 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -2575,6 +2575,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -2630,6 +2634,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77970.dtsi b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
index 5a5d5649332a..e8f6352c3665 100644
--- a/arch/arm64/boot/dts/renesas/r8a77970.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77970.dtsi
@@ -1106,6 +1106,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
index ec7ca72399ec..7b51d464de0e 100644
--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
@@ -992,8 +992,8 @@
reg = <1>;
- vin4csi41: endpoint@2 {
- reg = <2>;
+ vin4csi41: endpoint@3 {
+ reg = <3>;
remote-endpoint = <&csi41vin4>;
};
};
@@ -1020,8 +1020,8 @@
reg = <1>;
- vin5csi41: endpoint@2 {
- reg = <2>;
+ vin5csi41: endpoint@3 {
+ reg = <3>;
remote-endpoint = <&csi41vin5>;
};
};
@@ -1048,8 +1048,8 @@
reg = <1>;
- vin6csi41: endpoint@2 {
- reg = <2>;
+ vin6csi41: endpoint@3 {
+ reg = <3>;
remote-endpoint = <&csi41vin6>;
};
};
@@ -1076,8 +1076,8 @@
reg = <1>;
- vin7csi41: endpoint@2 {
- reg = <2>;
+ vin7csi41: endpoint@3 {
+ reg = <3>;
remote-endpoint = <&csi41vin7>;
};
};
@@ -1439,6 +1439,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
@@ -1478,6 +1482,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
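
Note: the endpoint@2 to endpoint@3 renumbering in r8a77980.dtsi is a correctness fix, not churn: in the R-Car VIN binding, an endpoint's reg inside port@1 identifies the CSI-2 receiver it connects to (0 = CSI20, 1 = CSI21, 2 = CSI40, 3 = CSI41), and VIN4-7 on this SoC link to CSI41, so the old reg = <2> mislabelled the link as CSI40. The corrected shape:

	port@1 {
		reg = <1>;
		/* endpoint reg encodes the CSI-2 instance: 3 = CSI41 */
		vin4csi41: endpoint@3 {
			reg = <3>;
			remote-endpoint = <&csi41vin4>;
		};
	};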
diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
index f74f8b9993f1..4715e4a4abe0 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
@@ -16,6 +16,9 @@
aliases {
serial0 = &scif2;
ethernet0 = &avb;
+ mmc0 = &sdhi3;
+ mmc1 = &sdhi0;
+ mmc2 = &sdhi1;
};
chosen {
@@ -295,8 +298,6 @@
ports {
port@0 {
- reg = <0>;
-
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2>;
@@ -621,7 +622,7 @@
};
&rcar_sound {
- pinctrl-0 = <&sound_pins &sound_clk_pins>;
+ pinctrl-0 = <&sound_pins>, <&sound_clk_pins>;
pinctrl-names = "default";
/* Single DAI */
@@ -653,8 +654,8 @@
rcar_sound,dai {
dai0 {
- playback = <&ssi0 &src0 &dvc0>;
- capture = <&ssi1 &src1 &dvc1>;
+ playback = <&ssi0>, <&src0>, <&dvc0>;
+ capture = <&ssi1>, <&src1>, <&dvc1>;
};
};
diff --git a/arch/arm64/boot/dts/renesas/r8a77990.dtsi b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
index 5010f23fafcc..0eaea58f4210 100644
--- a/arch/arm64/boot/dts/renesas/r8a77990.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77990.dtsi
@@ -1970,6 +1970,10 @@
#address-cells = <1>;
#size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ };
+
port@1 {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
index fa284a7260d6..a0a1a1da0d87 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-cpu.dtsi
@@ -6,12 +6,45 @@
*/
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+
#include "r8a779a0.dtsi"
/ {
model = "Renesas Falcon CPU board";
compatible = "renesas,falcon-cpu", "renesas,r8a779a0";
+ aliases {
+ serial0 = &scif0;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-1 {
+ gpios = <&gpio4 18 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <1>;
+ };
+ led-2 {
+ gpios = <&gpio4 19 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <2>;
+ };
+ led-3 {
+ gpios = <&gpio4 20 GPIO_ACTIVE_HIGH>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_INDICATOR;
+ function-enumerator = <3>;
+ };
+ };
+
memory@48000000 {
device_type = "memory";
/* first 128MB is reserved for secure area. */
@@ -52,22 +85,6 @@
};
};
-&avb0 {
- pinctrl-0 = <&avb0_pins>;
- pinctrl-names = "default";
- phy-handle = <&phy0>;
- tx-internal-delay-ps = <2000>;
- status = "okay";
-
- phy0: ethernet-phy@0 {
- rxc-skew-ps = <1500>;
- reg = <0>;
- interrupt-parent = <&gpio4>;
- interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
- reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
- };
-};
-
&extal_clk {
clock-frequency = <16666666>;
};
@@ -82,6 +99,13 @@
status = "okay";
clock-frequency = <400000>;
+
+ eeprom@50 {
+ compatible = "rohm,br24g01", "atmel,24c01";
+ label = "cpu-board";
+ reg = <0x50>;
+ pagesize = <8>;
+ };
};
&i2c1 {
@@ -121,24 +145,6 @@
pinctrl-0 = <&scif_clk_pins>;
pinctrl-names = "default";
- avb0_pins: avb0 {
- mux {
- groups = "avb0_link", "avb0_mdio", "avb0_rgmii", "avb0_txcrefclk";
- function = "avb0";
- };
-
- pins_mdio {
- groups = "avb0_mdio";
- drive-strength = <21>;
- };
-
- pins_mii {
- groups = "avb0_rgmii";
- drive-strength = <21>;
- };
-
- };
-
i2c0_pins: i2c0 {
groups = "i2c0";
function = "i2c0";
@@ -171,6 +177,11 @@
};
};
+&rwdt {
+ timeout-sec = <60>;
+ status = "okay";
+};
+
&scif0 {
pinctrl-0 = <&scif0_pins>;
pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-csi-dsi.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-csi-dsi.dtsi
new file mode 100644
index 000000000000..14d3db5d6c16
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-csi-dsi.dtsi
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the Falcon CSI/DSI sub-board
+ *
+ * Copyright (C) 2021 Glider bv
+ */
+
+&i2c0 {
+ eeprom@52 {
+ compatible = "rohm,br24g01", "atmel,24c01";
+ label = "csi-dsi-sub-board-id";
+ reg = <0x52>;
+ pagesize = <8>;
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon-ethernet.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-ethernet.dtsi
new file mode 100644
index 000000000000..e11bf9ace776
--- /dev/null
+++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon-ethernet.dtsi
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for the Falcon Ethernet sub-board
+ *
+ * Copyright (C) 2021 Glider bv
+ */
+
+&i2c0 {
+ eeprom@53 {
+ compatible = "rohm,br24g01", "atmel,24c01";
+ label = "ethernet-sub-board-id";
+ reg = <0x53>;
+ pagesize = <8>;
+ };
+};
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
index 5617b81dd7dc..687f019e79f0 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
+++ b/arch/arm64/boot/dts/renesas/r8a779a0-falcon.dts
@@ -7,6 +7,8 @@
/dts-v1/;
#include "r8a779a0-falcon-cpu.dtsi"
+#include "r8a779a0-falcon-csi-dsi.dtsi"
+#include "r8a779a0-falcon-ethernet.dtsi"
/ {
model = "Renesas Falcon CPU and Breakout boards based on r8a779a0";
@@ -14,15 +16,51 @@
aliases {
ethernet0 = &avb0;
- serial0 = &scif0;
};
+};
+
+&avb0 {
+ pinctrl-0 = <&avb0_pins>;
+ pinctrl-names = "default";
+ phy-handle = <&phy0>;
+ tx-internal-delay-ps = <2000>;
+ status = "okay";
- chosen {
- stdout-path = "serial0:115200n8";
+ phy0: ethernet-phy@0 {
+ rxc-skew-ps = <1500>;
+ reg = <0>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio4 15 GPIO_ACTIVE_LOW>;
};
};
-&rwdt {
- timeout-sec = <60>;
- status = "okay";
+&i2c0 {
+ eeprom@51 {
+ compatible = "rohm,br24g01", "atmel,24c01";
+ label = "breakout-board";
+ reg = <0x51>;
+ pagesize = <8>;
+ };
+};
+
+&pfc {
+ avb0_pins: avb0 {
+ mux {
+ groups = "avb0_link", "avb0_mdio", "avb0_rgmii",
+ "avb0_txcrefclk";
+ function = "avb0";
+ };
+
+ pins_mdio {
+ groups = "avb0_mdio";
+ drive-strength = <21>;
+ };
+
+ pins_mii {
+ groups = "avb0_rgmii";
+ drive-strength = <21>;
+ };
+
+ };
};
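
Note: the Falcon edits redistribute the DT along physical board lines: the avb0 pin groups and Ethernet PHY leave the CPU-board .dtsi for the top-level .dts because the PHY sits on the breakout board, and each board gains an ID EEPROM on i2c0 at its own address (0x50 CPU board, 0x51 breakout, 0x52 CSI/DSI sub-board, 0x53 Ethernet sub-board). A further sub-board would follow the same pattern; sketch with a hypothetical address and label:

	&i2c0 {
		eeprom@54 {
			/* 0x54 and the label are hypothetical */
			compatible = "rohm,br24g01", "atmel,24c01";
			label = "hypothetical-sub-board";
			reg = <0x54>;
			pagesize = <8>;
		};
	};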
diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
index dfd6ae8b564f..70b3604e56cd 100644
--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
@@ -60,10 +60,7 @@
pmu_a76 {
compatible = "arm,cortex-a76-pmu";
- interrupts-extended = <&gic GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
- <&gic GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
- <&gic GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
- <&gic GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts-extended = <&gic GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
};
/* External SCIF clock - to be overridden by boards that provide it */
@@ -239,6 +236,76 @@
#interrupt-cells = <2>;
};
+ cmt0: timer@e60f0000 {
+ compatible = "renesas,r8a779a0-cmt0",
+ "renesas,rcar-gen3-cmt0";
+ reg = <0 0xe60f0000 0 0x1004>;
+ interrupts = <GIC_SPI 500 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 501 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 910>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 910>;
+ status = "disabled";
+ };
+
+ cmt1: timer@e6130000 {
+ compatible = "renesas,r8a779a0-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6130000 0 0x1004>;
+ interrupts = <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 450 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 452 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 453 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 454 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 455 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 911>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 911>;
+ status = "disabled";
+ };
+
+ cmt2: timer@e6140000 {
+ compatible = "renesas,r8a779a0-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6140000 0 0x1004>;
+ interrupts = <GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 457 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 459 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 460 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 461 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 463 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 912>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 912>;
+ status = "disabled";
+ };
+
+ cmt3: timer@e6148000 {
+ compatible = "renesas,r8a779a0-cmt1",
+ "renesas,rcar-gen3-cmt1";
+ reg = <0 0xe6148000 0 0x1004>;
+ interrupts = <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 913>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 913>;
+ status = "disabled";
+ };
+
cpg: clock-controller@e6150000 {
compatible = "renesas,r8a779a0-cpg-mssr";
reg = <0 0xe6150000 0 0x4000>;
@@ -260,6 +327,84 @@
#power-domain-cells = <1>;
};
+ tmu0: timer@e61e0000 {
+ compatible = "renesas,tmu-r8a779a0", "renesas,tmu";
+ reg = <0 0xe61e0000 0 0x30>;
+ interrupts = <GIC_SPI 512 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 513 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 514 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 713>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 713>;
+ status = "disabled";
+ };
+
+ tmu1: timer@e6fc0000 {
+ compatible = "renesas,tmu-r8a779a0", "renesas,tmu";
+ reg = <0 0xe6fc0000 0 0x30>;
+ interrupts = <GIC_SPI 504 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 505 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 506 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 714>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 714>;
+ status = "disabled";
+ };
+
+ tmu2: timer@e6fd0000 {
+ compatible = "renesas,tmu-r8a779a0", "renesas,tmu";
+ reg = <0 0xe6fd0000 0 0x30>;
+ interrupts = <GIC_SPI 508 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 509 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 510 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 715>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 715>;
+ status = "disabled";
+ };
+
+ tmu3: timer@e6fe0000 {
+ compatible = "renesas,tmu-r8a779a0", "renesas,tmu";
+ reg = <0 0xe6fe0000 0 0x30>;
+ interrupts = <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 716>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 716>;
+ status = "disabled";
+ };
+
+ tmu4: timer@ffc00000 {
+ compatible = "renesas,tmu-r8a779a0", "renesas,tmu";
+ reg = <0 0xffc00000 0 0x30>;
+ interrupts = <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 477 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 478 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 717>;
+ clock-names = "fck";
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 717>;
+ status = "disabled";
+ };
+
+ tsc: thermal@e6190000 {
+ compatible = "renesas,r8a779a0-thermal";
+ reg = <0 0xe6190000 0 0x200>,
+ <0 0xe6198000 0 0x200>,
+ <0 0xe61a0000 0 0x200>,
+ <0 0xe61a8000 0 0x200>,
+ <0 0xe61b0000 0 0x200>;
+ clocks = <&cpg CPG_MOD 919>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 919>;
+ #thermal-sensor-cells = <1>;
+ };
+
i2c0: i2c@e6500000 {
compatible = "renesas,i2c-r8a779a0",
"renesas,rcar-gen3-i2c";
@@ -954,12 +1099,122 @@
power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
};
+ fcpvd0: fcp@fea10000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfea10000 0 0x200>;
+ clocks = <&cpg CPG_MOD 508>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 508>;
+ };
+
+ fcpvd1: fcp@fea11000 {
+ compatible = "renesas,fcpv";
+ reg = <0 0xfea11000 0 0x200>;
+ clocks = <&cpg CPG_MOD 509>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 509>;
+ };
+
+ vspd0: vsp@fea20000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfea20000 0 0x5000>;
+ interrupts = <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 830>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 830>;
+
+ renesas,fcp = <&fcpvd0>;
+ };
+
+ vspd1: vsp@fea28000 {
+ compatible = "renesas,vsp2";
+ reg = <0 0xfea28000 0 0x5000>;
+ interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 831>;
+ power-domains = <&sysc R8A779A0_PD_ALWAYS_ON>;
+ resets = <&cpg 831>;
+
+ renesas,fcp = <&fcpvd1>;
+ };
+
prr: chipid@fff00044 {
compatible = "renesas,prr";
reg = <0 0xfff00044 0 4>;
};
};
+ thermal-zones {
+ sensor_thermal1: sensor-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsc 0>;
+
+ trips {
+ sensor1_crit: sensor1-crit {
+ temperature = <120000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ sensor_thermal2: sensor-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsc 1>;
+
+ trips {
+ sensor2_crit: sensor2-crit {
+ temperature = <120000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ sensor_thermal3: sensor-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsc 2>;
+
+ trips {
+ sensor3_crit: sensor3-crit {
+ temperature = <120000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ sensor_thermal4: sensor-thermal4 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsc 3>;
+
+ trips {
+ sensor4_crit: sensor4-crit {
+ temperature = <120000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+
+ sensor_thermal5: sensor-thermal5 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+ thermal-sensors = <&tsc 4>;
+
+ trips {
+ sensor5_crit: sensor5-crit {
+ temperature = <120000>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
timer {
compatible = "arm,armv8-timer";
interrupts-extended = <&gic GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(1) | IRQ_TYPE_LEVEL_LOW)>,
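
Note: the r8a779a0 additions above bring up timers (CMT0-3, TMU0-4), the display FCP/VSP pairs, and the five-bank thermal sensor; the tsc node exports its banks via #thermal-sensor-cells = <1>, and each zone carries only a 120 C critical trip for now, with no passive trips or cooling maps since CPU cooling is not wired up yet. The pmu_a76 fix in the same file swaps four per-core SPIs for the single level-low PPI the Cortex-A76 actually raises. A later passive trip could be grafted onto a labelled zone like this; the 110000 value is illustrative:

	&sensor_thermal1 {
		trips {
			sensor1_passive: sensor1-passive {
				temperature = <110000>;	/* illustrative value */
				hysteresis = <1000>;
				type = "passive";
			};
		};
	};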
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
index c22bb38994e8..453ffcef24fa 100644
--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
@@ -36,6 +36,9 @@
serial0 = &scif2;
serial1 = &hscif1;
ethernet0 = &avb;
+ mmc0 = &sdhi2;
+ mmc1 = &sdhi0;
+ mmc2 = &sdhi3;
};
chosen {
@@ -198,7 +201,8 @@
label = "rcar-sound";
- dais = <&rsnd_port0>;
+ dais = <&rsnd_port0 /* ak4613 */
+ &rsnd_port1>; /* HDMI0 */
};
vbus0_usb2: regulator-vbus0-usb2 {
@@ -316,6 +320,10 @@
};
};
+&a57_0 {
+ cpu-supply = <&dvfs>;
+};
+
&audio_clk_a {
clock-frequency = <22579200>;
};
@@ -341,7 +349,6 @@
ports {
port@0 {
- reg = <0>;
csi20_in: endpoint {
clock-lanes = <0>;
data-lanes = <1>;
@@ -356,8 +363,6 @@
ports {
port@0 {
- reg = <0>;
-
csi40_in: endpoint {
clock-lanes = <0>;
data-lanes = <1 2 3 4>;
@@ -394,6 +399,29 @@
clock-frequency = <32768>;
};
+&hdmi0 {
+ status = "okay";
+
+ ports {
+ port@1 {
+ reg = <1>;
+ rcar_dw_hdmi0_out: endpoint {
+ remote-endpoint = <&hdmi0_con>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ dw_hdmi0_snd_in: endpoint {
+ remote-endpoint = <&rsnd_endpoint1>;
+ };
+ };
+ };
+};
+
+&hdmi0_con {
+ remote-endpoint = <&rcar_dw_hdmi0_out>;
+};
+
&hscif1 {
pinctrl-0 = <&hscif1_pins>;
pinctrl-names = "default";
@@ -730,7 +758,7 @@
};
&rcar_sound {
- pinctrl-0 = <&sound_pins &sound_clk_pins>;
+ pinctrl-0 = <&sound_pins>, <&sound_clk_pins>;
pinctrl-names = "default";
/* Single DAI */
@@ -773,8 +801,21 @@
bitclock-master = <&rsnd_endpoint0>;
frame-master = <&rsnd_endpoint0>;
- playback = <&ssi0 &src0 &dvc0>;
- capture = <&ssi1 &src1 &dvc1>;
+ playback = <&ssi0>, <&src0>, <&dvc0>;
+ capture = <&ssi1>, <&src1>, <&dvc1>;
+ };
+ };
+
+ rsnd_port1: port@1 {
+ reg = <1>;
+ rsnd_endpoint1: endpoint {
+ remote-endpoint = <&dw_hdmi0_snd_in>;
+
+ dai-format = "i2s";
+ bitclock-master = <&rsnd_endpoint1>;
+ frame-master = <&rsnd_endpoint1>;
+
+ playback = <&ssi2>;
};
};
};
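
Note: besides folding the HDMI0 graph in from the board files, salvator-common.dtsi now sets cpu-supply on the first Cortex-A57, which lets cpufreq program the dvfs regulator to each OPP's voltage during transitions instead of scaling frequency alone. The consumer side is the one property; sketch, with an illustrative OPP entry of the kind the supply is applied for:

	&a57_0 {
		cpu-supply = <&dvfs>;
	};

	/* illustrative OPP entry; values are not from this patch */
	opp-1500000000 {
		opp-hz = /bits/ 64 <1500000000>;
		opp-microvolt = <820000>;
	};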
diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
index e9ed2597f1c2..61bd4df09df0 100644
--- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
@@ -16,6 +16,7 @@
aliases {
serial1 = &hscif0;
serial2 = &scif1;
+ mmc2 = &sdhi3;
};
clksndsel: clksndsel {
diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
index a04eae55dd6c..1f177af3eb9d 100644
--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
+++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
@@ -23,6 +23,8 @@
aliases {
serial0 = &scif2;
ethernet0 = &avb;
+ mmc0 = &sdhi2;
+ mmc1 = &sdhi0;
};
chosen {
@@ -136,6 +138,10 @@
};
};
+&a57_0 {
+ cpu-supply = <&dvfs>;
+};
+
&audio_clk_a {
clock-frequency = <22579200>;
};
@@ -365,7 +371,7 @@
};
&rcar_sound {
- pinctrl-0 = <&sound_pins &sound_clk_pins>;
+ pinctrl-0 = <&sound_pins>, <&sound_clk_pins>;
pinctrl-names = "default";
/* Single DAI */
@@ -408,8 +414,8 @@
bitclock-master = <&rsnd_for_ak4613>;
frame-master = <&rsnd_for_ak4613>;
- playback = <&ssi0 &src0 &dvc0>;
- capture = <&ssi1 &src1 &dvc1>;
+ playback = <&ssi0>, <&src0>, <&dvc0>;
+ capture = <&ssi1>, <&src1>, <&dvc1>;
};
};
rsnd_port1: port@1 {
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index 62d3abc17a24..c3e00c0e2db7 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -36,6 +36,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopc-t4.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-m4.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-m4b.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-neo4.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-nanopi-r4s.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-orangepi.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-pinebook-pro.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-puma-haikou.dtb
diff --git a/arch/arm64/boot/dts/rockchip/px30-engicam-common.dtsi b/arch/arm64/boot/dts/rockchip/px30-engicam-common.dtsi
index 08b0b9fbcbc9..3429e124d95a 100644
--- a/arch/arm64/boot/dts/rockchip/px30-engicam-common.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30-engicam-common.dtsi
@@ -6,6 +6,11 @@
*/
/ {
+ aliases {
+ mmc1 = &sdmmc;
+ mmc2 = &sdio;
+ };
+
vcc5v0_sys: vcc5v0-sys {
compatible = "regulator-fixed";
regulator-name = "vcc5v0_sys"; /* +5V */
diff --git a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
index cdacd3483600..7249871530ab 100644
--- a/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30-engicam-px30-core.dtsi
@@ -11,6 +11,10 @@
/ {
compatible = "engicam,px30-core", "rockchip,px30";
+
+ aliases {
+ mmc0 = &emmc;
+ };
};
&cpu0 {
diff --git a/arch/arm64/boot/dts/rockchip/px30-evb.dts b/arch/arm64/boot/dts/rockchip/px30-evb.dts
index 5fe905fae9a8..c1ce9c295e5b 100644
--- a/arch/arm64/boot/dts/rockchip/px30-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/px30-evb.dts
@@ -13,6 +13,12 @@
model = "Rockchip PX30 EVB";
compatible = "rockchip,px30-evb", "rockchip,px30";
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &sdio;
+ mmc2 = &emmc;
+ };
+
chosen {
stdout-path = "serial5:115200n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/px30.dtsi b/arch/arm64/boot/dts/rockchip/px30.dtsi
index c45b0cfcae09..09baa8a167ce 100644
--- a/arch/arm64/boot/dts/rockchip/px30.dtsi
+++ b/arch/arm64/boot/dts/rockchip/px30.dtsi
@@ -25,9 +25,6 @@
i2c1 = &i2c1;
i2c2 = &i2c2;
i2c3 = &i2c3;
- mmc0 = &sdmmc;
- mmc1 = &sdio;
- mmc2 = &emmc;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -603,7 +600,7 @@
};
wdt: watchdog@ff1e0000 {
- compatible = "snps,dw-wdt";
+ compatible = "rockchip,px30-wdt", "snps,dw-wdt";
reg = <0x0 0xff1e0000 0x0 0x100>;
clocks = <&cru PCLK_WDT_NS>;
interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
@@ -984,6 +981,27 @@
status = "disabled";
};
+ gpu_opp_table: opp-table2 {
+ compatible = "operating-points-v2";
+
+ opp-200000000 {
+ opp-hz = /bits/ 64 <200000000>;
+ opp-microvolt = <950000>;
+ };
+ opp-300000000 {
+ opp-hz = /bits/ 64 <300000000>;
+ opp-microvolt = <975000>;
+ };
+ opp-400000000 {
+ opp-hz = /bits/ 64 <400000000>;
+ opp-microvolt = <1050000>;
+ };
+ opp-480000000 {
+ opp-hz = /bits/ 64 <480000000>;
+ opp-microvolt = <1125000>;
+ };
+ };
+
gpu: gpu@ff400000 {
compatible = "rockchip,px30-mali", "arm,mali-bifrost";
reg = <0x0 0xff400000 0x0 0x4000>;
@@ -994,6 +1012,7 @@
clocks = <&cru SCLK_GPU>;
#cooling-cells = <2>;
power-domains = <&power PX30_PD_GPU>;
+ operating-points-v2 = <&gpu_opp_table>;
status = "disabled";
};
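The new gpu_opp_table pairs each Mali clock rate with a supply voltage, and the operating-points-v2 reference in the gpu node lets devfreq scale the GPU between those points once a board enables it. A minimal board-side sketch, assuming a hypothetical regulator label vdd_log:

	&gpu {
		mali-supply = <&vdd_log>;	/* assumption: board GPU/logic rail */
		status = "okay";
	};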
diff --git a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
index 7a96be10eaf0..3dddd4742c3a 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3308-roc-cc.dts
@@ -9,6 +9,12 @@
/ {
model = "Firefly ROC-RK3308-CC board";
compatible = "firefly,roc-rk3308-cc", "rockchip,rk3308";
+
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &emmc;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3308.dtsi b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
index 3a035a189450..b815ce73e5c6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3308.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3308.dtsi
@@ -24,9 +24,6 @@
i2c1 = &i2c1;
i2c2 = &i2c2;
i2c3 = &i2c3;
- mmc0 = &sdmmc;
- mmc1 = &emmc;
- mmc2 = &sdio;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -247,7 +244,7 @@
};
wdt: watchdog@ff080000 {
- compatible = "snps,dw-wdt";
+ compatible = "rockchip,rk3308-wdt", "snps,dw-wdt";
reg = <0x0 0xff080000 0x0 0x100>;
clocks = <&cru PCLK_WDT>;
interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
@@ -640,6 +637,28 @@
status = "disabled";
};
+ gmac: ethernet@ff4e0000 {
+ compatible = "rockchip,rk3308-gmac";
+ reg = <0x0 0xff4e0000 0x0 0x10000>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ clocks = <&cru SCLK_MAC>, <&cru SCLK_MAC_RX_TX>,
+ <&cru SCLK_MAC_RX_TX>, <&cru SCLK_MAC_REF>,
+ <&cru SCLK_MAC>, <&cru ACLK_MAC>,
+ <&cru PCLK_MAC>, <&cru SCLK_MAC_RMII>;
+ clock-names = "stmmaceth", "mac_clk_rx",
+ "mac_clk_tx", "clk_mac_ref",
+ "clk_mac_refout", "aclk_mac",
+ "pclk_mac", "clk_mac_speed";
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&rmii_pins &mac_refclk_12ma>;
+ resets = <&cru SRST_MAC_A>;
+ reset-names = "stmmaceth";
+ rockchip,grf = <&grf>;
+ status = "disabled";
+ };
+
cru: clock-controller@ff500000 {
compatible = "rockchip,rk3308-cru";
reg = <0x0 0xff500000 0x0 0x1000>;
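The gmac node is added disabled, as usual for SoC .dtsi files; a board that wires up the RMII pins enables it and attaches its PHY. A minimal sketch, assuming a hypothetical phy0 label defined under the board's mdio subnode:

	&gmac {
		clock_in_out = "input";	/* assumption: PHY supplies the ref clock */
		phy-handle = <&phy0>;	/* assumption: board-defined PHY node */
		status = "okay";
	};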
diff --git a/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts b/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts
index 30c73ef25370..763cf9b4620e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3318-a95x-z2.dts
@@ -8,6 +8,12 @@
model = "A95X Z2";
compatible = "zkmagic,a95x-z2", "rockchip,rk3318";
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &sdio;
+ mmc2 = &emmc;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
@@ -357,6 +363,11 @@
status = "okay";
};
+&usbdrd3 {
+ dr_mode = "host";
+ status = "okay";
+};
+
&usb_host0_ehci {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
index 97fb93e1cc00..49c97f76df77 100644
--- a/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3326-odroid-go2.dts
@@ -14,6 +14,10 @@
model = "ODROID-GO Advance";
compatible = "hardkernel,rk3326-odroid-go2", "rockchip,rk3326";
+ aliases {
+ mmc0 = &sdmmc;
+ };
+
chosen {
stdout-path = "serial2:115200n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-a1.dts b/arch/arm64/boot/dts/rockchip/rk3328-a1.dts
index 37f307cfa4cc..de2d3e88e27f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-a1.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-a1.dts
@@ -8,6 +8,11 @@
model = "Beelink A1";
compatible = "azw,beelink-a1", "rockchip,rk3328";
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &emmc;
+ };
+
/*
* UART pins, as viewed with bottom of case removed:
*
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
index a48767931af6..ff6b466e0e07 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-evb.dts
@@ -10,6 +10,12 @@
model = "Rockchip RK3328 EVB";
compatible = "rockchip,rk3328-evb", "rockchip,rk3328";
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &sdio;
+ mmc2 = &emmc;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
index faf496d789cf..f807bc066ccb 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
@@ -13,6 +13,10 @@
model = "FriendlyElec NanoPi R2S";
compatible = "friendlyarm,nanopi-r2s", "rockchip,rk3328";
+ aliases {
+ mmc0 = &sdmmc;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
index 19959bfba451..a05732b59f38 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
@@ -10,6 +10,11 @@
model = "Firefly roc-rk3328-cc";
compatible = "firefly,roc-rk3328-cc", "rockchip,rk3328";
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &emmc;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
index 2d71ca7e429c..c7e31efdd2e1 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock-pi-e.dts
@@ -20,6 +20,11 @@
model = "Radxa ROCK Pi E";
compatible = "radxa,rockpi-e", "rockchip,rk3328";
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &emmc;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
index c984662043da..3bef1f39bc6e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
@@ -10,6 +10,11 @@
model = "Pine64 Rock64";
compatible = "pine64,rock64", "rockchip,rk3328";
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &emmc;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
index 063ed0adbec4..3ed69ecbcf3c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
@@ -27,9 +27,6 @@
i2c1 = &i2c1;
i2c2 = &i2c2;
i2c3 = &i2c3;
- mmc0 = &sdmmc;
- mmc1 = &sdio;
- mmc2 = &emmc;
ethernet0 = &gmac2io;
ethernet1 = &gmac2phy;
};
@@ -438,7 +435,7 @@
};
wdt: watchdog@ff1a0000 {
- compatible = "snps,dw-wdt";
+ compatible = "rockchip,rk3328-wdt", "snps,dw-wdt";
reg = <0x0 0xff1a0000 0x0 0x100>;
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cru PCLK_WDT>;
@@ -919,8 +916,8 @@
"mac_clk_tx", "clk_mac_ref",
"aclk_mac", "pclk_mac",
"clk_macphy";
- resets = <&cru SRST_GMAC2PHY_A>, <&cru SRST_MACPHY>;
- reset-names = "stmmaceth", "mac-phy";
+ resets = <&cru SRST_GMAC2PHY_A>;
+ reset-names = "stmmaceth";
phy-mode = "rmii";
phy-handle = <&phy>;
snps,txpbl = <0x4>;
@@ -980,6 +977,25 @@
status = "disabled";
};
+ usbdrd3: usb@ff600000 {
+ compatible = "rockchip,rk3328-dwc3", "snps,dwc3";
+ reg = <0x0 0xff600000 0x0 0x100000>;
+ interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_USB3OTG_REF>, <&cru SCLK_USB3OTG_SUSPEND>,
+ <&cru ACLK_USB3OTG>;
+ clock-names = "ref_clk", "suspend_clk",
+ "bus_clk";
+ dr_mode = "otg";
+ phy_type = "utmi_wide";
+ snps,dis-del-phy-power-chg-quirk;
+ snps,dis_enblslpm_quirk;
+ snps,dis-tx-ipgap-linecheck-quirk;
+ snps,dis-u2-freeclk-exists-quirk;
+ snps,dis_u2_susphy_quirk;
+ snps,dis_u3_susphy_quirk;
+ status = "disabled";
+ };
+
gic: interrupt-controller@ff811000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
index 87fabc64cc39..15d1fc541c38 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
@@ -8,6 +8,10 @@
#include "rk3368.dtsi"
/ {
+ aliases {
+ mmc0 = &emmc;
+ };
+
chosen {
stdout-path = "serial2:115200n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
index 46357d1d77cd..62aa97a0b8c9 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-geekbox.dts
@@ -11,6 +11,10 @@
model = "GeekBox";
compatible = "geekbuying,geekbox", "rockchip,rk3368";
+ aliases {
+ mmc0 = &emmc;
+ };
+
chosen {
stdout-path = "serial2:115200n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts
index 7fcb1eacea8a..cae01d35b93d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-lion-haikou.dts
@@ -10,6 +10,10 @@
model = "Theobroma Systems RK3368-uQ7 Baseboard";
compatible = "tsd,rk3368-lion-haikou", "rockchip,rk3368";
+ aliases {
+ mmc1 = &sdmmc;
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
index 24d28be4736c..bcd7977fb0f8 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368-lion.dtsi
@@ -7,6 +7,10 @@
#include "rk3368.dtsi"
/ {
+ aliases {
+ mmc0 = &emmc;
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
index ecce16ecc9c3..3ebe15e03cf4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-orion-r68-meta.dts
@@ -11,6 +11,11 @@
model = "Rockchip Orion R68";
compatible = "tronsmart,orion-r68-meta", "rockchip,rk3368";
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &emmc;
+ };
+
chosen {
stdout-path = "serial2:115200n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
index 5ffd7b4d3036..5ccaa5f7a370 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-px5-evb.dts
@@ -11,6 +11,11 @@
model = "Rockchip PX5 EVB";
compatible = "rockchip,px5-evb", "rockchip,px5", "rockchip,rk3368";
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &emmc;
+ };
+
chosen {
stdout-path = "serial4:115200n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
index 2582fa4b90e2..959d3cc801f2 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
@@ -11,6 +11,11 @@
model = "Rockchip R88";
compatible = "rockchip,r88", "rockchip,rk3368";
+ aliases {
+ mmc0 = &sdio0;
+ mmc1 = &emmc;
+ };
+
chosen {
stdout-path = "serial2:115200n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
index 7af68ec3feae..dfc6376171d0 100644
--- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -25,9 +25,6 @@
i2c3 = &i2c3;
i2c4 = &i2c4;
i2c5 = &i2c5;
- mmc0 = &sdmmc;
- mmc1 = &sdio0;
- mmc2 = &emmc;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -561,7 +558,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pwm0_pin>;
clocks = <&cru PCLK_PWM1>;
- clock-names = "pwm";
status = "disabled";
};
@@ -572,7 +568,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pwm1_pin>;
clocks = <&cru PCLK_PWM1>;
- clock-names = "pwm";
status = "disabled";
};
@@ -581,7 +576,6 @@
reg = <0x0 0xff680020 0x0 0x10>;
#pwm-cells = <3>;
clocks = <&cru PCLK_PWM1>;
- clock-names = "pwm";
status = "disabled";
};
@@ -592,7 +586,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pwm3_pin>;
clocks = <&cru PCLK_PWM1>;
- clock-names = "pwm";
status = "disabled";
};
@@ -667,7 +660,7 @@
status = "disabled";
};
- timer@ff810000 {
+ timer0: timer@ff810000 {
compatible = "rockchip,rk3368-timer", "rockchip,rk3288-timer";
reg = <0x0 0xff810000 0x0 0x20>;
interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
index 694b0d08d644..7b717ebec8ff 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-evb.dts
@@ -11,6 +11,10 @@
model = "Rockchip RK3399 Evaluation Board";
compatible = "rockchip,rk3399-evb", "rockchip,rk3399";
+ aliases {
+ mmc0 = &sdhci;
+ };
+
backlight: backlight {
compatible = "pwm-backlight";
brightness-levels = <
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
index 6db18808b9c5..45254be1350d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts
@@ -13,6 +13,12 @@
model = "Firefly-RK3399 Board";
compatible = "firefly,firefly-rk3399", "rockchip,rk3399";
+ aliases {
+ mmc0 = &sdio0;
+ mmc1 = &sdmmc;
+ mmc2 = &sdhci;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
index 32dcaf210085..4002742fed4c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
@@ -10,6 +10,11 @@
#include "rk3399-op1-opp.dtsi"
/ {
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &sdhci;
+ };
+
chosen {
stdout-path = "serial2:115200n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts b/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
index 341d074ed996..bee45c17e2ca 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
@@ -9,6 +9,12 @@
model = "Hugsun X99 TV BOX";
compatible = "hugsun,x99", "rockchip,rk3399";
+ aliases {
+ mmc0 = &sdio0;
+ mmc1 = &sdmmc;
+ mmc2 = &sdhci;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
index 635afdd99122..d5c7648c841d 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
@@ -11,6 +11,12 @@
#include "rk3399-opp.dtsi"
/ {
+ aliases {
+ mmc0 = &sdio0;
+ mmc1 = &sdmmc;
+ mmc2 = &sdhci;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
index 66c725a34220..19485b552bc4 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-kobol-helios64.dts
@@ -18,6 +18,11 @@
model = "Kobol Helios64";
compatible = "kobol,helios64", "rockchip,rk3399";
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &sdhci;
+ };
+
avdd_1v8_s0: avdd-1v8-s0 {
compatible = "regulator-fixed";
regulator-name = "avdd_1v8_s0";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
index 1fa80ac15464..7c93f840bc64 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
@@ -13,6 +13,12 @@
model = "Leez RK3399 P710";
compatible = "leez,p710", "rockchip,rk3399";
+ aliases {
+ mmc0 = &sdio0;
+ mmc1 = &sdmmc;
+ mmc2 = &sdhci;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts
new file mode 100644
index 000000000000..fa5809887643
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * FriendlyElec NanoPi R4S board device tree source
+ *
+ * Copyright (c) 2020 FriendlyElec Computer Tech. Co., Ltd.
+ * (http://www.friendlyarm.com)
+ *
+ * Copyright (c) 2018 Collabora Ltd.
+ *
+ * Copyright (c) 2020 Jensen Huang <jensenhuang@friendlyarm.com>
+ * Copyright (c) 2020 Marty Jones <mj8263788@gmail.com>
+ * Copyright (c) 2021 Tianling Shen <cnsztl@gmail.com>
+ */
+
+/dts-v1/;
+#include "rk3399-nanopi4.dtsi"
+
+/ {
+ model = "FriendlyElec NanoPi R4S";
+ compatible = "friendlyarm,nanopi-r4s", "rockchip,rk3399";
+
+ /delete-node/ display-subsystem;
+
+ gpio-leds {
+ pinctrl-0 = <&lan_led_pin>, <&sys_led_pin>, <&wan_led_pin>;
+
+ /delete-node/ led-0;
+
+ lan_led: led-lan {
+ gpios = <&gpio1 RK_PA1 GPIO_ACTIVE_HIGH>;
+ label = "green:lan";
+ };
+
+ sys_led: led-sys {
+ gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
+ label = "red:sys";
+ default-state = "on";
+ };
+
+ wan_led: led-wan {
+ gpios = <&gpio1 RK_PA0 GPIO_ACTIVE_HIGH>;
+ label = "green:wan";
+ };
+ };
+
+ gpio-keys {
+ pinctrl-0 = <&reset_button_pin>;
+
+ /delete-node/ power;
+
+ reset {
+ debounce-interval = <50>;
+ gpios = <&gpio1 RK_PC6 GPIO_ACTIVE_LOW>;
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ };
+ };
+
+ vdd_5v: vdd-5v {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_5v";
+ regulator-always-on;
+ regulator-boot-on;
+ };
+};
+
+&emmc_phy {
+ status = "disabled";
+};
+
+&i2c4 {
+ status = "disabled";
+};
+
+&pcie0 {
+ max-link-speed = <1>;
+ num-lanes = <1>;
+ vpcie3v3-supply = <&vcc3v3_sys>;
+};
+
+&pinctrl {
+ gpio-leds {
+ /delete-node/ status-led-pin;
+
+ lan_led_pin: lan-led-pin {
+ rockchip,pins = <1 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ sys_led_pin: sys-led-pin {
+ rockchip,pins = <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ wan_led_pin: wan-led-pin {
+ rockchip,pins = <1 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ rockchip-key {
+ /delete-node/ power-key;
+
+ reset_button_pin: reset-button-pin {
+ rockchip,pins = <1 RK_PC6 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+};
+
+&sdhci {
+ status = "disabled";
+};
+
+&sdio0 {
+ status = "disabled";
+};
+
+&u2phy0_host {
+ phy-supply = <&vdd_5v>;
+};
+
+&u2phy1_host {
+ status = "disabled";
+};
+
+&uart0 {
+ status = "disabled";
+};
+
+&usbdrd_dwc3_0 {
+ dr_mode = "host";
+};
+
+&vcc3v3_sys {
+ vin-supply = <&vcc5v0_sys>;
+};
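This board file layers on top of rk3399-nanopi4.dtsi, and the /delete-node/ directives prune inherited nodes the R4S hardware lacks (display pipeline, power key, status LED). The directive removes a node, by label or by name, before the dtb is compiled, e.g.:

	/ {
		/delete-node/ display-subsystem;	/* headless board: drop the DRM node */
	};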
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
index 48ed4aaa37f3..16fd58c4a80f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi4.dtsi
@@ -17,6 +17,12 @@
#include "rk3399-opp.dtsi"
/ {
+ aliases {
+ mmc0 = &sdio0;
+ mmc1 = &sdmmc;
+ mmc2 = &sdhci;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi
index d6f1095abb04..da41cd81ebb7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-opp.dtsi
@@ -10,28 +10,28 @@
opp00 {
opp-hz = /bits/ 64 <408000000>;
- opp-microvolt = <800000>;
+ opp-microvolt = <825000 825000 1250000>;
clock-latency-ns = <40000>;
};
opp01 {
opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <800000>;
+ opp-microvolt = <825000 825000 1250000>;
};
opp02 {
opp-hz = /bits/ 64 <816000000>;
- opp-microvolt = <850000>;
+ opp-microvolt = <850000 850000 1250000>;
};
opp03 {
opp-hz = /bits/ 64 <1008000000>;
- opp-microvolt = <925000>;
+ opp-microvolt = <925000 925000 1250000>;
};
opp04 {
opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <1000000>;
+ opp-microvolt = <1000000 1000000 1250000>;
};
opp05 {
opp-hz = /bits/ 64 <1416000000>;
- opp-microvolt = <1125000>;
+ opp-microvolt = <1125000 1125000 1250000>;
};
};
@@ -41,36 +41,36 @@
opp00 {
opp-hz = /bits/ 64 <408000000>;
- opp-microvolt = <800000>;
+ opp-microvolt = <825000 825000 1250000>;
clock-latency-ns = <40000>;
};
opp01 {
opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <800000>;
+ opp-microvolt = <825000 825000 1250000>;
};
opp02 {
opp-hz = /bits/ 64 <816000000>;
- opp-microvolt = <825000>;
+ opp-microvolt = <825000 825000 1250000>;
};
opp03 {
opp-hz = /bits/ 64 <1008000000>;
- opp-microvolt = <875000>;
+ opp-microvolt = <875000 875000 1250000>;
};
opp04 {
opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <950000>;
+ opp-microvolt = <950000 950000 1250000>;
};
opp05 {
opp-hz = /bits/ 64 <1416000000>;
- opp-microvolt = <1025000>;
+ opp-microvolt = <1025000 1025000 1250000>;
};
opp06 {
opp-hz = /bits/ 64 <1608000000>;
- opp-microvolt = <1100000>;
+ opp-microvolt = <1100000 1100000 1250000>;
};
opp07 {
opp-hz = /bits/ 64 <1800000000>;
- opp-microvolt = <1200000>;
+ opp-microvolt = <1200000 1200000 1250000>;
};
};
@@ -79,27 +79,27 @@
opp00 {
opp-hz = /bits/ 64 <200000000>;
- opp-microvolt = <800000>;
+ opp-microvolt = <825000 825000 1150000>;
};
opp01 {
opp-hz = /bits/ 64 <297000000>;
- opp-microvolt = <800000>;
+ opp-microvolt = <825000 825000 1150000>;
};
opp02 {
opp-hz = /bits/ 64 <400000000>;
- opp-microvolt = <825000>;
+ opp-microvolt = <825000 825000 1150000>;
};
opp03 {
opp-hz = /bits/ 64 <500000000>;
- opp-microvolt = <875000>;
+ opp-microvolt = <875000 875000 1150000>;
};
opp04 {
opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <925000>;
+ opp-microvolt = <925000 925000 1150000>;
};
opp05 {
opp-hz = /bits/ 64 <800000000>;
- opp-microvolt = <1100000>;
+ opp-microvolt = <1100000 1100000 1150000>;
};
};
};
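The opp-microvolt values change from a single target to the <target min max> triplet defined by the operating-points-v2 binding: the OPP core will accept any regulator voltage inside the window instead of failing when the exact target cannot be reached, with the shared ceiling (1250000 or 1150000 here) presumably the rail's upper limit. In general form:

	opp03 {
		opp-hz = /bits/ 64 <1008000000>;
		/* <target min max>: the regulator may settle anywhere in [min, max] */
		opp-microvolt = <925000 925000 1250000>;
	};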
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
index ad7c4d00888f..04b54abea3cc 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-orangepi.dts
@@ -15,6 +15,12 @@
model = "Orange Pi RK3399 Board";
compatible = "rockchip,rk3399-orangepi", "rockchip,rk3399";
+ aliases {
+ mmc0 = &sdio0;
+ mmc1 = &sdmmc;
+ mmc2 = &sdhci;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
index 219b7507a10f..2b5f001ff4a6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-pinebook-pro.dts
@@ -18,6 +18,12 @@
model = "Pine64 Pinebook Pro";
compatible = "pine64,pinebook-pro", "rockchip,rk3399";
+ aliases {
+ mmc0 = &sdio0;
+ mmc1 = &sdmmc;
+ mmc2 = &sdhci;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
index a8d363568fd6..292bb7e80cf3 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
@@ -10,6 +10,10 @@
model = "Theobroma Systems RK3399-Q7 SoM";
compatible = "tsd,rk3399-puma-haikou", "rockchip,rk3399";
+ aliases {
+ mmc1 = &sdmmc;
+ };
+
chosen {
stdout-path = "serial0:115200n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
index 4660416c8f38..fb67db4619ea 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi
@@ -8,6 +8,10 @@
#include "rk3399-opp.dtsi"
/ {
+ aliases {
+ mmc0 = &sdhci;
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -21,57 +25,6 @@
};
};
- /*
- * Overwrite the opp-table for CPUB as this board uses a different
- * regulator (FAN53555) that only allows 10mV steps and therefore
- * can't reach the operation point target voltages from rk3399-opp.dtsi
- */
- /delete-node/ opp-table1;
- cluster1_opp: opp-table1 {
- compatible = "operating-points-v2";
- opp-shared;
-
- opp00 {
- opp-hz = /bits/ 64 <408000000>;
- opp-microvolt = <800000>;
- clock-latency-ns = <40000>;
- };
- opp01 {
- opp-hz = /bits/ 64 <600000000>;
- opp-microvolt = <800000>;
- };
- opp02 {
- opp-hz = /bits/ 64 <816000000>;
- opp-microvolt = <830000>;
- opp-suspend;
- };
- opp03 {
- opp-hz = /bits/ 64 <1008000000>;
- opp-microvolt = <880000>;
- };
- opp04 {
- opp-hz = /bits/ 64 <1200000000>;
- opp-microvolt = <950000>;
- };
- opp05 {
- opp-hz = /bits/ 64 <1416000000>;
- opp-microvolt = <1030000>;
- };
- opp06 {
- opp-hz = /bits/ 64 <1608000000>;
- opp-microvolt = <1100000>;
- };
- opp07 {
- opp-hz = /bits/ 64 <1800000000>;
- opp-microvolt = <1200000>;
- };
- opp08 {
- opp-hz = /bits/ 64 <1992000000>;
- opp-microvolt = <1230000>;
- turbo-mode;
- };
- };
-
clkin_gmac: external-gmac-clock {
compatible = "fixed-clock";
clock-frequency = <125000000>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-mezzanine.dts b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-mezzanine.dts
index 754627d97144..9447c8724b65 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-mezzanine.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc-mezzanine.dts
@@ -11,6 +11,10 @@
model = "Firefly ROC-RK3399-PC Mezzanine Board";
compatible = "firefly,roc-rk3399-pc-mezzanine", "rockchip,rk3399";
+ aliases {
+ mmc2 = &sdio0;
+ };
+
/* MP8009 PoE PD */
poe_12v: poe-12v {
compatible = "regulator-fixed";
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
index 20309076dbac..c172f5a803e7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-roc-pc.dtsi
@@ -13,6 +13,11 @@
model = "Firefly ROC-RK3399-PC Board";
compatible = "firefly,roc-rk3399-pc", "rockchip,rk3399";
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &sdhci;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
index fb7599f07af4..7d0a7c697703 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
@@ -11,6 +11,11 @@
#include "rk3399-opp.dtsi"
/ {
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &sdhci;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts
index f0055ce2fda0..6c63e617063c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4b.dts
@@ -10,6 +10,10 @@
/ {
model = "Radxa ROCK Pi 4B";
compatible = "radxa,rockpi4b", "radxa,rockpi4", "rockchip,rk3399";
+
+ aliases {
+ mmc2 = &sdio0;
+ };
};
&sdio0 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts
index 4c7ebb1c5d2d..99169bcd51c0 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4c.dts
@@ -11,6 +11,10 @@
/ {
model = "Radxa ROCK Pi 4C";
compatible = "radxa,rockpi4c", "radxa,rockpi4", "rockchip,rk3399";
+
+ aliases {
+ mmc2 = &sdio0;
+ };
};
&sdio0 {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
index 5e3ac589bc54..25dc61c26a94 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
@@ -9,6 +9,12 @@
#include "rk3399-opp.dtsi"
/ {
+ aliases {
+ mmc0 = &sdio0;
+ mmc1 = &sdmmc;
+ mmc2 = &sdhci;
+ };
+
sdio_pwrseq: sdio-pwrseq {
compatible = "mmc-pwrseq-simple";
clocks = <&rk808 1>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
index 5ab0b9edfc88..6bff8db7d33e 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dtsi
@@ -10,6 +10,12 @@
#include "rk3399-opp.dtsi"
/ {
+ aliases {
+ mmc0 = &sdio0;
+ mmc1 = &sdmmc;
+ mmc2 = &sdhci;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
@@ -36,6 +42,13 @@
};
};
+ ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio0 RK_PA6 GPIO_ACTIVE_LOW>;
+ pinctrl-0 = <&ir_int>;
+ pinctrl-names = "default";
+ };
+
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
@@ -604,6 +617,12 @@
};
};
+ ir {
+ ir_int: ir-int {
+ rockchip,pins = <0 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
leds {
work_led_pin: work-led-pin {
rockchip,pins = <0 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
index 73e269a8ae0c..f6b2199a42bd 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts
@@ -10,6 +10,10 @@
model = "Excavator-RK3399 Board";
compatible = "rockchip,rk3399-sapphire-excavator", "rockchip,rk3399";
+ aliases {
+ mmc2 = &sdio0;
+ };
+
adc-keys {
compatible = "adc-keys";
io-channels = <&saradc 1>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
index 701a567d7638..46b0f97a0b1c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi
@@ -11,6 +11,11 @@
/ {
compatible = "rockchip,rk3399-sapphire", "rockchip,rk3399";
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &sdhci;
+ };
+
chosen {
stdout-path = "serial2:1500000n8";
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index edbbf35fe19e..634a91af8e83 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -29,9 +29,6 @@
i2c6 = &i2c6;
i2c7 = &i2c7;
i2c8 = &i2c8;
- mmc0 = &sdio0;
- mmc1 = &sdmmc;
- mmc2 = &sdhci;
serial0 = &uart0;
serial1 = &uart1;
serial2 = &uart2;
@@ -1185,7 +1182,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pwm0_pin>;
clocks = <&pmucru PCLK_RKPWM_PMU>;
- clock-names = "pwm";
status = "disabled";
};
@@ -1196,7 +1192,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pwm1_pin>;
clocks = <&pmucru PCLK_RKPWM_PMU>;
- clock-names = "pwm";
status = "disabled";
};
@@ -1207,7 +1202,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pwm2_pin>;
clocks = <&pmucru PCLK_RKPWM_PMU>;
- clock-names = "pwm";
status = "disabled";
};
@@ -1218,7 +1212,6 @@
pinctrl-names = "default";
pinctrl-0 = <&pwm3a_pin>;
clocks = <&pmucru PCLK_RKPWM_PMU>;
- clock-names = "pwm";
status = "disabled";
};
@@ -1531,7 +1524,7 @@
};
watchdog@ff848000 {
- compatible = "snps,dw-wdt";
+ compatible = "rockchip,rk3399-wdt", "snps,dw-wdt";
reg = <0x0 0xff848000 0x0 0x100>;
clocks = <&cru PCLK_WDT>;
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH 0>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi b/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi
index 7257494d2831..c0074b3ed4af 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399pro-vmarc-som.dtsi
@@ -12,6 +12,11 @@
/ {
compatible = "vamrs,rk3399pro-vmarc-som", "rockchip,rk3399pro";
+ aliases {
+ mmc0 = &sdmmc;
+ mmc1 = &sdhci;
+ };
+
vcc3v3_pcie: vcc-pcie-regulator {
compatible = "regulator-fixed";
enable-active-high;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
index a87b8a678719..8f2c1c1e2c64 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
@@ -734,7 +734,7 @@
clocks = <&sys_clk 6>;
reset-names = "ether";
resets = <&sys_rst 6>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
local-mac-address = [00 00 00 00 00 00];
socionext,syscon-phy-mode = <&soc_glue 0>;
diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
index 0e52dadf54b3..be97da132258 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
@@ -564,7 +564,7 @@
clocks = <&sys_clk 6>;
reset-names = "ether";
resets = <&sys_rst 6>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
local-mac-address = [00 00 00 00 00 00];
socionext,syscon-phy-mode = <&soc_glue 0>;
@@ -585,7 +585,7 @@
clocks = <&sys_clk 7>;
reset-names = "ether";
resets = <&sys_rst 7>;
- phy-mode = "rgmii";
+ phy-mode = "rgmii-id";
local-mac-address = [00 00 00 00 00 00];
socionext,syscon-phy-mode = <&soc_glue 1>;
diff --git a/arch/arm64/boot/dts/ti/Makefile b/arch/arm64/boot/dts/ti/Makefile
index 65506f21ba30..d56c742f5a10 100644
--- a/arch/arm64/boot/dts/ti/Makefile
+++ b/arch/arm64/boot/dts/ti/Makefile
@@ -3,11 +3,17 @@
# Make file to build device tree binaries for boards based on
# Texas Instruments Inc processors
#
-# Copyright (C) 2016-2020 Texas Instruments Incorporated - https://www.ti.com/
+# Copyright (C) 2016-2021 Texas Instruments Incorporated - https://www.ti.com/
#
dtb-$(CONFIG_ARCH_K3) += k3-am654-base-board.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am6528-iot2050-basic.dtb
+dtb-$(CONFIG_ARCH_K3) += k3-am6548-iot2050-advanced.dtb
dtb-$(CONFIG_ARCH_K3) += k3-j721e-common-proc-board.dtb
dtb-$(CONFIG_ARCH_K3) += k3-j7200-common-proc-board.dtb
+
+dtb-$(CONFIG_ARCH_K3) += k3-am642-evm.dtb
+
+dtb-$(CONFIG_ARCH_K3) += k3-am642-sk.dtb
diff --git a/arch/arm64/boot/dts/ti/k3-am64-main.dtsi b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
new file mode 100644
index 000000000000..ca59d1f711f8
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am64-main.dtsi
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for AM642 SoC Family Main Domain peripherals
+ *
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+&cbass_main {
+ oc_sram: sram@70000000 {
+ compatible = "mmio-sram";
+ reg = <0x00 0x70000000 0x00 0x200000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x00 0x70000000 0x200000>;
+
+ atf-sram@0 {
+ reg = <0x0 0x1a000>;
+ };
+ };
+
+ gic500: interrupt-controller@1800000 {
+ compatible = "arm,gic-v3";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */
+ <0x00 0x01840000 0x00 0xC0000>; /* GICR */
+ /*
+ * vcpumntirq:
+ * virtual CPU interface maintenance interrupt
+ */
+ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+
+ gic_its: msi-controller@1820000 {
+ compatible = "arm,gic-v3-its";
+ reg = <0x00 0x01820000 0x00 0x10000>;
+ socionext,synquacer-pre-its = <0x1000000 0x400000>;
+ msi-controller;
+ #msi-cells = <1>;
+ };
+ };
+
+ dmss: bus@48000000 {
+ compatible = "simple-mfd";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ dma-ranges;
+ ranges = <0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>;
+
+ ti,sci-dev-id = <25>;
+
+ secure_proxy_main: mailbox@4d000000 {
+ compatible = "ti,am654-secure-proxy";
+ #mbox-cells = <1>;
+ reg-names = "target_data", "rt", "scfg";
+ reg = <0x00 0x4d000000 0x00 0x80000>,
+ <0x00 0x4a600000 0x00 0x80000>,
+ <0x00 0x4a400000 0x00 0x80000>;
+ interrupt-names = "rx_012";
+ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ inta_main_dmss: interrupt-controller@48000000 {
+ compatible = "ti,sci-inta";
+ reg = <0x00 0x48000000 0x00 0x100000>;
+ #interrupt-cells = <0>;
+ interrupt-controller;
+ interrupt-parent = <&gic500>;
+ msi-controller;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <28>;
+ ti,interrupt-ranges = <4 68 36>;
+ ti,unmapped-event-sources = <&main_bcdma>, <&main_pktdma>;
+ };
+
+ main_bcdma: dma-controller@485c0100 {
+ compatible = "ti,am64-dmss-bcdma";
+ reg = <0x00 0x485c0100 0x00 0x100>,
+ <0x00 0x4c000000 0x00 0x20000>,
+ <0x00 0x4a820000 0x00 0x20000>,
+ <0x00 0x4aa40000 0x00 0x20000>,
+ <0x00 0x4bc00000 0x00 0x100000>;
+ reg-names = "gcfg", "bchanrt", "rchanrt", "tchanrt", "ringrt";
+ msi-parent = <&inta_main_dmss>;
+ #dma-cells = <3>;
+
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <26>;
+ ti,sci-rm-range-bchan = <0x20>; /* BLOCK_COPY_CHAN */
+ ti,sci-rm-range-rchan = <0x21>; /* SPLIT_TR_RX_CHAN */
+ ti,sci-rm-range-tchan = <0x22>; /* SPLIT_TR_TX_CHAN */
+ };
+
+ main_pktdma: dma-controller@485c0000 {
+ compatible = "ti,am64-dmss-pktdma";
+ reg = <0x00 0x485c0000 0x00 0x100>,
+ <0x00 0x4a800000 0x00 0x20000>,
+ <0x00 0x4aa00000 0x00 0x40000>,
+ <0x00 0x4b800000 0x00 0x400000>;
+ reg-names = "gcfg", "rchanrt", "tchanrt", "ringrt";
+ msi-parent = <&inta_main_dmss>;
+ #dma-cells = <2>;
+
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <30>;
+ ti,sci-rm-range-tchan = <0x23>, /* UNMAPPED_TX_CHAN */
+ <0x24>, /* CPSW_TX_CHAN */
+ <0x25>, /* SAUL_TX_0_CHAN */
+ <0x26>, /* SAUL_TX_1_CHAN */
+ <0x27>, /* ICSSG_0_TX_CHAN */
+ <0x28>; /* ICSSG_1_TX_CHAN */
+ ti,sci-rm-range-tflow = <0x10>, /* RING_UNMAPPED_TX_CHAN */
+ <0x11>, /* RING_CPSW_TX_CHAN */
+ <0x12>, /* RING_SAUL_TX_0_CHAN */
+ <0x13>, /* RING_SAUL_TX_1_CHAN */
+ <0x14>, /* RING_ICSSG_0_TX_CHAN */
+ <0x15>; /* RING_ICSSG_1_TX_CHAN */
+ ti,sci-rm-range-rchan = <0x29>, /* UNMAPPED_RX_CHAN */
+ <0x2b>, /* CPSW_RX_CHAN */
+ <0x2d>, /* SAUL_RX_0_CHAN */
+ <0x2f>, /* SAUL_RX_1_CHAN */
+ <0x31>, /* SAUL_RX_2_CHAN */
+ <0x33>, /* SAUL_RX_3_CHAN */
+ <0x35>, /* ICSSG_0_RX_CHAN */
+ <0x37>; /* ICSSG_1_RX_CHAN */
+ ti,sci-rm-range-rflow = <0x2a>, /* FLOW_UNMAPPED_RX_CHAN */
+ <0x2c>, /* FLOW_CPSW_RX_CHAN */
+ <0x2e>, /* FLOW_SAUL_RX_0/1_CHAN */
+ <0x32>, /* FLOW_SAUL_RX_2/3_CHAN */
+ <0x36>, /* FLOW_ICSSG_0_RX_CHAN */
+ <0x38>; /* FLOW_ICSSG_1_RX_CHAN */
+ };
+ };
+
+ dmsc: system-controller@44043000 {
+ compatible = "ti,k2g-sci";
+ ti,host-id = <12>;
+ mbox-names = "rx", "tx";
+ mboxes = <&secure_proxy_main 12>,
+ <&secure_proxy_main 13>;
+ reg-names = "debug_messages";
+ reg = <0x00 0x44043000 0x00 0xfe0>;
+
+ k3_pds: power-controller {
+ compatible = "ti,sci-pm-domain";
+ #power-domain-cells = <2>;
+ };
+
+ k3_clks: clock-controller {
+ compatible = "ti,k2g-sci-clk";
+ #clock-cells = <2>;
+ };
+
+ k3_reset: reset-controller {
+ compatible = "ti,sci-reset";
+ #reset-cells = <2>;
+ };
+ };
+
+ main_pmx0: pinctrl@f4000 {
+ compatible = "pinctrl-single";
+ reg = <0x00 0xf4000 0x00 0x2d0>;
+ #pinctrl-cells = <1>;
+ pinctrl-single,register-width = <32>;
+ pinctrl-single,function-mask = <0xffffffff>;
+ };
+
+ main_conf: syscon@43000000 {
+ compatible = "syscon", "simple-mfd";
+ reg = <0x00 0x43000000 0x00 0x20000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x00 0x00 0x43000000 0x20000>;
+
+ chipid@14 {
+ compatible = "ti,am654-chipid";
+ reg = <0x00000014 0x4>;
+ };
+
+ phy_gmii_sel: phy@4044 {
+ compatible = "ti,am654-phy-gmii-sel";
+ reg = <0x4044 0x8>;
+ #phy-cells = <1>;
+ };
+ };
+
+ main_uart0: serial@2800000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x02800000 0x00 0x100>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <48000000>;
+ current-speed = <115200>;
+ power-domains = <&k3_pds 146 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 146 0>;
+ clock-names = "fclk";
+ };
+
+ main_uart1: serial@2810000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x02810000 0x00 0x100>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <48000000>;
+ current-speed = <115200>;
+ power-domains = <&k3_pds 152 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 152 0>;
+ clock-names = "fclk";
+ };
+
+ main_uart2: serial@2820000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x02820000 0x00 0x100>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <48000000>;
+ current-speed = <115200>;
+ power-domains = <&k3_pds 153 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 153 0>;
+ clock-names = "fclk";
+ };
+
+ main_uart3: serial@2830000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x02830000 0x00 0x100>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <48000000>;
+ current-speed = <115200>;
+ power-domains = <&k3_pds 154 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 154 0>;
+ clock-names = "fclk";
+ };
+
+ main_uart4: serial@2840000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x02840000 0x00 0x100>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <48000000>;
+ current-speed = <115200>;
+ power-domains = <&k3_pds 155 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 155 0>;
+ clock-names = "fclk";
+ };
+
+ main_uart5: serial@2850000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x02850000 0x00 0x100>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <48000000>;
+ current-speed = <115200>;
+ power-domains = <&k3_pds 156 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 156 0>;
+ clock-names = "fclk";
+ };
+
+ main_uart6: serial@2860000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x02860000 0x00 0x100>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <48000000>;
+ current-speed = <115200>;
+ power-domains = <&k3_pds 158 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 158 0>;
+ clock-names = "fclk";
+ };
+
+ main_i2c0: i2c@20000000 {
+ compatible = "ti,am64-i2c", "ti,omap4-i2c";
+ reg = <0x00 0x20000000 0x00 0x100>;
+ interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 102 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 102 2>;
+ clock-names = "fck";
+ };
+
+ main_i2c1: i2c@20010000 {
+ compatible = "ti,am64-i2c", "ti,omap4-i2c";
+ reg = <0x00 0x20010000 0x00 0x100>;
+ interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 103 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 103 2>;
+ clock-names = "fck";
+ };
+
+ main_i2c2: i2c@20020000 {
+ compatible = "ti,am64-i2c", "ti,omap4-i2c";
+ reg = <0x00 0x20020000 0x00 0x100>;
+ interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 104 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 104 2>;
+ clock-names = "fck";
+ };
+
+ main_i2c3: i2c@20030000 {
+ compatible = "ti,am64-i2c", "ti,omap4-i2c";
+ reg = <0x00 0x20030000 0x00 0x100>;
+ interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 105 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 105 2>;
+ clock-names = "fck";
+ };
+
+ main_spi0: spi@20100000 {
+ compatible = "ti,am654-mcspi", "ti,omap4-mcspi";
+ reg = <0x00 0x20100000 0x00 0x400>;
+ interrupts = <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 141 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 141 0>;
+ dmas = <&main_pktdma 0xc300 0>, <&main_pktdma 0x4300 0>;
+ dma-names = "tx0", "rx0";
+ };
+
+ main_spi1: spi@20110000 {
+ compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+ reg = <0x00 0x20110000 0x00 0x400>;
+ interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 142 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 142 0>;
+ };
+
+ main_spi2: spi@20120000 {
+ compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+ reg = <0x00 0x20120000 0x00 0x400>;
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 143 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 143 0>;
+ };
+
+ main_spi3: spi@20130000 {
+ compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+ reg = <0x00 0x20130000 0x00 0x400>;
+ interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 144 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 144 0>;
+ };
+
+ main_spi4: spi@20140000 {
+ compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+ reg = <0x00 0x20140000 0x00 0x400>;
+ interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 145 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 145 0>;
+ };
+
+ main_gpio_intr: interrupt-controller@a00000 {
+ compatible = "ti,sci-intr";
+ reg = <0x00 0x00a00000 0x00 0x800>;
+ ti,intr-trigger-type = <1>;
+ interrupt-controller;
+ interrupt-parent = <&gic500>;
+ #interrupt-cells = <1>;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <3>;
+ ti,interrupt-ranges = <0 32 16>;
+ };
+
+ main_gpio0: gpio@600000 {
+ compatible = "ti,am64-gpio", "ti,keystone-gpio";
+ reg = <0x0 0x00600000 0x0 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&main_gpio_intr>;
+ interrupts = <190>, <191>, <192>,
+ <193>, <194>, <195>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ti,ngpio = <87>;
+ ti,davinci-gpio-unbanked = <0>;
+ power-domains = <&k3_pds 77 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 77 0>;
+ clock-names = "gpio";
+ };
+
+ main_gpio1: gpio@601000 {
+ compatible = "ti,am64-gpio", "ti,keystone-gpio";
+ reg = <0x0 0x00601000 0x0 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&main_gpio_intr>;
+ interrupts = <180>, <181>, <182>,
+ <183>, <184>, <185>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ti,ngpio = <88>;
+ ti,davinci-gpio-unbanked = <0>;
+ power-domains = <&k3_pds 78 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 78 0>;
+ clock-names = "gpio";
+ };
+
+ sdhci0: mmc@fa10000 {
+ compatible = "ti,am64-sdhci-8bit";
+ reg = <0x00 0xfa10000 0x00 0x260>, <0x00 0xfa18000 0x00 0x134>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 57 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 57 0>, <&k3_clks 57 1>;
+ clock-names = "clk_ahb", "clk_xin";
+ mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
+ ti,trm-icp = <0x2>;
+ ti,otap-del-sel-legacy = <0x0>;
+ ti,otap-del-sel-mmc-hs = <0x0>;
+ ti,otap-del-sel-ddr52 = <0x6>;
+ ti,otap-del-sel-hs200 = <0x7>;
+ ti,otap-del-sel-hs400 = <0x4>;
+ };
+
+ sdhci1: mmc@fa00000 {
+ compatible = "ti,am64-sdhci-4bit";
+ reg = <0x00 0xfa00000 0x00 0x260>, <0x00 0xfa08000 0x00 0x134>;
+ interrupts = <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 58 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 58 3>, <&k3_clks 58 4>;
+ clock-names = "clk_ahb", "clk_xin";
+ ti,trm-icp = <0x2>;
+ ti,otap-del-sel-legacy = <0x0>;
+ ti,otap-del-sel-sd-hs = <0xf>;
+ ti,otap-del-sel-sdr12 = <0xf>;
+ ti,otap-del-sel-sdr25 = <0xf>;
+ ti,otap-del-sel-sdr50 = <0xc>;
+ ti,otap-del-sel-sdr104 = <0x6>;
+ ti,otap-del-sel-ddr50 = <0x9>;
+ ti,clkbuf-sel = <0x7>;
+ };
+
+ cpsw3g: ethernet@8000000 {
+ compatible = "ti,am642-cpsw-nuss";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ reg = <0x0 0x8000000 0x0 0x200000>;
+ reg-names = "cpsw_nuss";
+ ranges = <0x0 0x0 0x0 0x8000000 0x0 0x200000>;
+ clocks = <&k3_clks 13 0>;
+ assigned-clocks = <&k3_clks 13 1>;
+ assigned-clock-parents = <&k3_clks 13 9>;
+ clock-names = "fck";
+ power-domains = <&k3_pds 13 TI_SCI_PD_EXCLUSIVE>;
+
+ dmas = <&main_pktdma 0xC500 15>,
+ <&main_pktdma 0xC501 15>,
+ <&main_pktdma 0xC502 15>,
+ <&main_pktdma 0xC503 15>,
+ <&main_pktdma 0xC504 15>,
+ <&main_pktdma 0xC505 15>,
+ <&main_pktdma 0xC506 15>,
+ <&main_pktdma 0xC507 15>,
+ <&main_pktdma 0x4500 15>;
+ dma-names = "tx0", "tx1", "tx2", "tx3", "tx4", "tx5", "tx6",
+ "tx7", "rx";
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpsw_port1: port@1 {
+ reg = <1>;
+ ti,mac-only;
+ label = "port1";
+ phys = <&phy_gmii_sel 1>;
+ mac-address = [00 00 de ad be ef];
+ };
+
+ cpsw_port2: port@2 {
+ reg = <2>;
+ ti,mac-only;
+ label = "port2";
+ phys = <&phy_gmii_sel 2>;
+ mac-address = [00 01 de ad be ef];
+ };
+ };
+
+ cpsw3g_mdio: mdio@f00 {
+ compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+ reg = <0x0 0xf00 0x0 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&k3_clks 13 0>;
+ clock-names = "fck";
+ bus_freq = <1000000>;
+ };
+
+ cpts@3d000 {
+ compatible = "ti,j721e-cpts";
+ reg = <0x0 0x3d000 0x0 0x400>;
+ clocks = <&k3_clks 13 1>;
+ clock-names = "cpts";
+ interrupts-extended = <&gic500 GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cpts";
+ ti,cpts-ext-ts-inputs = <4>;
+ ti,cpts-periodic-outputs = <2>;
+ };
+ };
+
+ cpts@39000000 {
+ compatible = "ti,j721e-cpts";
+ reg = <0x0 0x39000000 0x0 0x400>;
+ reg-names = "cpts";
+ power-domains = <&k3_pds 84 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 84 0>;
+ clock-names = "cpts";
+ assigned-clocks = <&k3_clks 84 0>;
+ assigned-clock-parents = <&k3_clks 84 8>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "cpts";
+ ti,cpts-periodic-outputs = <6>;
+ ti,cpts-ext-ts-inputs = <8>;
+ };
+
+ usbss0: cdns-usb@f900000 {
+ compatible = "ti,am64-usb";
+ reg = <0x00 0xf900000 0x00 0x100>;
+ power-domains = <&k3_pds 161 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 161 9>, <&k3_clks 161 1>;
+ clock-names = "ref", "lpm";
+ assigned-clocks = <&k3_clks 161 9>; /* USB2_REFCLK */
+ assigned-clock-parents = <&k3_clks 161 10>; /* HF0SC0 */
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ usb0: usb@f400000 {
+ compatible = "cdns,usb3";
+ reg = <0x00 0xf400000 0x00 0x10000>,
+ <0x00 0xf410000 0x00 0x10000>,
+ <0x00 0xf420000 0x00 0x10000>;
+ reg-names = "otg",
+ "xhci",
+ "dev";
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>, /* irq.0 */
+ <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>, /* irq.6 */
+ <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>; /* otgirq */
+ interrupt-names = "host",
+ "peripheral",
+ "otg";
+ maximum-speed = "super-speed";
+ dr_mode = "otg";
+ };
+ };
+
+ tscadc0: tscadc@28001000 {
+ compatible = "ti,am654-tscadc", "ti,am3359-tscadc";
+ reg = <0x00 0x28001000 0x00 0x1000>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ power-domains = <&k3_pds 0 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 0 0>;
+ assigned-clocks = <&k3_clks 0 0>;
+ assigned-clock-parents = <&k3_clks 0 3>;
+ assigned-clock-rates = <60000000>;
+ clock-names = "adc_tsc_fck";
+
+ adc {
+ #io-channel-cells = <1>;
+ compatible = "ti,am654-adc", "ti,am3359-adc";
+ };
+ };
+
+ fss: bus@fc00000 {
+ compatible = "simple-bus";
+ reg = <0x00 0x0fc00000 0x00 0x70000>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ ospi0: spi@fc40000 {
+ compatible = "ti,am654-ospi", "cdns,qspi-nor";
+ reg = <0x00 0x0fc40000 0x00 0x100>,
+ <0x05 0x00000000 0x01 0x00000000>;
+ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>;
+ cdns,fifo-depth = <256>;
+ cdns,fifo-width = <4>;
+ cdns,trigger-address = <0x0>;
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ clocks = <&k3_clks 75 6>;
+ assigned-clocks = <&k3_clks 75 6>;
+ assigned-clock-parents = <&k3_clks 75 7>;
+ assigned-clock-rates = <166666666>;
+ power-domains = <&k3_pds 75 TI_SCI_PD_EXCLUSIVE>;
+ };
+ };
+
+ hwspinlock: spinlock@2a000000 {
+ compatible = "ti,am64-hwspinlock";
+ reg = <0x00 0x2a000000 0x00 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ mailbox0_cluster2: mailbox@29020000 {
+ compatible = "ti,am64-mailbox";
+ reg = <0x00 0x29020000 0x00 0x200>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <16>;
+ };
+
+ mailbox0_cluster3: mailbox@29030000 {
+ compatible = "ti,am64-mailbox";
+ reg = <0x00 0x29030000 0x00 0x200>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <16>;
+ };
+
+ mailbox0_cluster4: mailbox@29040000 {
+ compatible = "ti,am64-mailbox";
+ reg = <0x00 0x29040000 0x00 0x200>;
+ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <16>;
+ };
+
+ mailbox0_cluster5: mailbox@29050000 {
+ compatible = "ti,am64-mailbox";
+ reg = <0x00 0x29050000 0x00 0x200>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <16>;
+ };
+
+ mailbox0_cluster6: mailbox@29060000 {
+ compatible = "ti,am64-mailbox";
+ reg = <0x00 0x29060000 0x00 0x200>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <16>;
+ };
+
+ mailbox0_cluster7: mailbox@29070000 {
+ compatible = "ti,am64-mailbox";
+ reg = <0x00 0x29070000 0x00 0x200>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
+ #mbox-cells = <1>;
+ ti,mbox-num-users = <4>;
+ ti,mbox-num-fifos = <16>;
+ };
+};
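Almost everything in this main-domain .dtsi is meant to be enabled per board: K3 boards mux pads through main_pmx0 (pinctrl-single, one 32-bit register per pad) and then hand the pin group to the peripheral. A minimal sketch for main_uart0, assuming the AM64X_IOPAD macro from dt-bindings/pinctrl/k3.h and placeholder pad offsets (real offsets come from the AM64x datasheet):

	&main_pmx0 {
		main_uart0_pins_default: main-uart0-pins-default {
			pinctrl-single,pins = <
				AM64X_IOPAD(0x0230, PIN_INPUT, 0)	/* assumed UART0_RXD pad */
				AM64X_IOPAD(0x0234, PIN_OUTPUT, 0)	/* assumed UART0_TXD pad */
			>;
		};
	};

	&main_uart0 {
		pinctrl-names = "default";
		pinctrl-0 = <&main_uart0_pins_default>;
	};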
diff --git a/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
new file mode 100644
index 000000000000..deb19ae5e168
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am64-mcu.dtsi
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for AM64 SoC Family MCU Domain peripherals
+ *
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+&cbass_mcu {
+ mcu_uart0: serial@4a00000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x04a00000 0x00 0x100>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <48000000>;
+ current-speed = <115200>;
+ power-domains = <&k3_pds 149 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 149 0>;
+ clock-names = "fclk";
+ };
+
+ mcu_uart1: serial@4a10000 {
+ compatible = "ti,am64-uart", "ti,am654-uart";
+ reg = <0x00 0x04a10000 0x00 0x100>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <48000000>;
+ current-speed = <115200>;
+ power-domains = <&k3_pds 160 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 160 0>;
+ clock-names = "fclk";
+ };
+
+ mcu_i2c0: i2c@4900000 {
+ compatible = "ti,am64-i2c", "ti,omap4-i2c";
+ reg = <0x00 0x04900000 0x00 0x100>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 106 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 106 2>;
+ clock-names = "fck";
+ };
+
+ mcu_i2c1: i2c@4910000 {
+ compatible = "ti,am64-i2c", "ti,omap4-i2c";
+ reg = <0x00 0x04910000 0x00 0x100>;
+ interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 107 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 107 2>;
+ clock-names = "fck";
+ };
+
+ mcu_spi0: spi@4b00000 {
+ compatible = "ti,am654-mcspi", "ti,omap4-mcspi";
+ reg = <0x00 0x04b00000 0x00 0x400>;
+ interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 147 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 147 0>;
+ };
+
+ mcu_spi1: spi@4b10000 {
+ compatible = "ti,am654-mcspi","ti,omap4-mcspi";
+ reg = <0x00 0x04b10000 0x00 0x400>;
+ interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ power-domains = <&k3_pds 148 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 148 0>;
+ };
+
+ mcu_gpio_intr: interrupt-controller@4210000 {
+ compatible = "ti,sci-intr";
+ reg = <0x00 0x04210000 0x00 0x200>;
+ ti,intr-trigger-type = <1>;
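+ /* 1 = edge-triggered; the ti,sci-intr binding uses 4 for level */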
+ interrupt-controller;
+ interrupt-parent = <&gic500>;
+ #interrupt-cells = <1>;
+ ti,sci = <&dmsc>;
+ ti,sci-dev-id = <5>;
+ ti,interrupt-ranges = <0 104 4>;
+ };
+
+ mcu_gpio0: gpio@4201000 {
+ compatible = "ti,am64-gpio", "keystone-gpio";
+ reg = <0x0 0x4201000 0x0 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&mcu_gpio_intr>;
+ interrupts = <30>, <31>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ ti,ngpio = <23>;
+ ti,davinci-gpio-unbanked = <0>;
+ power-domains = <&k3_pds 79 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 79 0>;
+ clock-names = "gpio";
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am64.dtsi b/arch/arm64/boot/dts/ti/k3-am64.dtsi
new file mode 100644
index 000000000000..de6805b0c72c
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am64.dtsi
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for AM642 SoC Family
+ *
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/pinctrl/k3.h>
+#include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+/ {
+ model = "Texas Instruments K3 AM642 SoC";
+ compatible = "ti,am642";
+ interrupt-parent = <&gic500>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ serial0 = &mcu_uart0;
+ serial1 = &mcu_uart1;
+ serial2 = &main_uart0;
+ serial3 = &main_uart1;
+ serial4 = &main_uart2;
+ serial5 = &main_uart3;
+ serial6 = &main_uart4;
+ serial7 = &main_uart5;
+ serial8 = &main_uart6;
+ ethernet0 = &cpsw_port1;
+ ethernet1 = &cpsw_port2;
+ };
+
+ chosen { };
+
+ firmware {
+ optee {
+ compatible = "linaro,optee-tz";
+ method = "smc";
+ };
+
+ psci: psci {
+ compatible = "arm,psci-1.0";
+ method = "smc";
+ };
+ };
+
+ a53_timer0: timer-cl0-cpu0 {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, /* cntpsirq */
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, /* cntpnsirq */
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, /* cntvirq */
+ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; /* cnthpirq */
+ };
+
+ pmu: pmu {
+ compatible = "arm,cortex-a53-pmu";
+ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ cbass_main: bus@f4000 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x00 0x000f4000 0x00 0x000f4000 0x00 0x000002d0>, /* PINCTRL */
+ <0x00 0x00600000 0x00 0x00600000 0x00 0x00001100>, /* GPIO */
+ <0x00 0x00a40000 0x00 0x00a40000 0x00 0x00000800>, /* Timesync router */
+ <0x00 0x01000000 0x00 0x01000000 0x00 0x02330400>, /* First peripheral window */
+ <0x00 0x08000000 0x00 0x08000000 0x00 0x00200000>, /* Main CPSW */
+ <0x00 0x0d000000 0x00 0x0d000000 0x00 0x00800000>, /* PCIE_CORE */
+ <0x00 0x0f000000 0x00 0x0f000000 0x00 0x00c44200>, /* Second peripheral window */
+ <0x00 0x20000000 0x00 0x20000000 0x00 0x0a008000>, /* Third peripheral window */
+ <0x00 0x30000000 0x00 0x30000000 0x00 0x000bc100>, /* ICSSG0/1 */
+ <0x00 0x37000000 0x00 0x37000000 0x00 0x00040000>, /* TIMERMGR0 TIMERS */
+ <0x00 0x39000000 0x00 0x39000000 0x00 0x00000400>, /* CPTS0 */
+ <0x00 0x3b000000 0x00 0x3b000000 0x00 0x00000400>, /* GPMC0_CFG */
+ <0x00 0x3cd00000 0x00 0x3cd00000 0x00 0x00000200>, /* TIMERMGR0_CONFIG */
+ <0x00 0x3f004000 0x00 0x3f004000 0x00 0x00000400>, /* GICSS0_REGS */
+ <0x00 0x43000000 0x00 0x43000000 0x00 0x00020000>, /* CTRL_MMR0 */
+ <0x00 0x44043000 0x00 0x44043000 0x00 0x00000fe0>, /* TI SCI DEBUG */
+ <0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>, /* DMASS */
+ <0x00 0x50000000 0x00 0x50000000 0x00 0x08000000>, /* GPMC0 DATA */
+ <0x00 0x60000000 0x00 0x60000000 0x00 0x08000000>, /* FSS0 DAT1 */
+ <0x00 0x68000000 0x00 0x68000000 0x00 0x08000000>, /* PCIe DAT0 */
+ <0x00 0x70000000 0x00 0x70000000 0x00 0x00200000>, /* OC SRAM */
+ <0x00 0x78000000 0x00 0x78000000 0x00 0x00800000>, /* Main R5FSS */
+ <0x06 0x00000000 0x06 0x00000000 0x01 0x00000000>, /* PCIe DAT1 */
+ <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, /* FSS0 DAT3 */
+
+ /* MCU Domain Range */
+ <0x00 0x04000000 0x00 0x04000000 0x00 0x01ff1400>;
+
+ cbass_mcu: bus@4000000 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0x00 0x04000000 0x00 0x04000000 0x00 0x01ff1400>; /* Peripheral window */
+ };
+ };
+};
+
+/* Now include the peripherals for each bus segment */
+#include "k3-am64-main.dtsi"
+#include "k3-am64-mcu.dtsi"
diff --git a/arch/arm64/boot/dts/ti/k3-am642-evm.dts b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
new file mode 100644
index 000000000000..dad0efa961ed
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am642-evm.dts
@@ -0,0 +1,468 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/net/ti-dp83867.h>
+#include "k3-am642.dtsi"
+
+/ {
+ compatible = "ti,am642-evm", "ti,am642";
+ model = "Texas Instruments AM642 EVM";
+
+ chosen {
+ stdout-path = "serial2:115200n8";
+ bootargs = "console=ttyS2,115200n8 earlycon=ns16550a,mmio32,0x02800000";
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* 2G RAM */
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ secure_ddr: optee@9e800000 {
+ reg = <0x00 0x9e800000 0x00 0x01800000>; /* for OP-TEE */
+ alignment = <0x1000>;
+ no-map;
+ };
+ };
+
+ evm_12v0: fixedregulator-evm12v0 {
+ /* main DC jack */
+ compatible = "regulator-fixed";
+ regulator-name = "evm_12v0";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vsys_5v0: fixedregulator-vsys5v0 {
+ /* output of LM5140 */
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&evm_12v0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vsys_3v3: fixedregulator-vsys3v3 {
+ /* output of LM5140 */
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&evm_12v0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_mmc1: fixed-regulator-sd {
+ /* TPS2051BD */
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_mmc1";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ enable-active-high;
+ vin-supply = <&vsys_3v3>;
+ gpio = <&exp1 6 GPIO_ACTIVE_HIGH>;
+ };
+
+ vddb: fixedregulator-vddb {
+ compatible = "regulator-fixed";
+ regulator-name = "vddb_3v3_display";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vsys_3v3>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ label = "am64-evm:red:heartbeat";
+ gpios = <&exp1 16 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ function = LED_FUNCTION_HEARTBEAT;
+ default-state = "off";
+ };
+ };
+
+ mdio_mux: mux-controller {
+ compatible = "gpio-mux";
+ #mux-control-cells = <0>;
+
+ mux-gpios = <&exp1 12 GPIO_ACTIVE_HIGH>;
+ };
+
+ mdio-mux-1 {
+ compatible = "mdio-mux-multiplexer";
+ mux-controls = <&mdio_mux>;
+ mdio-parent-bus = <&cpsw3g_mdio>;
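+ /* mdio_mux (an expander GPIO) selects which child bus the shared MDIO lines reach */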
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio@1 {
+ reg = <0x1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpsw3g_phy3: ethernet-phy@3 {
+ reg = <3>;
+ };
+ };
+ };
+};
+
+&main_pmx0 {
+ main_mmc1_pins_default: main-mmc1-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0294, PIN_INPUT_PULLUP, 0) /* (J19) MMC1_CMD */
+ AM64X_IOPAD(0x028c, PIN_INPUT_PULLDOWN, 0) /* (L20) MMC1_CLK */
+ AM64X_IOPAD(0x0288, PIN_INPUT_PULLUP, 0) /* (K21) MMC1_DAT0 */
+ AM64X_IOPAD(0x0284, PIN_INPUT_PULLUP, 0) /* (L21) MMC1_DAT1 */
+ AM64X_IOPAD(0x0280, PIN_INPUT_PULLUP, 0) /* (K19) MMC1_DAT2 */
+ AM64X_IOPAD(0x027c, PIN_INPUT_PULLUP, 0) /* (K18) MMC1_DAT3 */
+ AM64X_IOPAD(0x0298, PIN_INPUT_PULLUP, 0) /* (D19) MMC1_SDCD */
+ AM64X_IOPAD(0x029c, PIN_INPUT, 0) /* (C20) MMC1_SDWP */
+ AM64X_IOPAD(0x0290, PIN_INPUT, 0) /* MMC1_CLKLB */
+ >;
+ };
+
+ main_uart0_pins_default: main-uart0-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0238, PIN_INPUT, 0) /* (B16) UART0_CTSn */
+ AM64X_IOPAD(0x023c, PIN_OUTPUT, 0) /* (A16) UART0_RTSn */
+ AM64X_IOPAD(0x0230, PIN_INPUT, 0) /* (D15) UART0_RXD */
+ AM64X_IOPAD(0x0234, PIN_OUTPUT, 0) /* (C16) UART0_TXD */
+ >;
+ };
+
+ main_spi0_pins_default: main-spi0-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0210, PIN_INPUT, 0) /* (D13) SPI0_CLK */
+ AM64X_IOPAD(0x0208, PIN_OUTPUT, 0) /* (D12) SPI0_CS0 */
+ AM64X_IOPAD(0x0214, PIN_OUTPUT, 0) /* (A13) SPI0_D0 */
+ AM64X_IOPAD(0x0218, PIN_INPUT, 0) /* (A14) SPI0_D1 */
+ >;
+ };
+
+ main_i2c1_pins_default: main-i2c1-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0268, PIN_INPUT_PULLUP, 0) /* (C18) I2C1_SCL */
+ AM64X_IOPAD(0x026c, PIN_INPUT_PULLUP, 0) /* (B19) I2C1_SDA */
+ >;
+ };
+
+ mdio1_pins_default: mdio1-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x01fc, PIN_OUTPUT, 4) /* (R2) PRG0_PRU1_GPO19.MDIO0_MDC */
+ AM64X_IOPAD(0x01f8, PIN_INPUT, 4) /* (P5) PRG0_PRU1_GPO18.MDIO0_MDIO */
+ >;
+ };
+
+ rgmii1_pins_default: rgmii1-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x01cc, PIN_INPUT, 4) /* (W5) PRG0_PRU1_GPO7.RGMII1_RD0 */
+ AM64X_IOPAD(0x01d4, PIN_INPUT, 4) /* (Y5) PRG0_PRU1_GPO9.RGMII1_RD1 */
+ AM64X_IOPAD(0x01d8, PIN_INPUT, 4) /* (V6) PRG0_PRU1_GPO10.RGMII1_RD2 */
+ AM64X_IOPAD(0x01f4, PIN_INPUT, 4) /* (V5) PRG0_PRU1_GPO17.RGMII1_RD3 */
+ AM64X_IOPAD(0x0188, PIN_INPUT, 4) /* (AA5) PRG0_PRU0_GPO10.RGMII1_RXC */
+ AM64X_IOPAD(0x0184, PIN_INPUT, 4) /* (W6) PRG0_PRU0_GPO9.RGMII1_RX_CTL */
+ AM64X_IOPAD(0x0124, PIN_OUTPUT, 4) /* (V15) PRG1_PRU1_GPO7.RGMII1_TD0 */
+ AM64X_IOPAD(0x012c, PIN_OUTPUT, 4) /* (V14) PRG1_PRU1_GPO9.RGMII1_TD1 */
+ AM64X_IOPAD(0x0130, PIN_OUTPUT, 4) /* (W14) PRG1_PRU1_GPO10.RGMII1_TD2 */
+ AM64X_IOPAD(0x014c, PIN_OUTPUT, 4) /* (AA14) PRG1_PRU1_GPO17.RGMII1_TD3 */
+ AM64X_IOPAD(0x00e0, PIN_OUTPUT, 4) /* (U14) PRG1_PRU0_GPO10.RGMII1_TXC */
+ AM64X_IOPAD(0x00dc, PIN_OUTPUT, 4) /* (U15) PRG1_PRU0_GPO9.RGMII1_TX_CTL */
+ >;
+ };
+
+ rgmii2_pins_default: rgmii2-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0108, PIN_INPUT, 4) /* (W11) PRG1_PRU1_GPO0.RGMII2_RD0 */
+ AM64X_IOPAD(0x010c, PIN_INPUT, 4) /* (V11) PRG1_PRU1_GPO1.RGMII2_RD1 */
+ AM64X_IOPAD(0x0110, PIN_INPUT, 4) /* (AA12) PRG1_PRU1_GPO2.RGMII2_RD2 */
+ AM64X_IOPAD(0x0114, PIN_INPUT, 4) /* (Y12) PRG1_PRU1_GPO3.RGMII2_RD3 */
+ AM64X_IOPAD(0x0120, PIN_INPUT, 4) /* (U11) PRG1_PRU1_GPO6.RGMII2_RXC */
+ AM64X_IOPAD(0x0118, PIN_INPUT, 4) /* (W12) PRG1_PRU1_GPO4.RGMII2_RX_CTL */
+ AM64X_IOPAD(0x0134, PIN_OUTPUT, 4) /* (AA10) PRG1_PRU1_GPO11.RGMII2_TD0 */
+ AM64X_IOPAD(0x0138, PIN_OUTPUT, 4) /* (V10) PRG1_PRU1_GPO12.RGMII2_TD1 */
+ AM64X_IOPAD(0x013c, PIN_OUTPUT, 4) /* (U10) PRG1_PRU1_GPO13.RGMII2_TD2 */
+ AM64X_IOPAD(0x0140, PIN_OUTPUT, 4) /* (AA11) PRG1_PRU1_GPO14.RGMII2_TD3 */
+ AM64X_IOPAD(0x0148, PIN_OUTPUT, 4) /* (Y10) PRG1_PRU1_GPO16.RGMII2_TXC */
+ AM64X_IOPAD(0x0144, PIN_OUTPUT, 4) /* (Y11) PRG1_PRU1_GPO15.RGMII2_TX_CTL */
+ >;
+ };
+
+ main_usb0_pins_default: main-usb0-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x02a8, PIN_OUTPUT, 0) /* (E19) USB0_DRVVBUS */
+ >;
+ };
+
+ ospi0_pins_default: ospi0-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0000, PIN_OUTPUT, 0) /* (N20) OSPI0_CLK */
+ AM64X_IOPAD(0x002c, PIN_OUTPUT, 0) /* (L19) OSPI0_CSn0 */
+ AM64X_IOPAD(0x000c, PIN_INPUT, 0) /* (M19) OSPI0_D0 */
+ AM64X_IOPAD(0x0010, PIN_INPUT, 0) /* (M18) OSPI0_D1 */
+ AM64X_IOPAD(0x0014, PIN_INPUT, 0) /* (M20) OSPI0_D2 */
+ AM64X_IOPAD(0x0018, PIN_INPUT, 0) /* (M21) OSPI0_D3 */
+ AM64X_IOPAD(0x001c, PIN_INPUT, 0) /* (P21) OSPI0_D4 */
+ AM64X_IOPAD(0x0020, PIN_INPUT, 0) /* (P20) OSPI0_D5 */
+ AM64X_IOPAD(0x0024, PIN_INPUT, 0) /* (N18) OSPI0_D6 */
+ AM64X_IOPAD(0x0028, PIN_INPUT, 0) /* (M17) OSPI0_D7 */
+ AM64X_IOPAD(0x0008, PIN_INPUT, 0) /* (N19) OSPI0_DQS */
+ >;
+ };
+};
+
+&main_uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart0_pins_default>;
+};
+
+/* main_uart1 is reserved for firmware usage */
+&main_uart1 {
+ status = "reserved";
+};
+
+&main_uart2 {
+ status = "disabled";
+};
+
+&main_uart3 {
+ status = "disabled";
+};
+
+&main_uart4 {
+ status = "disabled";
+};
+
+&main_uart5 {
+ status = "disabled";
+};
+
+&main_uart6 {
+ status = "disabled";
+};
+
+&mcu_uart0 {
+ status = "disabled";
+};
+
+&mcu_uart1 {
+ status = "disabled";
+};
+
+&main_i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c1_pins_default>;
+ clock-frequency = <400000>;
+
+ exp1: gpio@22 {
+ compatible = "ti,tca6424";
+ reg = <0x22>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "GPIO_eMMC_RSTn", "CAN_MUX_SEL",
+ "GPIO_CPSW1_RST", "GPIO_RGMII1_RST",
+ "GPIO_RGMII2_RST", "GPIO_PCIe_RST_OUT",
+ "MMC1_SD_EN", "FSI_FET_SEL",
+ "MCAN0_STB_3V3", "MCAN1_STB_3V3",
+ "CPSW_FET_SEL", "CPSW_FET2_SEL",
+ "PRG1_RGMII2_FET_SEL", "TEST_GPIO2",
+ "GPIO_OLED_RESETn", "VPP_LDO_EN",
+ "TEST_LED1", "TP92", "TP90", "TP88",
+ "TP87", "TP86", "TP89", "TP91";
+ };
+
+ /* osd9616p0899-10 */
+ display@3c {
+ compatible = "solomon,ssd1306fb-i2c";
+ reg = <0x3c>;
+ reset-gpios = <&exp1 14 GPIO_ACTIVE_LOW>;
+ vbat-supply = <&vddb>;
+ solomon,height = <16>;
+ solomon,width = <96>;
+ solomon,com-seq;
+ solomon,com-invdir;
+ solomon,page-offset = <0>;
+ solomon,prechargep1 = <2>;
+ solomon,prechargep2 = <13>;
+ };
+};
+
+/* mcu_gpio0 is reserved for mcu firmware usage */
+&mcu_gpio0 {
+ status = "reserved";
+};
+
+&mcu_i2c0 {
+ status = "disabled";
+};
+
+&mcu_i2c1 {
+ status = "disabled";
+};
+
+&mcu_spi0 {
+ status = "disabled";
+};
+
+&mcu_spi1 {
+ status = "disabled";
+};
+
+&main_spi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_spi0_pins_default>;
+ ti,pindir-d0-out-d1-in = <1>;
+ eeprom@0 {
+ compatible = "microchip,93lc46b";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ spi-cs-high;
+ data-size = <16>;
+ };
+};
+
+&sdhci0 {
+ /* emmc */
+ bus-width = <8>;
+ non-removable;
+ ti,driver-strength-ohm = <50>;
+ disable-wp;
+};
+
+&sdhci1 {
+ /* SD/MMC */
+ vmmc-supply = <&vdd_mmc1>;
+ pinctrl-names = "default";
+ bus-width = <4>;
+ pinctrl-0 = <&main_mmc1_pins_default>;
+ ti,driver-strength-ohm = <50>;
+ disable-wp;
+};
+
+&usbss0 {
+ ti,vbus-divider;
+ ti,usb2-only;
+};
+
+&usb0 {
+ dr_mode = "otg";
+ maximum-speed = "high-speed";
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_usb0_pins_default>;
+};
+
+&cpsw3g {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mdio1_pins_default
+ &rgmii1_pins_default
+ &rgmii2_pins_default>;
+};
+
+&cpsw_port1 {
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&cpsw3g_phy0>;
+};
+
+&cpsw_port2 {
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&cpsw3g_phy3>;
+};
+
+&cpsw3g_mdio {
+ cpsw3g_phy0: ethernet-phy@0 {
+ reg = <0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ };
+};
+
+&tscadc0 {
+ /* ADC is reserved for R5 usage */
+ status = "reserved";
+};
+
+&ospi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ospi0_pins_default>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-tx-bus-width = <8>;
+ spi-rx-bus-width = <8>;
+ spi-max-frequency = <25000000>;
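+ /* Cadence controller chip-select timing parameters, in nanoseconds */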
+ cdns,tshsl-ns = <60>;
+ cdns,tsd2d-ns = <60>;
+ cdns,tchsh-ns = <60>;
+ cdns,tslch-ns = <60>;
+ cdns,read-delay = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
+
+&mailbox0_cluster2 {
+ mbox_main_r5fss0_core0: mbox-main-r5fss0-core0 {
+ ti,mbox-rx = <0 0 2>;
+ ti,mbox-tx = <1 0 2>;
+ };
+
+ mbox_main_r5fss0_core1: mbox-main-r5fss0-core1 {
+ ti,mbox-rx = <2 0 2>;
+ ti,mbox-tx = <3 0 2>;
+ };
+};
+
+&mailbox0_cluster3 {
+ status = "disabled";
+};
+
+&mailbox0_cluster4 {
+ mbox_main_r5fss1_core0: mbox-main-r5fss1-core0 {
+ ti,mbox-rx = <0 0 2>;
+ ti,mbox-tx = <1 0 2>;
+ };
+
+ mbox_main_r5fss1_core1: mbox-main-r5fss1-core1 {
+ ti,mbox-rx = <2 0 2>;
+ ti,mbox-tx = <3 0 2>;
+ };
+};
+
+&mailbox0_cluster5 {
+ status = "disabled";
+};
+
+&mailbox0_cluster6 {
+ mbox_m4_0: mbox-m4-0 {
+ ti,mbox-rx = <0 0 2>;
+ ti,mbox-tx = <1 0 2>;
+ };
+};
+
+&mailbox0_cluster7 {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am642-sk.dts b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
new file mode 100644
index 000000000000..8424cd071955
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am642-sk.dts
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/net/ti-dp83867.h>
+#include "k3-am642.dtsi"
+
+/ {
+ compatible = "ti,am642-sk", "ti,am642";
+ model = "Texas Instruments AM642 SK";
+
+ chosen {
+ stdout-path = "serial2:115200n8";
+ bootargs = "console=ttyS2,115200n8 earlycon=ns16550a,mmio32,0x02800000";
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ /* 2G RAM */
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>;
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ secure_ddr: optee@9e800000 {
+ reg = <0x00 0x9e800000 0x00 0x01800000>; /* for OP-TEE */
+ alignment = <0x1000>;
+ no-map;
+ };
+ };
+
+ vusb_main: fixed-regulator-vusb-main5v0 {
+ /* USB MAIN INPUT 5V DC */
+ compatible = "regulator-fixed";
+ regulator-name = "vusb_main5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc_3v3_sys: fixedregulator-vcc-3v3-sys {
+ /* output of LP8733xx */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3_sys";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vusb_main>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_mmc1: fixed-regulator-sd {
+ /* TPS2051BD */
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_mmc1";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ enable-active-high;
+ vin-supply = <&vcc_3v3_sys>;
+ gpio = <&exp1 3 GPIO_ACTIVE_HIGH>;
+ };
+};
+
+&main_pmx0 {
+ main_mmc1_pins_default: main-mmc1-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0294, PIN_INPUT, 0) /* (J19) MMC1_CMD */
+ AM64X_IOPAD(0x0290, PIN_INPUT, 0) /* (#N/A) MMC1_CLKLB */
+ AM64X_IOPAD(0x028c, PIN_INPUT, 0) /* (L20) MMC1_CLK */
+ AM64X_IOPAD(0x0288, PIN_INPUT, 0) /* (K21) MMC1_DAT0 */
+ AM64X_IOPAD(0x0284, PIN_INPUT, 0) /* (L21) MMC1_DAT1 */
+ AM64X_IOPAD(0x0280, PIN_INPUT, 0) /* (K19) MMC1_DAT2 */
+ AM64X_IOPAD(0x027c, PIN_INPUT, 0) /* (K18) MMC1_DAT3 */
+ AM64X_IOPAD(0x0298, PIN_INPUT, 0) /* (D19) MMC1_SDCD */
+ >;
+ };
+
+ main_i2c1_pins_default: main-i2c1-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0268, PIN_INPUT_PULLUP, 0) /* (C18) I2C1_SCL */
+ AM64X_IOPAD(0x026c, PIN_INPUT_PULLUP, 0) /* (B19) I2C1_SDA */
+ >;
+ };
+
+ mdio1_pins_default: mdio1-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x01fc, PIN_OUTPUT, 4) /* (R2) PRG0_PRU1_GPO19.MDIO0_MDC */
+ AM64X_IOPAD(0x01f8, PIN_INPUT, 4) /* (P5) PRG0_PRU1_GPO18.MDIO0_MDIO */
+ >;
+ };
+
+ rgmii1_pins_default: rgmii1-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x011c, PIN_INPUT, 4) /* (AA13) PRG1_PRU1_GPO5.RGMII1_RD0 */
+ AM64X_IOPAD(0x0128, PIN_INPUT, 4) /* (U12) PRG1_PRU1_GPO8.RGMII1_RD1 */
+ AM64X_IOPAD(0x0150, PIN_INPUT, 4) /* (Y13) PRG1_PRU1_GPO18.RGMII1_RD2 */
+ AM64X_IOPAD(0x0154, PIN_INPUT, 4) /* (V12) PRG1_PRU1_GPO19.RGMII1_RD3 */
+ AM64X_IOPAD(0x00d8, PIN_INPUT, 4) /* (W13) PRG1_PRU0_GPO8.RGMII1_RXC */
+ AM64X_IOPAD(0x00cc, PIN_INPUT, 4) /* (V13) PRG1_PRU0_GPO5.RGMII1_RX_CTL */
+ AM64X_IOPAD(0x0124, PIN_OUTPUT, 4) /* (V15) PRG1_PRU1_GPO7.RGMII1_TD0 */
+ AM64X_IOPAD(0x012c, PIN_OUTPUT, 4) /* (V14) PRG1_PRU1_GPO9.RGMII1_TD1 */
+ AM64X_IOPAD(0x0130, PIN_OUTPUT, 4) /* (W14) PRG1_PRU1_GPO10.RGMII1_TD2 */
+ AM64X_IOPAD(0x014c, PIN_OUTPUT, 4) /* (AA14) PRG1_PRU1_GPO17.RGMII1_TD3 */
+ AM64X_IOPAD(0x00e0, PIN_OUTPUT, 4) /* (U14) PRG1_PRU0_GPO10.RGMII1_TXC */
+ AM64X_IOPAD(0x00dc, PIN_OUTPUT, 4) /* (U15) PRG1_PRU0_GPO9.RGMII1_TX_CTL */
+ >;
+ };
+
+ rgmii2_pins_default: rgmii2-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0108, PIN_INPUT, 4) /* (W11) PRG1_PRU1_GPO0.RGMII2_RD0 */
+ AM64X_IOPAD(0x010c, PIN_INPUT, 4) /* (V11) PRG1_PRU1_GPO1.RGMII2_RD1 */
+ AM64X_IOPAD(0x0110, PIN_INPUT, 4) /* (AA12) PRG1_PRU1_GPO2.RGMII2_RD2 */
+ AM64X_IOPAD(0x0114, PIN_INPUT, 4) /* (Y12) PRG1_PRU1_GPO3.RGMII2_RD3 */
+ AM64X_IOPAD(0x0120, PIN_INPUT, 4) /* (U11) PRG1_PRU1_GPO6.RGMII2_RXC */
+ AM64X_IOPAD(0x0118, PIN_INPUT, 4) /* (W12) PRG1_PRU1_GPO4.RGMII2_RX_CTL */
+ AM64X_IOPAD(0x0134, PIN_OUTPUT, 4) /* (AA10) PRG1_PRU1_GPO11.RGMII2_TD0 */
+ AM64X_IOPAD(0x0138, PIN_OUTPUT, 4) /* (V10) PRG1_PRU1_GPO12.RGMII2_TD1 */
+ AM64X_IOPAD(0x013c, PIN_OUTPUT, 4) /* (U10) PRG1_PRU1_GPO13.RGMII2_TD2 */
+ AM64X_IOPAD(0x0140, PIN_OUTPUT, 4) /* (AA11) PRG1_PRU1_GPO14.RGMII2_TD3 */
+ AM64X_IOPAD(0x0148, PIN_OUTPUT, 4) /* (Y10) PRG1_PRU1_GPO16.RGMII2_TXC */
+ AM64X_IOPAD(0x0144, PIN_OUTPUT, 4) /* (Y11) PRG1_PRU1_GPO15.RGMII2_TX_CTL */
+ >;
+ };
+
+ ospi0_pins_default: ospi0-pins-default {
+ pinctrl-single,pins = <
+ AM64X_IOPAD(0x0000, PIN_OUTPUT, 0) /* (N20) OSPI0_CLK */
+ AM64X_IOPAD(0x002c, PIN_OUTPUT, 0) /* (L19) OSPI0_CSn0 */
+ AM64X_IOPAD(0x000c, PIN_INPUT, 0) /* (M19) OSPI0_D0 */
+ AM64X_IOPAD(0x0010, PIN_INPUT, 0) /* (M18) OSPI0_D1 */
+ AM64X_IOPAD(0x0014, PIN_INPUT, 0) /* (M20) OSPI0_D2 */
+ AM64X_IOPAD(0x0018, PIN_INPUT, 0) /* (M21) OSPI0_D3 */
+ AM64X_IOPAD(0x001c, PIN_INPUT, 0) /* (P21) OSPI0_D4 */
+ AM64X_IOPAD(0x0020, PIN_INPUT, 0) /* (P20) OSPI0_D5 */
+ AM64X_IOPAD(0x0024, PIN_INPUT, 0) /* (N18) OSPI0_D6 */
+ AM64X_IOPAD(0x0028, PIN_INPUT, 0) /* (M17) OSPI0_D7 */
+ AM64X_IOPAD(0x0008, PIN_INPUT, 0) /* (N19) OSPI0_DQS */
+ >;
+ };
+};
+
+&mcu_uart0 {
+ status = "disabled";
+};
+
+&mcu_uart1 {
+ status = "disabled";
+};
+
+&main_uart1 {
+ /* main_uart1 is reserved for firmware usage */
+ status = "reserved";
+};
+
+&main_uart2 {
+ status = "disabled";
+};
+
+&main_uart3 {
+ status = "disabled";
+};
+
+&main_uart4 {
+ status = "disabled";
+};
+
+&main_uart5 {
+ status = "disabled";
+};
+
+&main_uart6 {
+ status = "disabled";
+};
+
+&mcu_i2c0 {
+ status = "disabled";
+};
+
+&mcu_i2c1 {
+ status = "disabled";
+};
+
+&main_i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c1_pins_default>;
+ clock-frequency = <400000>;
+
+ exp1: gpio@70 {
+ compatible = "nxp,pca9538";
+ reg = <0x70>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-line-names = "GPIO_CPSW2_RST", "GPIO_CPSW1_RST",
+ "PRU_DETECT", "MMC1_SD_EN",
+ "VPP_LDO_EN", "RPI_PS_3V3_En",
+ "RPI_PS_5V0_En", "RPI_HAT_DETECT";
+ };
+};
+
+&main_i2c3 {
+ status = "disabled";
+};
+
+&mcu_spi0 {
+ status = "disabled";
+};
+
+&mcu_spi1 {
+ status = "disabled";
+};
+
+/* mcu_gpio0 is reserved for mcu firmware usage */
+&mcu_gpio0 {
+ status = "reserved";
+};
+
+&sdhci1 {
+ /* SD/MMC */
+ vmmc-supply = <&vdd_mmc1>;
+ pinctrl-names = "default";
+ bus-width = <4>;
+ pinctrl-0 = <&main_mmc1_pins_default>;
+ ti,driver-strength-ohm = <50>;
+ disable-wp;
+};
+
+&cpsw3g {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mdio1_pins_default
+ &rgmii1_pins_default
+ &rgmii2_pins_default>;
+};
+
+&cpsw_port1 {
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&cpsw3g_phy0>;
+};
+
+&cpsw_port2 {
+ phy-mode = "rgmii-rxid";
+ phy-handle = <&cpsw3g_phy1>;
+};
+
+&cpsw3g_mdio {
+ cpsw3g_phy0: ethernet-phy@0 {
+ reg = <0>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ };
+
+ cpsw3g_phy1: ethernet-phy@1 {
+ reg = <1>;
+ ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
+ ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
+ };
+};
+
+&tscadc0 {
+ status = "disabled";
+};
+
+&ospi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ospi0_pins_default>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-tx-bus-width = <8>;
+ spi-rx-bus-width = <8>;
+ spi-max-frequency = <25000000>;
+ cdns,tshsl-ns = <60>;
+ cdns,tsd2d-ns = <60>;
+ cdns,tchsh-ns = <60>;
+ cdns,tslch-ns = <60>;
+ cdns,read-delay = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
+
+&mailbox0_cluster2 {
+ mbox_main_r5fss0_core0: mbox-main-r5fss0-core0 {
+ ti,mbox-rx = <0 0 2>;
+ ti,mbox-tx = <1 0 2>;
+ };
+
+ mbox_main_r5fss0_core1: mbox-main-r5fss0-core1 {
+ ti,mbox-rx = <2 0 2>;
+ ti,mbox-tx = <3 0 2>;
+ };
+};
+
+&mailbox0_cluster3 {
+ status = "disabled";
+};
+
+&mailbox0_cluster4 {
+ mbox_main_r5fss1_core0: mbox-main-r5fss1-core0 {
+ ti,mbox-rx = <0 0 2>;
+ ti,mbox-tx = <1 0 2>;
+ };
+
+ mbox_main_r5fss1_core1: mbox-main-r5fss1-core1 {
+ ti,mbox-rx = <2 0 2>;
+ ti,mbox-tx = <3 0 2>;
+ };
+};
+
+&mailbox0_cluster5 {
+ status = "disabled";
+};
+
+&mailbox0_cluster6 {
+ mbox_m4_0: mbox-m4-0 {
+ ti,mbox-rx = <0 0 2>;
+ ti,mbox-tx = <1 0 2>;
+ };
+};
+
+&mailbox0_cluster7 {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am642.dtsi b/arch/arm64/boot/dts/ti/k3-am642.dtsi
new file mode 100644
index 000000000000..e2b397c88401
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am642.dtsi
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device Tree Source for AM642 SoC family in Dual core configuration
+ *
+ * Copyright (C) 2020-2021 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+/dts-v1/;
+
+#include "k3-am64.dtsi"
+
+/ {
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu-map {
+ cluster0: cluster0 {
+ core0 {
+ cpu = <&cpu0>;
+ };
+
+ core1 {
+ cpu = <&cpu1>;
+ };
+ };
+ };
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a53";
+ reg = <0x000>;
+ device_type = "cpu";
+ enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&L2_0>;
+ };
+
+ cpu1: cpu@1 {
+ compatible = "arm,cortex-a53";
+ reg = <0x001>;
+ device_type = "cpu";
+ enable-method = "psci";
+ i-cache-size = <0x8000>;
+ i-cache-line-size = <64>;
+ i-cache-sets = <256>;
+ d-cache-size = <0x8000>;
+ d-cache-line-size = <64>;
+ d-cache-sets = <128>;
+ next-level-cache = <&L2_0>;
+ };
+ };
+
+ L2_0: l2-cache0 {
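+ /* 256 KiB unified L2 shared by both cores */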
+ compatible = "cache";
+ cache-level = <2>;
+ cache-size = <0x40000>;
+ cache-line-size = <64>;
+ cache-sets = <512>;
+ };
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
new file mode 100644
index 000000000000..de763ca9251c
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common.dtsi
@@ -0,0 +1,655 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) Siemens AG, 2018-2021
+ *
+ * Authors:
+ * Le Jin <le.jin@siemens.com>
+ * Jan Kiszka <jan.kiszka@siemens.com>
+ *
+ * Common bits of the IOT2050 Basic and Advanced variants
+ */
+
+/dts-v1/;
+
+#include "k3-am654.dtsi"
+#include <dt-bindings/phy/phy.h>
+
+/ {
+ aliases {
+ spi0 = &mcu_spi0;
+ };
+
+ chosen {
+ stdout-path = "serial3:115200n8";
+ bootargs = "earlycon=ns16550a,mmio32,0x02810000";
+ };
+
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ secure_ddr: secure-ddr@9e800000 {
+ reg = <0 0x9e800000 0 0x01800000>; /* for OP-TEE */
+ alignment = <0x1000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@a0000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0xa0000000 0 0x100000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core0_memory_region: r5f-memory@a0100000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0xa0100000 0 0xf00000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@a1000000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0xa1000000 0 0x100000>;
+ no-map;
+ };
+
+ mcu_r5fss0_core1_memory_region: r5f-memory@a1100000 {
+ compatible = "shared-dma-pool";
+ reg = <0 0xa1100000 0 0xf00000>;
+ no-map;
+ };
+
+ rtos_ipc_memory_region: ipc-memories@a2000000 {
+ reg = <0x00 0xa2000000 0x00 0x00200000>;
+ alignment = <0x1000>;
+ no-map;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&leds_pins_default>;
+
+ status-led-red {
+ gpios = <&wkup_gpio0 32 GPIO_ACTIVE_HIGH>;
+ panic-indicator;
+ };
+
+ status-led-green {
+ gpios = <&wkup_gpio0 24 GPIO_ACTIVE_HIGH>;
+ };
+
+ user-led1-red {
+ gpios = <&pcal9535_3 14 GPIO_ACTIVE_HIGH>;
+ };
+
+ user-led1-green {
+ gpios = <&pcal9535_2 15 GPIO_ACTIVE_HIGH>;
+ };
+
+ user-led2-red {
+ gpios = <&wkup_gpio0 17 GPIO_ACTIVE_HIGH>;
+ };
+
+ user-led2-green {
+ gpios = <&wkup_gpio0 22 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ dp_refclk: clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ };
+};
+
+&wkup_pmx0 {
+ wkup_i2c0_pins_default: wkup-i2c0-pins-default {
+ pinctrl-single,pins = <
+ /* (AC7) WKUP_I2C0_SCL */
+ AM65X_WKUP_IOPAD(0x00e0, PIN_INPUT, 0)
+ /* (AD6) WKUP_I2C0_SDA */
+ AM65X_WKUP_IOPAD(0x00e4, PIN_INPUT, 0)
+ >;
+ };
+
+ mcu_i2c0_pins_default: mcu-i2c0-pins-default {
+ pinctrl-single,pins = <
+ /* (AD8) MCU_I2C0_SCL */
+ AM65X_WKUP_IOPAD(0x00e8, PIN_INPUT, 0)
+ /* (AD7) MCU_I2C0_SDA */
+ AM65X_WKUP_IOPAD(0x00ec, PIN_INPUT, 0)
+ >;
+ };
+
+ arduino_i2c_aio_switch_pins_default: arduino-i2c-aio-switch-pins-default {
+ pinctrl-single,pins = <
+ /* (R2) WKUP_GPIO0_21 */
+ AM65X_WKUP_IOPAD(0x0024, PIN_OUTPUT, 7)
+ >;
+ };
+
+ push_button_pins_default: push-button-pins-default {
+ pinctrl-single,pins = <
+ /* (T1) MCU_OSPI1_CLK.WKUP_GPIO0_25 */
+ AM65X_WKUP_IOPAD(0x0034, PIN_INPUT, 7)
+ >;
+ };
+
+ arduino_uart_pins_default: arduino-uart-pins-default {
+ pinctrl-single,pins = <
+ /* (P4) MCU_UART0_RXD */
+ AM65X_WKUP_IOPAD(0x0044, PIN_INPUT, 4)
+ /* (P5) MCU_UART0_TXD */
+ AM65X_WKUP_IOPAD(0x0048, PIN_OUTPUT, 4)
+ >;
+ };
+
+ arduino_io_d2_to_d3_pins_default: arduino-io-d2-to-d3-pins-default {
+ pinctrl-single,pins = <
+ /* (P1) WKUP_GPIO0_31 */
+ AM65X_WKUP_IOPAD(0x004c, PIN_OUTPUT, 7)
+ /* (N3) WKUP_GPIO0_33 */
+ AM65X_WKUP_IOPAD(0x0054, PIN_OUTPUT, 7)
+ >;
+ };
+
+ arduino_io_oe_pins_default: arduino-io-oe-pins-default {
+ pinctrl-single,pins = <
+ /* (N4) WKUP_GPIO0_34 */
+ AM65X_WKUP_IOPAD(0x0058, PIN_OUTPUT, 7)
+ /* (M2) WKUP_GPIO0_36 */
+ AM65X_WKUP_IOPAD(0x0060, PIN_OUTPUT, 7)
+ /* (M3) WKUP_GPIO0_37 */
+ AM65X_WKUP_IOPAD(0x0064, PIN_OUTPUT, 7)
+ /* (M4) WKUP_GPIO0_38 */
+ AM65X_WKUP_IOPAD(0x0068, PIN_OUTPUT, 7)
+ /* (M1) WKUP_GPIO0_41 */
+ AM65X_WKUP_IOPAD(0x0074, PIN_OUTPUT, 7)
+ >;
+ };
+
+ mcu_fss0_ospi0_pins_default: mcu-fss0-ospi0-pins-default {
+ pinctrl-single,pins = <
+ /* (V1) MCU_OSPI0_CLK */
+ AM65X_WKUP_IOPAD(0x0000, PIN_OUTPUT, 0)
+ /* (U2) MCU_OSPI0_DQS */
+ AM65X_WKUP_IOPAD(0x0008, PIN_INPUT, 0)
+ /* (U4) MCU_OSPI0_D0 */
+ AM65X_WKUP_IOPAD(0x000c, PIN_INPUT, 0)
+ /* (U5) MCU_OSPI0_D1 */
+ AM65X_WKUP_IOPAD(0x0010, PIN_INPUT, 0)
+ /* (R4) MCU_OSPI0_CSn0 */
+ AM65X_WKUP_IOPAD(0x002c, PIN_OUTPUT, 0)
+ >;
+ };
+
+ db9_com_mode_pins_default: db9-com-mode-pins-default {
+ pinctrl-single,pins = <
+ /* (AD3) WKUP_GPIO0_5, used as uart0 mode 0 */
+ AM65X_WKUP_IOPAD(0x00c4, PIN_OUTPUT, 7)
+ /* (AC3) WKUP_GPIO0_4, used as uart0 mode 1 */
+ AM65X_WKUP_IOPAD(0x00c0, PIN_OUTPUT, 7)
+ /* (AC1) WKUP_GPIO0_7, used as uart0 term */
+ AM65X_WKUP_IOPAD(0x00cc, PIN_OUTPUT, 7)
+ /* (AC2) WKUP_GPIO0_6, used as uart0 en */
+ AM65X_WKUP_IOPAD(0x00c8, PIN_OUTPUT, 7)
+ >;
+ };
+
+ leds_pins_default: leds-pins-default {
+ pinctrl-single,pins = <
+ /* (T2) WKUP_GPIO0_17, used as user led2 red */
+ AM65X_WKUP_IOPAD(0x0014, PIN_OUTPUT, 7)
+ /* (R3) WKUP_GPIO0_22, used as user led2 green */
+ AM65X_WKUP_IOPAD(0x0028, PIN_OUTPUT, 7)
+ /* (R5) WKUP_GPIO0_24, used as status led green */
+ AM65X_WKUP_IOPAD(0x0030, PIN_OUTPUT, 7)
+ /* (N2) WKUP_GPIO0_32, used as status led red */
+ AM65X_WKUP_IOPAD(0x0050, PIN_OUTPUT, 7)
+ >;
+ };
+
+ mcu_spi0_pins_default: mcu-spi0-pins-default {
+ pinctrl-single,pins = <
+ /* (Y1) MCU_SPI0_CLK */
+ AM65X_WKUP_IOPAD(0x0090, PIN_INPUT, 0)
+ /* (Y3) MCU_SPI0_D0 */
+ AM65X_WKUP_IOPAD(0x0094, PIN_INPUT, 0)
+ /* (Y2) MCU_SPI0_D1 */
+ AM65X_WKUP_IOPAD(0x0098, PIN_INPUT, 0)
+ /* (Y4) MCU_SPI0_CS0 */
+ AM65X_WKUP_IOPAD(0x009c, PIN_OUTPUT, 0)
+ >;
+ };
+
+ minipcie_pins_default: minipcie-pins-default {
+ pinctrl-single,pins = <
+ /* (P2) MCU_OSPI1_DQS.WKUP_GPIO0_27 */
+ AM65X_WKUP_IOPAD(0x003c, PIN_OUTPUT, 7)
+ >;
+ };
+};
+
+&main_pmx0 {
+ main_uart1_pins_default: main-uart1-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x0174, PIN_INPUT, 6) /* (AE23) UART1_RXD */
+ AM65X_IOPAD(0x014c, PIN_OUTPUT, 6) /* (AD23) UART1_TXD */
+ AM65X_IOPAD(0x0178, PIN_INPUT, 6) /* (AD22) UART1_CTSn */
+ AM65X_IOPAD(0x017c, PIN_OUTPUT, 6) /* (AC21) UART1_RTSn */
+ >;
+ };
+
+ main_i2c3_pins_default: main-i2c3-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x01c0, PIN_INPUT, 2) /* (AF13) I2C3_SCL */
+ AM65X_IOPAD(0x01d4, PIN_INPUT, 2) /* (AG12) I2C3_SDA */
+ >;
+ };
+
+ main_mmc1_pins_default: main-mmc1-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x02d4, PIN_INPUT_PULLDOWN, 0) /* (C27) MMC1_CLK */
+ AM65X_IOPAD(0x02d8, PIN_INPUT_PULLUP, 0) /* (C28) MMC1_CMD */
+ AM65X_IOPAD(0x02d0, PIN_INPUT_PULLUP, 0) /* (D28) MMC1_DAT0 */
+ AM65X_IOPAD(0x02cc, PIN_INPUT_PULLUP, 0) /* (E27) MMC1_DAT1 */
+ AM65X_IOPAD(0x02c8, PIN_INPUT_PULLUP, 0) /* (D26) MMC1_DAT2 */
+ AM65X_IOPAD(0x02c4, PIN_INPUT_PULLUP, 0) /* (D27) MMC1_DAT3 */
+ AM65X_IOPAD(0x02dc, PIN_INPUT_PULLUP, 0) /* (B24) MMC1_SDCD */
+ AM65X_IOPAD(0x02e0, PIN_INPUT_PULLUP, 0) /* (C24) MMC1_SDWP */
+ >;
+ };
+
+ usb0_pins_default: usb0-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x02bc, PIN_OUTPUT, 0) /* (AD9) USB0_DRVVBUS */
+ >;
+ };
+
+ usb1_pins_default: usb1-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x02c0, PIN_OUTPUT, 0) /* (AC8) USB1_DRVVBUS */
+ >;
+ };
+
+ arduino_io_d4_to_d9_pins_default: arduino-io-d4-to-d9-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x0084, PIN_OUTPUT, 7) /* (AG18) GPIO0_33 */
+ AM65X_IOPAD(0x008c, PIN_OUTPUT, 7) /* (AF17) GPIO0_35 */
+ AM65X_IOPAD(0x0098, PIN_OUTPUT, 7) /* (AH16) GPIO0_38 */
+ AM65X_IOPAD(0x00ac, PIN_OUTPUT, 7) /* (AH15) GPIO0_43 */
+ AM65X_IOPAD(0x00c0, PIN_OUTPUT, 7) /* (AG15) GPIO0_48 */
+ AM65X_IOPAD(0x00cc, PIN_OUTPUT, 7) /* (AD15) GPIO0_51 */
+ >;
+ };
+
+ dss_vout1_pins_default: dss-vout1-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x0000, PIN_OUTPUT, 1) /* VOUT1_DATA0 */
+ AM65X_IOPAD(0x0004, PIN_OUTPUT, 1) /* VOUT1_DATA1 */
+ AM65X_IOPAD(0x0008, PIN_OUTPUT, 1) /* VOUT1_DATA2 */
+ AM65X_IOPAD(0x000c, PIN_OUTPUT, 1) /* VOUT1_DATA3 */
+ AM65X_IOPAD(0x0010, PIN_OUTPUT, 1) /* VOUT1_DATA4 */
+ AM65X_IOPAD(0x0014, PIN_OUTPUT, 1) /* VOUT1_DATA5 */
+ AM65X_IOPAD(0x0018, PIN_OUTPUT, 1) /* VOUT1_DATA6 */
+ AM65X_IOPAD(0x001c, PIN_OUTPUT, 1) /* VOUT1_DATA7 */
+ AM65X_IOPAD(0x0020, PIN_OUTPUT, 1) /* VOUT1_DATA8 */
+ AM65X_IOPAD(0x0024, PIN_OUTPUT, 1) /* VOUT1_DATA9 */
+ AM65X_IOPAD(0x0028, PIN_OUTPUT, 1) /* VOUT1_DATA10 */
+ AM65X_IOPAD(0x002c, PIN_OUTPUT, 1) /* VOUT1_DATA11 */
+ AM65X_IOPAD(0x0030, PIN_OUTPUT, 1) /* VOUT1_DATA12 */
+ AM65X_IOPAD(0x0034, PIN_OUTPUT, 1) /* VOUT1_DATA13 */
+ AM65X_IOPAD(0x0038, PIN_OUTPUT, 1) /* VOUT1_DATA14 */
+ AM65X_IOPAD(0x003c, PIN_OUTPUT, 1) /* VOUT1_DATA15 */
+ AM65X_IOPAD(0x0040, PIN_OUTPUT, 1) /* VOUT1_DATA16 */
+ AM65X_IOPAD(0x0044, PIN_OUTPUT, 1) /* VOUT1_DATA17 */
+ AM65X_IOPAD(0x0048, PIN_OUTPUT, 1) /* VOUT1_DATA18 */
+ AM65X_IOPAD(0x004c, PIN_OUTPUT, 1) /* VOUT1_DATA19 */
+ AM65X_IOPAD(0x0050, PIN_OUTPUT, 1) /* VOUT1_DATA20 */
+ AM65X_IOPAD(0x0054, PIN_OUTPUT, 1) /* VOUT1_DATA21 */
+ AM65X_IOPAD(0x0058, PIN_OUTPUT, 1) /* VOUT1_DATA22 */
+ AM65X_IOPAD(0x005c, PIN_OUTPUT, 1) /* VOUT1_DATA23 */
+ AM65X_IOPAD(0x0060, PIN_OUTPUT, 1) /* VOUT1_VSYNC */
+ AM65X_IOPAD(0x0064, PIN_OUTPUT, 1) /* VOUT1_HSYNC */
+ AM65X_IOPAD(0x0068, PIN_OUTPUT, 1) /* VOUT1_PCLK */
+ AM65X_IOPAD(0x006c, PIN_OUTPUT, 1) /* VOUT1_DE */
+ >;
+ };
+
+ dp_pins_default: dp-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x0078, PIN_OUTPUT, 7) /* (AF18) DP rst_n */
+ >;
+ };
+
+ main_i2c2_pins_default: main-i2c2-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x0074, PIN_INPUT, 5) /* (T27) I2C2_SCL */
+ AM65X_IOPAD(0x0070, PIN_INPUT, 5) /* (R25) I2C2_SDA */
+ >;
+ };
+};
+
+&main_pmx1 {
+ main_i2c0_pins_default: main-i2c0-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x0000, PIN_INPUT, 0) /* (D20) I2C0_SCL */
+ AM65X_IOPAD(0x0004, PIN_INPUT, 0) /* (C21) I2C0_SDA */
+ >;
+ };
+
+ main_i2c1_pins_default: main-i2c1-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x0008, PIN_INPUT, 0) /* (B21) I2C1_SCL */
+ AM65X_IOPAD(0x000c, PIN_INPUT, 0) /* (E21) I2C1_SDA */
+ >;
+ };
+
+ ecap0_pins_default: ecap0-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x0010, PIN_INPUT, 0) /* (D21) ECAP0_IN_APWM_OUT */
+ >;
+ };
+};
+
+&wkup_uart0 {
+ /* Wakeup UART is used by System firmware */
+ status = "reserved";
+};
+
+&main_uart1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart1_pins_default>;
+};
+
+&main_uart2 {
+ status = "disabled";
+};
+
+&mcu_uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&arduino_uart_pins_default>;
+};
+
+&main_gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&arduino_io_d4_to_d9_pins_default>;
+ gpio-line-names =
+ "main_gpio0-base", "", "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "", "", "",
+ "", "", "", "", "", "", "", "", "", "",
+ "", "", "", "IO4", "", "IO5", "", "", "IO6", "",
+ "", "", "", "IO7", "", "", "", "", "IO8", "",
+ "", "IO9";
+};
+
+&wkup_gpio0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <
+ &arduino_io_d2_to_d3_pins_default
+ &arduino_i2c_aio_switch_pins_default
+ &arduino_io_oe_pins_default
+ &push_button_pins_default
+ &db9_com_mode_pins_default
+ >;
+ gpio-line-names =
+ /* 0..9 */
+ "wkup_gpio0-base", "", "", "", "UART0-mode1", "UART0-mode0",
+ "UART0-enable", "UART0-terminate", "", "WIFI-disable",
+ /* 10..19 */
+ "", "", "", "", "", "", "", "", "", "",
+ /* 20..29 */
+ "", "A4A5-I2C-mux", "", "", "", "USER-button", "", "", "","IO0",
+ /* 30..39 */
+ "IO1", "IO2", "", "IO3", "IO17-direction", "A5",
+ "IO16-direction", "IO15-direction", "IO14-direction", "A3",
+ /* 40..49 */
+ "", "IO18-direction", "A4", "A2", "A1", "A0", "", "", "IO13",
+ "IO11",
+ /* 50..51 */
+ "IO12", "IO10";
+};
+
+&wkup_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wkup_i2c0_pins_default>;
+ clock-frequency = <400000>;
+};
+
+&mcu_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_i2c0_pins_default>;
+ clock-frequency = <400000>;
+
+ psu: regulator@60 {
+ compatible = "ti,tps62363";
+ reg = <0x60>;
+ regulator-name = "tps62363-vout";
+ regulator-min-microvolt = <500000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-boot-on;
+ ti,vsel0-state-high;
+ ti,vsel1-state-high;
+ ti,enable-vout-discharge;
+ };
+
+ /* D4200 */
+ pcal9535_1: gpio@20 {
+ compatible = "nxp,pcal9535";
+ reg = <0x20>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-line-names =
+ "A0-pull", "A1-pull", "A2-pull", "A3-pull", "A4-pull",
+ "A5-pull", "", "",
+ "IO14-enable", "IO15-enable", "IO16-enable",
+ "IO17-enable", "IO18-enable", "IO19-enable";
+ };
+
+ /* D4201 */
+ pcal9535_2: gpio@21 {
+ compatible = "nxp,pcal9535";
+ reg = <0x21>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-line-names =
+ "IO0-direction", "IO1-direction", "IO2-direction",
+ "IO3-direction", "IO4-direction", "IO5-direction",
+ "IO6-direction", "IO7-direction",
+ "IO8-direction", "IO9-direction", "IO10-direction",
+ "IO11-direction", "IO12-direction", "IO13-direction",
+ "IO19-direction";
+ };
+
+ /* D4202 */
+ pcal9535_3: gpio@25 {
+ compatible = "nxp,pcal9535";
+ reg = <0x25>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ gpio-line-names =
+ "IO0-pull", "IO1-pull", "IO2-pull", "IO3-pull",
+ "IO4-pull", "IO5-pull", "IO6-pull", "IO7-pull",
+ "IO8-pull", "IO9-pull", "IO10-pull", "IO11-pull",
+ "IO12-pull", "IO13-pull";
+ };
+};
+
+&main_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c0_pins_default>;
+ clock-frequency = <400000>;
+
+ rtc: rtc8564@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
+ };
+
+ eeprom: eeprom@54 {
+ compatible = "atmel,24c08";
+ reg = <0x54>;
+ pagesize = <16>;
+ };
+};
+
+&main_i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c1_pins_default>;
+ clock-frequency = <400000>;
+};
+
+&main_i2c2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c2_pins_default>;
+ clock-frequency = <400000>;
+};
+
+&main_i2c3 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c3_pins_default>;
+ clock-frequency = <400000>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ edp-bridge@f {
+ compatible = "toshiba,tc358767";
+ reg = <0x0f>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&dp_pins_default>;
+ reset-gpios = <&main_gpio0 30 GPIO_ACTIVE_HIGH>;
+
+ clock-names = "ref";
+ clocks = <&dp_refclk>;
+
+ toshiba,hpd-pin = <0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@1 {
+ reg = <1>;
+
+ bridge_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ };
+ };
+ };
+ };
+};
+
+&mcu_cpsw {
+ status = "disabled";
+};
+
+&ecap0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ecap0_pins_default>;
+};
+
+&sdhci1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mmc1_pins_default>;
+ ti,driver-strength-ohm = <50>;
+ disable-wp;
+};
+
+&usb0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb0_pins_default>;
+ dr_mode = "host";
+};
+
+&usb1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb1_pins_default>;
+ dr_mode = "host";
+};
+
+&mcu_spi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_spi0_pins_default>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,pindir-d0-out-d1-in = <1>;
+};
+
+&tscadc0 {
+ status = "disabled";
+};
+
+&tscadc1 {
+ adc {
+ ti,adc-channels = <0 1 2 3 4 5>;
+ };
+};
+
+&ospi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_fss0_ospi0_pins_default>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-tx-bus-width = <1>;
+ spi-rx-bus-width = <1>;
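+ /* single data lane in each direction (plain SPI mode) */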
+ spi-max-frequency = <50000000>;
+ cdns,tshsl-ns = <60>;
+ cdns,tsd2d-ns = <60>;
+ cdns,tchsh-ns = <60>;
+ cdns,tslch-ns = <60>;
+ cdns,read-delay = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
+
+&dss {
+ pinctrl-names = "default";
+ pinctrl-0 = <&dss_vout1_pins_default>;
+
+ assigned-clocks = <&k3_clks 67 2>;
+ assigned-clock-parents = <&k3_clks 67 5>;
+};
+
+&dss_ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@1 {
+ reg = <1>;
+
+ dpi_out: endpoint {
+ remote-endpoint = <&bridge_in>;
+ };
+ };
+};
+
+&serdes0 {
+ status = "disabled";
+};
+
+&pcie0_rc {
+ status = "disabled";
+};
+
+&pcie0_ep {
+ status = "disabled";
+};
+
+&pcie1_rc {
+ pinctrl-names = "default";
+ pinctrl-0 = <&minipcie_pins_default>;
+
+ num-lanes = <1>;
+ phys = <&serdes1 PHY_TYPE_PCIE 0>;
+ phy-names = "pcie-phy0";
+ reset-gpios = <&wkup_gpio0 27 GPIO_ACTIVE_HIGH>;
+};
+
+&pcie1_ep {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
index ceb579fb427d..6cd3131eb9ff 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-main.dtsi
@@ -433,8 +433,9 @@
#phy-cells = <0>;
};
- intr_main_gpio: interrupt-controller0 {
+ intr_main_gpio: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x0 0x00a00000 0x0 0x400>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -444,18 +445,19 @@
ti,interrupt-ranges = <0 392 32>;
};
- main-navss {
+ main_navss: bus@30800000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x0 0x30800000 0x0 0x30800000 0x0 0xbc00000>;
dma-coherent;
dma-ranges;
ti,sci-dev-id = <118>;
- intr_main_navss: interrupt-controller1 {
+ intr_main_navss: interrupt-controller@310e0000 {
compatible = "ti,sci-intr";
+ reg = <0x0 0x310e0000 0x0 0x2000>;
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -707,6 +709,7 @@
dma-coherent;
interrupts = <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
msi-map = <0x0 &gic_its 0x0 0x10000>;
+ device_type = "pci";
};
pcie0_ep: pcie-ep@5500000 {
@@ -739,6 +742,7 @@
dma-coherent;
interrupts = <GIC_SPI 355 IRQ_TYPE_EDGE_RISING>;
msi-map = <0x0 &gic_its 0x10000 0x10000>;
+ device_type = "pci";
};
pcie1_ep: pcie-ep@5600000 {
@@ -919,4 +923,397 @@
clocks = <&ehrpwm_tbclk 5>, <&k3_clks 45 0>;
clock-names = "tbclk", "fck";
};
+
+ icssg0: icssg@b000000 {
+ compatible = "ti,am654-icssg";
+ reg = <0x00 0xb000000 0x00 0x80000>;
+ power-domains = <&k3_pds 62 TI_SCI_PD_EXCLUSIVE>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x00 0xb000000 0x80000>;
+
+ icssg0_mem: memories@0 {
+ reg = <0x0 0x2000>,
+ <0x2000 0x2000>,
+ <0x10000 0x10000>;
+ reg-names = "dram0", "dram1",
+ "shrdram2";
+ };
+
+ icssg0_cfg: cfg@26000 {
+ compatible = "ti,pruss-cfg", "syscon";
+ reg = <0x26000 0x200>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x26000 0x2000>;
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ icssg0_coreclk_mux: coreclk-mux@3c {
+ reg = <0x3c>;
+ #clock-cells = <0>;
+ clocks = <&k3_clks 62 19>, /* icssg0_core_clk */
+ <&k3_clks 62 3>; /* icssg0_iclk */
+ assigned-clocks = <&icssg0_coreclk_mux>;
+ assigned-clock-parents = <&k3_clks 62 3>;
+ };
+
+ icssg0_iepclk_mux: iepclk-mux@30 {
+ reg = <0x30>;
+ #clock-cells = <0>;
+ clocks = <&k3_clks 62 10>, /* icssg0_iep_clk */
+ <&icssg0_coreclk_mux>; /* core_clk */
+ assigned-clocks = <&icssg0_iepclk_mux>;
+ assigned-clock-parents = <&icssg0_coreclk_mux>;
+ };
+ };
+ };
+
+ icssg0_mii_rt: mii-rt@32000 {
+ compatible = "ti,pruss-mii", "syscon";
+ reg = <0x32000 0x100>;
+ };
+
+ icssg0_mii_g_rt: mii-g-rt@33000 {
+ compatible = "ti,pruss-mii-g", "syscon";
+ reg = <0x33000 0x1000>;
+ };
+
+ icssg0_intc: interrupt-controller@20000 {
+ compatible = "ti,icssg-intc";
+ reg = <0x20000 0x2000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupts = <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host_intr0", "host_intr1",
+ "host_intr2", "host_intr3",
+ "host_intr4", "host_intr5",
+ "host_intr6", "host_intr7";
+ };
+
+ pru0_0: pru@34000 {
+ compatible = "ti,am654-pru";
+ reg = <0x34000 0x4000>,
+ <0x22000 0x100>,
+ <0x22400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-pru0_0-fw";
+ };
+
+ rtu0_0: rtu@4000 {
+ compatible = "ti,am654-rtu";
+ reg = <0x4000 0x2000>,
+ <0x23000 0x100>,
+ <0x23400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-rtu0_0-fw";
+ };
+
+ tx_pru0_0: txpru@a000 {
+ compatible = "ti,am654-tx-pru";
+ reg = <0xa000 0x1800>,
+ <0x25000 0x100>,
+ <0x25400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-txpru0_0-fw";
+ };
+
+ pru0_1: pru@38000 {
+ compatible = "ti,am654-pru";
+ reg = <0x38000 0x4000>,
+ <0x24000 0x100>,
+ <0x24400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-pru0_1-fw";
+ };
+
+ rtu0_1: rtu@6000 {
+ compatible = "ti,am654-rtu";
+ reg = <0x6000 0x2000>,
+ <0x23800 0x100>,
+ <0x23c00 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-rtu0_1-fw";
+ };
+
+ tx_pru0_1: txpru@c000 {
+ compatible = "ti,am654-tx-pru";
+ reg = <0xc000 0x1800>,
+ <0x25800 0x100>,
+ <0x25c00 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-txpru0_1-fw";
+ };
+ };
+
+ icssg1: icssg@b100000 {
+ compatible = "ti,am654-icssg";
+ reg = <0x00 0xb100000 0x00 0x80000>;
+ power-domains = <&k3_pds 63 TI_SCI_PD_EXCLUSIVE>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x00 0xb100000 0x80000>;
+
+ icssg1_mem: memories@0 {
+ reg = <0x0 0x2000>,
+ <0x2000 0x2000>,
+ <0x10000 0x10000>;
+ reg-names = "dram0", "dram1",
+ "shrdram2";
+ };
+
+ icssg1_cfg: cfg@26000 {
+ compatible = "ti,pruss-cfg", "syscon";
+ reg = <0x26000 0x200>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x26000 0x2000>;
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ icssg1_coreclk_mux: coreclk-mux@3c {
+ reg = <0x3c>;
+ #clock-cells = <0>;
+ clocks = <&k3_clks 63 19>, /* icssg1_core_clk */
+ <&k3_clks 63 3>; /* icssg1_iclk */
+ assigned-clocks = <&icssg1_coreclk_mux>;
+ assigned-clock-parents = <&k3_clks 63 3>;
+ };
+
+ icssg1_iepclk_mux: iepclk-mux@30 {
+ reg = <0x30>;
+ #clock-cells = <0>;
+ clocks = <&k3_clks 63 10>, /* icssg1_iep_clk */
+ <&icssg1_coreclk_mux>; /* core_clk */
+ assigned-clocks = <&icssg1_iepclk_mux>;
+ assigned-clock-parents = <&icssg1_coreclk_mux>;
+ };
+ };
+ };
+
+ icssg1_mii_rt: mii-rt@32000 {
+ compatible = "ti,pruss-mii", "syscon";
+ reg = <0x32000 0x100>;
+ };
+
+ icssg1_mii_g_rt: mii-g-rt@33000 {
+ compatible = "ti,pruss-mii-g", "syscon";
+ reg = <0x33000 0x1000>;
+ };
+
+ icssg1_intc: interrupt-controller@20000 {
+ compatible = "ti,icssg-intc";
+ reg = <0x20000 0x2000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host_intr0", "host_intr1",
+ "host_intr2", "host_intr3",
+ "host_intr4", "host_intr5",
+ "host_intr6", "host_intr7";
+ };
+
+ pru1_0: pru@34000 {
+ compatible = "ti,am654-pru";
+ reg = <0x34000 0x4000>,
+ <0x22000 0x100>,
+ <0x22400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-pru1_0-fw";
+ };
+
+ rtu1_0: rtu@4000 {
+ compatible = "ti,am654-rtu";
+ reg = <0x4000 0x2000>,
+ <0x23000 0x100>,
+ <0x23400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-rtu1_0-fw";
+ };
+
+ tx_pru1_0: txpru@a000 {
+ compatible = "ti,am654-tx-pru";
+ reg = <0xa000 0x1800>,
+ <0x25000 0x100>,
+ <0x25400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-txpru1_0-fw";
+ };
+
+ pru1_1: pru@38000 {
+ compatible = "ti,am654-pru";
+ reg = <0x38000 0x4000>,
+ <0x24000 0x100>,
+ <0x24400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-pru1_1-fw";
+ };
+
+ rtu1_1: rtu@6000 {
+ compatible = "ti,am654-rtu";
+ reg = <0x6000 0x2000>,
+ <0x23800 0x100>,
+ <0x23c00 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-rtu1_1-fw";
+ };
+
+ tx_pru1_1: txpru@c000 {
+ compatible = "ti,am654-tx-pru";
+ reg = <0xc000 0x1800>,
+ <0x25800 0x100>,
+ <0x25c00 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-txpru1_1-fw";
+ };
+ };
+
+ icssg2: icssg@b200000 {
+ compatible = "ti,am654-icssg";
+ reg = <0x00 0xb200000 0x00 0x80000>;
+ power-domains = <&k3_pds 64 TI_SCI_PD_EXCLUSIVE>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x00 0xb200000 0x80000>;
+
+ icssg2_mem: memories@0 {
+ reg = <0x0 0x2000>,
+ <0x2000 0x2000>,
+ <0x10000 0x10000>;
+ reg-names = "dram0", "dram1",
+ "shrdram2";
+ };
+
+ icssg2_cfg: cfg@26000 {
+ compatible = "ti,pruss-cfg", "syscon";
+ reg = <0x26000 0x200>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x26000 0x2000>;
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ icssg2_coreclk_mux: coreclk-mux@3c {
+ reg = <0x3c>;
+ #clock-cells = <0>;
+ clocks = <&k3_clks 64 19>, /* icssg2_core_clk */
+ <&k3_clks 64 3>; /* icssg2_iclk */
+ assigned-clocks = <&icssg2_coreclk_mux>;
+ assigned-clock-parents = <&k3_clks 64 3>;
+ };
+
+ icssg2_iepclk_mux: iepclk-mux@30 {
+ reg = <0x30>;
+ #clock-cells = <0>;
+ clocks = <&k3_clks 64 10>, /* icssg2_iep_clk */
+ <&icssg2_coreclk_mux>; /* core_clk */
+ assigned-clocks = <&icssg2_iepclk_mux>;
+ assigned-clock-parents = <&icssg2_coreclk_mux>;
+ };
+ };
+ };
+
+ icssg2_mii_rt: mii-rt@32000 {
+ compatible = "ti,pruss-mii", "syscon";
+ reg = <0x32000 0x100>;
+ };
+
+ icssg2_mii_g_rt: mii-g-rt@33000 {
+ compatible = "ti,pruss-mii-g", "syscon";
+ reg = <0x33000 0x1000>;
+ };
+
+ icssg2_intc: interrupt-controller@20000 {
+ compatible = "ti,icssg-intc";
+ reg = <0x20000 0x2000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupts = <GIC_SPI 270 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 271 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 273 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 274 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 275 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 276 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 277 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host_intr0", "host_intr1",
+ "host_intr2", "host_intr3",
+ "host_intr4", "host_intr5",
+ "host_intr6", "host_intr7";
+ };
+
+ pru2_0: pru@34000 {
+ compatible = "ti,am654-pru";
+ reg = <0x34000 0x4000>,
+ <0x22000 0x100>,
+ <0x22400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-pru2_0-fw";
+ };
+
+ rtu2_0: rtu@4000 {
+ compatible = "ti,am654-rtu";
+ reg = <0x4000 0x2000>,
+ <0x23000 0x100>,
+ <0x23400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-rtu2_0-fw";
+ };
+
+ tx_pru2_0: txpru@a000 {
+ compatible = "ti,am654-tx-pru";
+ reg = <0xa000 0x1800>,
+ <0x25000 0x100>,
+ <0x25400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-txpru2_0-fw";
+ };
+
+ pru2_1: pru@38000 {
+ compatible = "ti,am654-pru";
+ reg = <0x38000 0x4000>,
+ <0x24000 0x100>,
+ <0x24400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-pru2_1-fw";
+ };
+
+ rtu2_1: rtu@6000 {
+ compatible = "ti,am654-rtu";
+ reg = <0x6000 0x2000>,
+ <0x23800 0x100>,
+ <0x23c00 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-rtu2_1-fw";
+ };
+
+ tx_pru2_1: txpru@c000 {
+ compatible = "ti,am654-tx-pru";
+ reg = <0xc000 0x1800>,
+ <0x25800 0x100>,
+ <0x25c00 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "am65x-txpru2_1-fw";
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
index 7454c8cec0cc..f5b8ef2f5f77 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-mcu.dtsi
@@ -116,11 +116,11 @@
};
};
- mcu-navss {
+ mcu_navss: bus@28380000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
dma-coherent;
dma-ranges;
@@ -308,4 +308,13 @@
ti,loczrama = <1>;
};
};
+
+ mcu_rti1: watchdog@40610000 {
+ compatible = "ti,j7-rti-wdt";
+ reg = <0x0 0x40610000 0x0 0x100>;
+ clocks = <&k3_clks 135 0>;
+ power-domains = <&k3_pds 135 TI_SCI_PD_SHARED>;
+ assigned-clocks = <&k3_clks 135 0>;
+ assigned-clock-parents = <&k3_clks 135 4>;
+ };
};
diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
index ed42f13e7663..7cb864b4d74a 100644
--- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
@@ -6,24 +6,24 @@
*/
&cbass_wakeup {
- dmsc: dmsc {
+ dmsc: system-controller@44083000 {
compatible = "ti,am654-sci";
ti,host-id = <12>;
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
mbox-names = "rx", "tx";
mboxes= <&secure_proxy_main 11>,
<&secure_proxy_main 13>;
+ reg-names = "debug_messages";
+ reg = <0x44083000 0x1000>;
+
k3_pds: power-controller {
compatible = "ti,sci-pm-domain";
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -69,8 +69,9 @@
power-domains = <&k3_pds 115 TI_SCI_PD_EXCLUSIVE>;
};
- intr_wkup_gpio: interrupt-controller2 {
+ intr_wkup_gpio: interrupt-controller@42200000 {
compatible = "ti,sci-intr";
+ reg = <0x42200000 0x200>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
diff --git a/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts b/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts
new file mode 100644
index 000000000000..4f7e3f2a6265
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am6528-iot2050-basic.dts
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) Siemens AG, 2018-2021
+ *
+ * Authors:
+ * Le Jin <le.jin@siemens.com>
+ * Jan Kiszka <jan.kiszka@siemens.com>
+ *
+ * AM6528-based (dual-core) IOT2050 Basic variant
+ * 1 GB RAM, no eMMC, main_uart0 on connector X30
+ */
+
+/dts-v1/;
+
+#include "k3-am65-iot2050-common.dtsi"
+
+/ {
+ compatible = "siemens,iot2050-basic", "ti,am654";
+ model = "SIMATIC IOT2050 Basic";
+
+ memory@80000000 {
+ device_type = "memory";
+ /* 1G RAM */
+ reg = <0x00000000 0x80000000 0x00000000 0x40000000>;
+ };
+
+ cpus {
+ cpu-map {
+ /delete-node/ cluster1;
+ };
+ /delete-node/ cpu@100;
+ /delete-node/ cpu@101;
+ };
+
+ /delete-node/ l2-cache1;
+};
+
+/* eMMC */
+&sdhci0 {
+ status = "disabled";
+};
+
+&main_pmx0 {
+ main_uart0_pins_default: main-uart0-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x01e4, PIN_INPUT, 0) /* (AF11) UART0_RXD */
+ AM65X_IOPAD(0x01e8, PIN_OUTPUT, 0) /* (AE11) UART0_TXD */
+ AM65X_IOPAD(0x01ec, PIN_INPUT, 0) /* (AG11) UART0_CTSn */
+ AM65X_IOPAD(0x01f0, PIN_OUTPUT, 0) /* (AD11) UART0_RTSn */
+ AM65X_IOPAD(0x0188, PIN_INPUT, 1) /* (D25) UART0_DCDn */
+ AM65X_IOPAD(0x018c, PIN_INPUT, 1) /* (B26) UART0_DSRn */
+ AM65X_IOPAD(0x0190, PIN_OUTPUT, 1) /* (A24) UART0_DTRn */
+ AM65X_IOPAD(0x0194, PIN_INPUT, 1) /* (E24) UART0_RIN */
+ >;
+ };
+};
+
+&main_uart0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_uart0_pins_default>;
+};
diff --git a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
index fe3043943906..eddb2ffb93ca 100644
--- a/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-am654-base-board.dts
@@ -85,12 +85,6 @@
gpios = <&wkup_gpio0 27 GPIO_ACTIVE_LOW>;
};
};
-
- clk_ov5640_fixed: clock {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <24000000>;
- };
};
&wkup_pmx0 {
@@ -287,23 +281,6 @@
pinctrl-names = "default";
pinctrl-0 = <&main_i2c1_pins_default>;
clock-frequency = <400000>;
-
- ov5640: camera@3c {
- compatible = "ovti,ov5640";
- reg = <0x3c>;
-
- clocks = <&clk_ov5640_fixed>;
- clock-names = "xclk";
-
- port {
- csi2_cam0: endpoint {
- remote-endpoint = <&csi2_phy0>;
- clock-lanes = <0>;
- data-lanes = <1 2>;
- };
- };
- };
-
};
&main_i2c2 {
@@ -483,9 +460,9 @@
flash@0{
compatible = "jedec,spi-nor";
reg = <0x0>;
- spi-tx-bus-width = <1>;
+ spi-tx-bus-width = <8>;
spi-rx-bus-width = <8>;
- spi-max-frequency = <40000000>;
+ spi-max-frequency = <25000000>;
cdns,tshsl-ns = <60>;
cdns,tsd2d-ns = <60>;
cdns,tchsh-ns = <60>;
@@ -496,14 +473,6 @@
};
};
-&csi2_0 {
- csi2_phy0: endpoint {
- remote-endpoint = <&csi2_cam0>;
- clock-lanes = <0>;
- data-lanes = <1 2>;
- };
-};
-
&mcu_cpsw {
pinctrl-names = "default";
pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
diff --git a/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced.dts b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced.dts
new file mode 100644
index 000000000000..ec9617c13cdb
--- /dev/null
+++ b/arch/arm64/boot/dts/ti/k3-am6548-iot2050-advanced.dts
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) Siemens AG, 2018-2021
+ *
+ * Authors:
+ * Le Jin <le.jin@siemens.com>
+ * Jan Kiszka <jan.kiszka@siemens.com>
+ *
+ * AM6548-based (quad-core) IOT2050 Advanced variant
+ * 2 GB RAM, 16 GB eMMC, USB-serial converter on connector X30
+ */
+
+/dts-v1/;
+
+#include "k3-am65-iot2050-common.dtsi"
+
+/ {
+ compatible = "siemens,iot2050-advanced", "ti,am654";
+ model = "SIMATIC IOT2050 Advanced";
+
+ memory@80000000 {
+ device_type = "memory";
+ /* 2G RAM */
+ reg = <0x00000000 0x80000000 0x00000000 0x80000000>;
+ };
+};
+
+&main_pmx0 {
+ main_mmc0_pins_default: main-mmc0-pins-default {
+ pinctrl-single,pins = <
+ AM65X_IOPAD(0x01a8, PIN_INPUT_PULLDOWN, 0) /* (B25) MMC0_CLK */
+ AM65X_IOPAD(0x01ac, PIN_INPUT_PULLUP, 0) /* (B27) MMC0_CMD */
+ AM65X_IOPAD(0x01a4, PIN_INPUT_PULLUP, 0) /* (A26) MMC0_DAT0 */
+ AM65X_IOPAD(0x01a0, PIN_INPUT_PULLUP, 0) /* (E25) MMC0_DAT1 */
+ AM65X_IOPAD(0x019c, PIN_INPUT_PULLUP, 0) /* (C26) MMC0_DAT2 */
+ AM65X_IOPAD(0x0198, PIN_INPUT_PULLUP, 0) /* (A25) MMC0_DAT3 */
+ AM65X_IOPAD(0x0194, PIN_INPUT_PULLUP, 0) /* (E24) MMC0_DAT4 */
+ AM65X_IOPAD(0x0190, PIN_INPUT_PULLUP, 0) /* (A24) MMC0_DAT5 */
+ AM65X_IOPAD(0x018c, PIN_INPUT_PULLUP, 0) /* (B26) MMC0_DAT6 */
+ AM65X_IOPAD(0x0188, PIN_INPUT_PULLUP, 0) /* (D25) MMC0_DAT7 */
+ AM65X_IOPAD(0x01b8, PIN_OUTPUT_PULLUP, 7) /* (B23) MMC0_SDWP */
+ AM65X_IOPAD(0x01b4, PIN_INPUT_PULLUP, 0) /* (A23) MMC0_SDCD */
+ AM65X_IOPAD(0x01b0, PIN_INPUT, 0) /* (C25) MMC0_DS */
+ >;
+ };
+};
+
+/* eMMC */
+&sdhci0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_mmc0_pins_default>;
+ bus-width = <8>;
+ non-removable;
+ ti,driver-strength-ohm = <50>;
+ disable-wp;
+};
+
+&main_uart0 {
+ status = "disabled";
+};
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
index 4a7182abccf5..bedd01b7a32c 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j7200-common-proc-board.dts
@@ -16,6 +16,65 @@
stdout-path = "serial2:115200n8";
bootargs = "console=ttyS2,115200n8 earlycon=ns16550a,mmio32,0x02800000";
};
+
+ evm_12v0: fixedregulator-evm12v0 {
+ /* main supply */
+ compatible = "regulator-fixed";
+ regulator-name = "evm_12v0";
+ regulator-min-microvolt = <12000000>;
+ regulator-max-microvolt = <12000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vsys_3v3: fixedregulator-vsys3v3 {
+ /* Output of LM5140 */
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&evm_12v0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vsys_5v0: fixedregulator-vsys5v0 {
+ /* Output of LM5140 */
+ compatible = "regulator-fixed";
+ regulator-name = "vsys_5v0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&evm_12v0>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vdd_mmc1: fixedregulator-sd {
+ /* Output of TPS22918 */
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_mmc1";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ enable-active-high;
+ vin-supply = <&vsys_3v3>;
+ gpio = <&exp2 2 GPIO_ACTIVE_HIGH>;
+ };
+
+ vdd_sd_dv: gpio-regulator-TLV71033 {
+ /* Output of TLV71033 */
+ compatible = "regulator-gpio";
+ regulator-name = "tlv71033";
+ pinctrl-names = "default";
+ pinctrl-0 = <&vdd_sd_dv_pins_default>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ vin-supply = <&vsys_5v0>;
+ gpios = <&main_gpio0 55 GPIO_ACTIVE_HIGH>;
+ states = <1800000 0x0>,
+ <3300000 0x1>;
+ };
};
&wkup_pmx0 {
@@ -45,6 +104,13 @@
};
&main_pmx0 {
+ main_i2c0_pins_default: main-i2c0-pins-default {
+ pinctrl-single,pins = <
+ J721E_IOPAD(0xd4, PIN_INPUT_PULLUP, 0) /* (V3) I2C0_SCL */
+ J721E_IOPAD(0xd8, PIN_INPUT_PULLUP, 0) /* (W2) I2C0_SDA */
+ >;
+ };
+
main_i2c1_pins_default: main-i2c1-pins-default {
pinctrl-single,pins = <
J721E_IOPAD(0xdc, PIN_INPUT_PULLUP, 3) /* (U3) ECAP0_IN_APWM_OUT.I2C1_SCL */
@@ -70,6 +136,12 @@
J721E_IOPAD(0x120, PIN_OUTPUT, 0) /* (T4) USB0_DRVVBUS */
>;
};
+
+ vdd_sd_dv_pins_default: vdd-sd-dv-pins-default {
+ pinctrl-single,pins = <
+ J721E_IOPAD(0xd0, PIN_OUTPUT, 7) /* (T5) SPI0_D1.GPIO0_55 */
+ >;
+ };
};
&wkup_uart0 {
@@ -122,6 +194,22 @@
status = "disabled";
};
+&main_gpio2 {
+ status = "disabled";
+};
+
+&main_gpio4 {
+ status = "disabled";
+};
+
+&main_gpio6 {
+ status = "disabled";
+};
+
+&wkup_gpio1 {
+ status = "disabled";
+};
+
&mcu_cpsw {
pinctrl-names = "default";
pinctrl-0 = <&mcu_cpsw_pins_default &mcu_mdio_pins_default>;
@@ -141,6 +229,10 @@
};
&main_i2c0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&main_i2c0_pins_default>;
+ clock-frequency = <400000>;
+
exp1: gpio@20 {
compatible = "ti,tca6416";
reg = <0x20>;
@@ -190,6 +282,8 @@
/* SD card */
pinctrl-0 = <&main_mmc1_pins_default>;
pinctrl-names = "default";
+ vmmc-supply = <&vdd_mmc1>;
+ vqmmc-supply = <&vdd_sd_dv>;
ti,driver-strength-ohm = <50>;
disable-wp;
};
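For clarity, the states table in the vdd_sd_dv node above pairs each selectable voltage with the logical level driven on GPIO0_55; together with the vqmmc-supply hookup on the SD-card controller here, this is what lets the interface switch between 3.3 V and 1.8 V signalling. Annotated copy of the property (comments added for illustration only):

	states = <1800000 0x0>,	/* GPIO0_55 low  -> 1.8 V */
		 <3300000 0x1>;	/* GPIO0_55 high -> 3.3 V */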
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
index 17477ab0fd8e..19fea8adbcff 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
@@ -68,8 +68,9 @@
};
};
- main_gpio_intr: interrupt-controller0 {
+ main_gpio_intr: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x00a00000 0x00 0x800>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -85,9 +86,12 @@
#size-cells = <2>;
ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
ti,sci-dev-id = <199>;
+ dma-coherent;
+ dma-ranges;
- main_navss_intr: interrupt-controller1 {
+ main_navss_intr: interrupt-controller@310e0000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x310e0000 0x00 0x4000>;
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -512,11 +516,16 @@
ti,otap-del-sel-mmc-hs = <0x0>;
ti,otap-del-sel-ddr52 = <0x6>;
ti,otap-del-sel-hs200 = <0x8>;
- ti,otap-del-sel-hs400 = <0x0>;
+ ti,otap-del-sel-hs400 = <0x5>;
+ ti,itap-del-sel-legacy = <0x10>;
+ ti,itap-del-sel-mmc-hs = <0xa>;
ti,strobe-sel = <0x77>;
+ ti,clkbuf-sel = <0x7>;
ti,trm-icp = <0x8>;
bus-width = <8>;
mmc-ddr-1_8v;
+ mmc-hs200-1_8v;
+ mmc-hs400-1_8v;
dma-coherent;
};
@@ -534,7 +543,12 @@
ti,otap-del-sel-sdr50 = <0xc>;
ti,otap-del-sel-sdr104 = <0x5>;
ti,otap-del-sel-ddr50 = <0xc>;
- no-1-8-v;
+ ti,itap-del-sel-legacy = <0x0>;
+ ti,itap-del-sel-sd-hs = <0x0>;
+ ti,itap-del-sel-sdr12 = <0x0>;
+ ti,itap-del-sel-sdr25 = <0x0>;
+ ti,clkbuf-sel = <0x7>;
+ ti,trm-icp = <0x8>;
dma-coherent;
};
@@ -672,6 +686,78 @@
};
};
+ main_gpio0: gpio@600000 {
+ compatible = "ti,j721e-gpio", "ti,keystone-gpio";
+ reg = <0x00 0x00600000 0x00 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&main_gpio_intr>;
+ interrupts = <145>, <146>, <147>, <148>,
+ <149>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ ti,ngpio = <69>;
+ ti,davinci-gpio-unbanked = <0>;
+ power-domains = <&k3_pds 105 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 105 0>;
+ clock-names = "gpio";
+ };
+
+ main_gpio2: gpio@610000 {
+ compatible = "ti,j721e-gpio", "ti,keystone-gpio";
+ reg = <0x00 0x00610000 0x00 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&main_gpio_intr>;
+ interrupts = <154>, <155>, <156>, <157>,
+ <158>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ ti,ngpio = <69>;
+ ti,davinci-gpio-unbanked = <0>;
+ power-domains = <&k3_pds 107 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 107 0>;
+ clock-names = "gpio";
+ };
+
+ main_gpio4: gpio@620000 {
+ compatible = "ti,j721e-gpio", "ti,keystone-gpio";
+ reg = <0x00 0x00620000 0x00 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&main_gpio_intr>;
+ interrupts = <163>, <164>, <165>, <166>,
+ <167>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ ti,ngpio = <69>;
+ ti,davinci-gpio-unbanked = <0>;
+ power-domains = <&k3_pds 109 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 109 0>;
+ clock-names = "gpio";
+ };
+
+ main_gpio6: gpio@630000 {
+ compatible = "ti,j721e-gpio", "ti,keystone-gpio";
+ reg = <0x00 0x00630000 0x00 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&main_gpio_intr>;
+ interrupts = <172>, <173>, <174>, <175>,
+ <176>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ ti,ngpio = <69>;
+ ti,davinci-gpio-unbanked = <0>;
+ power-domains = <&k3_pds 111 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 111 0>;
+ clock-names = "gpio";
+ };
+
main_r5fss0: r5fss@5c00000 {
compatible = "ti,j7200-r5fss";
ti,cluster-mode = <1>;
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
index 359e3e8a8cd0..5663fe3ea466 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-mcu-wakeup.dtsi
@@ -6,7 +6,7 @@
*/
&cbass_mcu_wakeup {
- dmsc: dmsc@44083000 {
+ dmsc: system-controller@44083000 {
compatible = "ti,k2g-sci";
ti,host-id = <12>;
@@ -23,7 +23,7 @@
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -96,8 +96,9 @@
clock-names = "fclk";
};
- wkup_gpio_intr: interrupt-controller2 {
+ wkup_gpio_intr: interrupt-controller@42200000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x42200000 0x00 0x400>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -107,6 +108,40 @@
ti,interrupt-ranges = <16 960 16>;
};
+ wkup_gpio0: gpio@42110000 {
+ compatible = "ti,j721e-gpio", "ti,keystone-gpio";
+ reg = <0x00 0x42110000 0x00 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&wkup_gpio_intr>;
+ interrupts = <103>, <104>, <105>, <106>, <107>, <108>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ ti,ngpio = <85>;
+ ti,davinci-gpio-unbanked = <0>;
+ power-domains = <&k3_pds 113 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 113 0>;
+ clock-names = "gpio";
+ };
+
+ wkup_gpio1: gpio@42100000 {
+ compatible = "ti,j721e-gpio", "ti,keystone-gpio";
+ reg = <0x00 0x42100000 0x00 0x100>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-parent = <&wkup_gpio_intr>;
+ interrupts = <112>, <113>, <114>, <115>, <116>, <117>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #address-cells = <0>;
+ ti,ngpio = <85>;
+ ti,davinci-gpio-unbanked = <0>;
+ power-domains = <&k3_pds 114 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 114 0>;
+ clock-names = "gpio";
+ };
+
mcu_navss: bus@28380000 {
compatible = "simple-mfd";
#address-cells = <2>;
@@ -269,6 +304,23 @@
#size-cells = <1>;
mux-controls = <&hbmc_mux 0>;
};
+
+ ospi0: spi@47040000 {
+ compatible = "ti,am654-ospi", "cdns,qspi-nor";
+ reg = <0x0 0x47040000 0x0 0x100>,
+ <0x5 0x00000000 0x1 0x00000000>;
+ interrupts = <GIC_SPI 840 IRQ_TYPE_LEVEL_HIGH>;
+ cdns,fifo-depth = <256>;
+ cdns,fifo-width = <4>;
+ cdns,trigger-address = <0x0>;
+ clocks = <&k3_clks 103 0>;
+ assigned-clocks = <&k3_clks 103 0>;
+ assigned-clock-parents = <&k3_clks 103 2>;
+ assigned-clock-rates = <166666666>;
+ power-domains = <&k3_pds 103 TI_SCI_PD_EXCLUSIVE>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
};
tscadc0: tscadc@40200000 {
diff --git a/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
index a988e2ab2ba1..34724440171a 100644
--- a/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j7200-som-p0.dtsi
@@ -100,6 +100,22 @@
J721E_WKUP_IOPAD(0x28, PIN_INPUT, 1) /* (A7) MCU_OSPI0_D7.MCU_HYPERBUS0_DQ7 */
>;
};
+
+ mcu_fss0_ospi0_pins_default: mcu-fss0-ospi0-pins-default {
+ pinctrl-single,pins = <
+ J721E_WKUP_IOPAD(0x0000, PIN_OUTPUT, 0) /* MCU_OSPI0_CLK */
+ J721E_WKUP_IOPAD(0x002c, PIN_OUTPUT, 0) /* MCU_OSPI0_CSn0 */
+ J721E_WKUP_IOPAD(0x000c, PIN_INPUT, 0) /* MCU_OSPI0_D0 */
+ J721E_WKUP_IOPAD(0x0010, PIN_INPUT, 0) /* MCU_OSPI0_D1 */
+ J721E_WKUP_IOPAD(0x0014, PIN_INPUT, 0) /* MCU_OSPI0_D2 */
+ J721E_WKUP_IOPAD(0x0018, PIN_INPUT, 0) /* MCU_OSPI0_D3 */
+ J721E_WKUP_IOPAD(0x001c, PIN_INPUT, 0) /* MCU_OSPI0_D4 */
+ J721E_WKUP_IOPAD(0x0020, PIN_INPUT, 0) /* MCU_OSPI0_D5 */
+ J721E_WKUP_IOPAD(0x0024, PIN_INPUT, 0) /* MCU_OSPI0_D6 */
+ J721E_WKUP_IOPAD(0x0028, PIN_INPUT, 0) /* MCU_OSPI0_D7 */
+ J721E_WKUP_IOPAD(0x0008, PIN_INPUT, 0) /* MCU_OSPI0_DQS */
+ >;
+ };
};
&main_pmx0 {
@@ -235,3 +251,23 @@
"GPIO_LIN_EN", "CAN_STB";
};
};
+
+&ospi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&mcu_fss0_ospi0_pins_default>;
+
+ flash@0 {
+ compatible = "jedec,spi-nor";
+ reg = <0x0>;
+ spi-tx-bus-width = <8>;
+ spi-rx-bus-width = <8>;
+ spi-max-frequency = <25000000>;
+ cdns,tshsl-ns = <60>;
+ cdns,tsd2d-ns = <60>;
+ cdns,tchsh-ns = <60>;
+ cdns,tslch-ns = <60>;
+ cdns,read-delay = <4>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+};
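A flash layout is typically described with a fixed-partitions child under the flash node; a minimal sketch, assuming a hypothetical label and size (this patch defines no partitions):

&ospi0 {
	flash@0 {
		partitions {
			compatible = "fixed-partitions";
			#address-cells = <1>;
			#size-cells = <1>;

			/* hypothetical boot partition at offset 0 */
			partition@0 {
				label = "ospi.tiboot3";
				reg = <0x0 0x80000>;
			};
		};
	};
};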
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
index 8c84dafb7125..3bcafe4c1742 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-main.dtsi
@@ -76,8 +76,9 @@
};
};
- main_gpio_intr: interrupt-controller0 {
+ main_gpio_intr: interrupt-controller@a00000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x00a00000 0x00 0x800>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -87,18 +88,19 @@
ti,interrupt-ranges = <8 392 56>;
};
- main-navss {
+ main_navss: bus@30000000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
dma-coherent;
dma-ranges;
ti,sci-dev-id = <199>;
- main_navss_intr: interrupt-controller1 {
+ main_navss_intr: interrupt-controller@310e0000 {
compatible = "ti,sci-intr";
+ reg = <0x0 0x310e0000 0x0 0x4000>;
ti,intr-trigger-type = <4>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -1042,13 +1044,16 @@
assigned-clocks = <&k3_clks 91 1>;
assigned-clock-parents = <&k3_clks 91 2>;
bus-width = <8>;
- mmc-hs400-1_8v;
+ mmc-hs200-1_8v;
mmc-ddr-1_8v;
ti,otap-del-sel-legacy = <0xf>;
ti,otap-del-sel-mmc-hs = <0xf>;
ti,otap-del-sel-ddr52 = <0x5>;
ti,otap-del-sel-hs200 = <0x6>;
ti,otap-del-sel-hs400 = <0x0>;
+ ti,itap-del-sel-legacy = <0x10>;
+ ti,itap-del-sel-mmc-hs = <0xa>;
+ ti,itap-del-sel-ddr52 = <0x3>;
ti,trm-icp = <0x8>;
ti,strobe-sel = <0x77>;
dma-coherent;
@@ -1069,9 +1074,15 @@
ti,otap-del-sel-sdr25 = <0xf>;
ti,otap-del-sel-sdr50 = <0xc>;
ti,otap-del-sel-ddr50 = <0xc>;
+ ti,itap-del-sel-legacy = <0x0>;
+ ti,itap-del-sel-sd-hs = <0x0>;
+ ti,itap-del-sel-sdr12 = <0x0>;
+ ti,itap-del-sel-sdr25 = <0x0>;
+ ti,itap-del-sel-ddr50 = <0x2>;
ti,trm-icp = <0x8>;
ti,clkbuf-sel = <0x7>;
dma-coherent;
+ sdhci-caps-mask = <0x2 0x0>;
};
main_sdhci2: mmc@4f98000 {
@@ -1089,9 +1100,15 @@
ti,otap-del-sel-sdr25 = <0xf>;
ti,otap-del-sel-sdr50 = <0xc>;
ti,otap-del-sel-ddr50 = <0xc>;
+ ti,itap-del-sel-legacy = <0x0>;
+ ti,itap-del-sel-sd-hs = <0x0>;
+ ti,itap-del-sel-sdr12 = <0x0>;
+ ti,itap-del-sel-sdr25 = <0x0>;
+ ti,itap-del-sel-ddr50 = <0x2>;
ti,trm-icp = <0x8>;
ti,clkbuf-sel = <0x7>;
dma-coherent;
+ sdhci-caps-mask = <0x2 0x0>;
};
usbss0: cdns-usb@4104000 {
@@ -1647,4 +1664,266 @@
resets = <&k3_reset 15 1>;
firmware-name = "j7-c71_0-fw";
};
+
+ icssg0: icssg@b000000 {
+ compatible = "ti,j721e-icssg";
+ reg = <0x00 0xb000000 0x00 0x80000>;
+ power-domains = <&k3_pds 119 TI_SCI_PD_EXCLUSIVE>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x00 0x0b000000 0x100000>;
+
+ icssg0_mem: memories@0 {
+ reg = <0x0 0x2000>,
+ <0x2000 0x2000>,
+ <0x10000 0x10000>;
+ reg-names = "dram0", "dram1",
+ "shrdram2";
+ };
+
+ icssg0_cfg: cfg@26000 {
+ compatible = "ti,pruss-cfg", "syscon";
+ reg = <0x26000 0x200>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x26000 0x2000>;
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ icssg0_coreclk_mux: coreclk-mux@3c {
+ reg = <0x3c>;
+ #clock-cells = <0>;
+ clocks = <&k3_clks 119 24>, /* icssg0_core_clk */
+ <&k3_clks 119 1>; /* icssg0_iclk */
+ assigned-clocks = <&icssg0_coreclk_mux>;
+ assigned-clock-parents = <&k3_clks 119 1>;
+ };
+
+ icssg0_iepclk_mux: iepclk-mux@30 {
+ reg = <0x30>;
+ #clock-cells = <0>;
+ clocks = <&k3_clks 119 3>, /* icssg0_iep_clk */
+ <&icssg0_coreclk_mux>; /* core_clk */
+ assigned-clocks = <&icssg0_iepclk_mux>;
+ assigned-clock-parents = <&icssg0_coreclk_mux>;
+ };
+ };
+ };
+
+ icssg0_mii_rt: mii-rt@32000 {
+ compatible = "ti,pruss-mii", "syscon";
+ reg = <0x32000 0x100>;
+ };
+
+ icssg0_mii_g_rt: mii-g-rt@33000 {
+ compatible = "ti,pruss-mii-g", "syscon";
+ reg = <0x33000 0x1000>;
+ };
+
+ icssg0_intc: interrupt-controller@20000 {
+ compatible = "ti,icssg-intc";
+ reg = <0x20000 0x2000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupts = <GIC_SPI 254 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 255 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 257 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 258 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 259 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host_intr0", "host_intr1",
+ "host_intr2", "host_intr3",
+ "host_intr4", "host_intr5",
+ "host_intr6", "host_intr7";
+ };
+
+ pru0_0: pru@34000 {
+ compatible = "ti,j721e-pru";
+ reg = <0x34000 0x4000>,
+ <0x22000 0x100>,
+ <0x22400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "j7-pru0_0-fw";
+ };
+
+ rtu0_0: rtu@4000 {
+ compatible = "ti,j721e-rtu";
+ reg = <0x4000 0x2000>,
+ <0x23000 0x100>,
+ <0x23400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "j7-rtu0_0-fw";
+ };
+
+ tx_pru0_0: txpru@a000 {
+ compatible = "ti,j721e-tx-pru";
+ reg = <0xa000 0x1800>,
+ <0x25000 0x100>,
+ <0x25400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "j7-txpru0_0-fw";
+ };
+
+ pru0_1: pru@38000 {
+ compatible = "ti,j721e-pru";
+ reg = <0x38000 0x4000>,
+ <0x24000 0x100>,
+ <0x24400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "j7-pru0_1-fw";
+ };
+
+ rtu0_1: rtu@6000 {
+ compatible = "ti,j721e-rtu";
+ reg = <0x6000 0x2000>,
+ <0x23800 0x100>,
+ <0x23c00 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "j7-rtu0_1-fw";
+ };
+
+ tx_pru0_1: txpru@c000 {
+ compatible = "ti,j721e-tx-pru";
+ reg = <0xc000 0x1800>,
+ <0x25800 0x100>,
+ <0x25c00 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "j7-txpru0_1-fw";
+ };
+ };
+
+ icssg1: icssg@b100000 {
+ compatible = "ti,j721e-icssg";
+ reg = <0x00 0xb100000 0x00 0x80000>;
+ power-domains = <&k3_pds 120 TI_SCI_PD_EXCLUSIVE>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x00 0x0b100000 0x100000>;
+
+ icssg1_mem: memories@0 {
+ reg = <0x0 0x2000>,
+ <0x2000 0x2000>,
+ <0x10000 0x10000>;
+ reg-names = "dram0", "dram1",
+ "shrdram2";
+ };
+
+ icssg1_cfg: cfg@26000 {
+ compatible = "ti,pruss-cfg", "syscon";
+ reg = <0x26000 0x200>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x26000 0x2000>;
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ icssg1_coreclk_mux: coreclk-mux@3c {
+ reg = <0x3c>;
+ #clock-cells = <0>;
+ clocks = <&k3_clks 120 54>, /* icssg1_core_clk */
+ <&k3_clks 120 4>; /* icssg1_iclk */
+ assigned-clocks = <&icssg1_coreclk_mux>;
+ assigned-clock-parents = <&k3_clks 120 4>;
+ };
+
+ icssg1_iepclk_mux: iepclk-mux@30 {
+ reg = <0x30>;
+ #clock-cells = <0>;
+ clocks = <&k3_clks 120 9>, /* icssg1_iep_clk */
+ <&icssg1_coreclk_mux>; /* core_clk */
+ assigned-clocks = <&icssg1_iepclk_mux>;
+ assigned-clock-parents = <&icssg1_coreclk_mux>;
+ };
+ };
+ };
+
+ icssg1_mii_rt: mii-rt@32000 {
+ compatible = "ti,pruss-mii", "syscon";
+ reg = <0x32000 0x100>;
+ };
+
+ icssg1_mii_g_rt: mii-g-rt@33000 {
+ compatible = "ti,pruss-mii-g", "syscon";
+ reg = <0x33000 0x1000>;
+ };
+
+ icssg1_intc: interrupt-controller@20000 {
+ compatible = "ti,icssg-intc";
+ reg = <0x20000 0x2000>;
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupts = <GIC_SPI 262 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host_intr0", "host_intr1",
+ "host_intr2", "host_intr3",
+ "host_intr4", "host_intr5",
+ "host_intr6", "host_intr7";
+ };
+
+ pru1_0: pru@34000 {
+ compatible = "ti,j721e-pru";
+ reg = <0x34000 0x4000>,
+ <0x22000 0x100>,
+ <0x22400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "j7-pru1_0-fw";
+ };
+
+ rtu1_0: rtu@4000 {
+ compatible = "ti,j721e-rtu";
+ reg = <0x4000 0x2000>,
+ <0x23000 0x100>,
+ <0x23400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "j7-rtu1_0-fw";
+ };
+
+ tx_pru1_0: txpru@a000 {
+ compatible = "ti,j721e-tx-pru";
+ reg = <0xa000 0x1800>,
+ <0x25000 0x100>,
+ <0x25400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "j7-txpru1_0-fw";
+ };
+
+ pru1_1: pru@38000 {
+ compatible = "ti,j721e-pru";
+ reg = <0x38000 0x4000>,
+ <0x24000 0x100>,
+ <0x24400 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "j7-pru1_1-fw";
+ };
+
+ rtu1_1: rtu@6000 {
+ compatible = "ti,j721e-rtu";
+ reg = <0x6000 0x2000>,
+ <0x23800 0x100>,
+ <0x23c00 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "j7-rtu1_1-fw";
+ };
+
+ tx_pru1_1: txpru@c000 {
+ compatible = "ti,j721e-tx-pru";
+ reg = <0xc000 0x1800>,
+ <0x25800 0x100>,
+ <0x25c00 0x100>;
+ reg-names = "iram", "control", "debug";
+ firmware-name = "j7-txpru1_1-fw";
+ };
+ };
};
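The iepclk-mux nodes above model a one-bit mux register in the ICSSG CFG space, and their assigned-clock-parents default the IEP clock to the muxed core clock. A board that needs the dedicated IEP clock instead could re-parent the mux; an illustrative override, not part of this patch:

&icssg1_iepclk_mux {
	assigned-clocks = <&icssg1_iepclk_mux>;
	assigned-clock-parents = <&k3_clks 120 9>;	/* icssg1_iep_clk */
};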
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
index 6c44afae9187..5e825e4d0306 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-mcu-wakeup.dtsi
@@ -6,7 +6,7 @@
*/
&cbass_mcu_wakeup {
- dmsc: dmsc@44083000 {
+ dmsc: system-controller@44083000 {
compatible = "ti,k2g-sci";
ti,host-id = <12>;
@@ -23,7 +23,7 @@
#power-domain-cells = <2>;
};
- k3_clks: clocks {
+ k3_clks: clock-controller {
compatible = "ti,k2g-sci-clk";
#clock-cells = <2>;
};
@@ -96,8 +96,9 @@
clock-names = "fclk";
};
- wkup_gpio_intr: interrupt-controller2 {
+ wkup_gpio_intr: interrupt-controller@42200000 {
compatible = "ti,sci-intr";
+ reg = <0x00 0x42200000 0x00 0x400>;
ti,intr-trigger-type = <1>;
interrupt-controller;
interrupt-parent = <&gic500>;
@@ -180,7 +181,7 @@
ranges;
ospi0: spi@47040000 {
- compatible = "ti,am654-ospi";
+ compatible = "ti,am654-ospi", "cdns,qspi-nor";
reg = <0x0 0x47040000 0x0 0x100>,
<0x5 0x00000000 0x1 0x0000000>;
interrupts = <GIC_SPI 840 IRQ_TYPE_LEVEL_HIGH>;
@@ -197,7 +198,7 @@
};
ospi1: spi@47050000 {
- compatible = "ti,am654-ospi";
+ compatible = "ti,am654-ospi", "cdns,qspi-nor";
reg = <0x0 0x47050000 0x0 0x100>,
<0x7 0x00000000 0x1 0x00000000>;
interrupts = <GIC_SPI 841 IRQ_TYPE_LEVEL_HIGH>;
@@ -249,11 +250,11 @@
};
};
- mcu-navss {
+ mcu_navss: bus@28380000 {
compatible = "simple-mfd";
#address-cells = <2>;
#size-cells = <2>;
- ranges;
+ ranges = <0x00 0x28380000 0x00 0x28380000 0x00 0x03880000>;
dma-coherent;
dma-ranges;
diff --git a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
index 57720e6a04c5..2fee2906183d 100644
--- a/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721e-som-p0.dtsi
@@ -174,9 +174,9 @@
flash@0{
compatible = "jedec,spi-nor";
reg = <0x0>;
- spi-tx-bus-width = <1>;
+ spi-tx-bus-width = <8>;
spi-rx-bus-width = <8>;
- spi-max-frequency = <40000000>;
+ spi-max-frequency = <25000000>;
cdns,tshsl-ns = <60>;
cdns,tsd2d-ns = <60>;
cdns,tchsh-ns = <60>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
index 12e8bd48dc8c..eca6c2de84a7 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu102-revA.dts
@@ -580,25 +580,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
- si5328: clock-generator@69 {/* SI5328 - u20 */
- reg = <0x69>;
- /*
- * Chip has interrupt present connected to PL
- * interrupt-parent = <&>;
- * interrupts = <>;
- */
- #address-cells = <1>;
- #size-cells = <0>;
- #clock-cells = <1>;
- clocks = <&refhdmi>;
- clock-names = "xtal";
- clock-output-names = "si5328";
-
- si5328_clk: clk0@0 {
- reg = <0>;
- clock-frequency = <27000000>;
- };
- };
+ /* SI5328 - u20 */
};
/* 5 - 7 unconnected */
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
index 18771e868399..eff7c6447087 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu106-revA.dts
@@ -581,25 +581,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <4>;
- si5328: clock-generator@69 {/* SI5328 - u20 */
- reg = <0x69>;
- /*
- * Chip has interrupt present connected to PL
- * interrupt-parent = <&>;
- * interrupts = <>;
- */
- #address-cells = <1>;
- #size-cells = <0>;
- #clock-cells = <1>;
- clocks = <&refhdmi>;
- clock-names = "xtal";
- clock-output-names = "si5328";
-
- si5328_clk: clk0@0 {
- reg = <0>;
- clock-frequency = <27000000>;
- };
- };
+ /* SI5328 - u20 */
};
i2c@5 {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index a3b391d18787..28dccb891a53 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -856,6 +856,7 @@
interrupts = <0 122 4>;
interrupt-parent = <&gic>;
clock-names = "axi_clk";
+ power-domains = <&zynqmp_firmware PD_DP>;
#dma-cells = <1>;
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index d612f633b771..08c6f769df9a 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -29,8 +29,10 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
CONFIG_ARCH_ACTIONS=y
CONFIG_ARCH_AGILEX=y
+CONFIG_ARCH_N5X=y
CONFIG_ARCH_SUNXI=y
CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_APPLE=y
CONFIG_ARCH_BCM2835=y
CONFIG_ARCH_BCM4908=y
CONFIG_ARCH_BCM_IPROC=y
@@ -41,6 +43,7 @@ CONFIG_ARCH_K3=y
CONFIG_ARCH_LAYERSCAPE=y
CONFIG_ARCH_LG1K=y
CONFIG_ARCH_HISI=y
+CONFIG_ARCH_KEEMBAY=y
CONFIG_ARCH_MEDIATEK=y
CONFIG_ARCH_MESON=y
CONFIG_ARCH_MVEBU=y
@@ -50,7 +53,7 @@ CONFIG_ARCH_RENESAS=y
CONFIG_ARCH_ROCKCHIP=y
CONFIG_ARCH_S32=y
CONFIG_ARCH_SEATTLE=y
-CONFIG_ARCH_STRATIX10=y
+CONFIG_ARCH_INTEL_SOCFPGA=y
CONFIG_ARCH_SYNQUACER=y
CONFIG_ARCH_TEGRA=y
CONFIG_ARCH_SPRD=y
@@ -225,6 +228,7 @@ CONFIG_PCI_HOST_THUNDER_PEM=y
CONFIG_PCI_HOST_THUNDER_ECAM=y
CONFIG_PCIE_ROCKCHIP_HOST=m
CONFIG_PCIE_BRCMSTB=m
+CONFIG_PCI_IMX6=y
CONFIG_PCI_LAYERSCAPE=y
CONFIG_PCIE_LAYERSCAPE_GEN4=y
CONFIG_PCI_HISI=y
@@ -359,7 +363,7 @@ CONFIG_MESON_GXL_PHY=m
CONFIG_MICREL_PHY=y
CONFIG_MICROSEMI_PHY=y
CONFIG_AT803X_PHY=y
-CONFIG_REALTEK_PHY=m
+CONFIG_REALTEK_PHY=y
CONFIG_ROCKCHIP_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_USB_PEGASUS=m
@@ -448,6 +452,7 @@ CONFIG_I2C_GPIO=m
CONFIG_I2C_IMX=y
CONFIG_I2C_IMX_LPI2C=y
CONFIG_I2C_MESON=y
+CONFIG_I2C_MT65XX=y
CONFIG_I2C_MV64XXX=y
CONFIG_I2C_OMAP=y
CONFIG_I2C_OWL=y
@@ -465,6 +470,9 @@ CONFIG_SPI=y
CONFIG_SPI_ARMADA_3700=y
CONFIG_SPI_BCM2835=m
CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_DESIGNWARE=m
+CONFIG_SPI_DW_DMA=y
+CONFIG_SPI_DW_MMIO=m
CONFIG_SPI_FSL_LPSPI=y
CONFIG_SPI_FSL_QUADSPI=y
CONFIG_SPI_NXP_FLEXSPI=y
@@ -494,6 +502,7 @@ CONFIG_PINCTRL_IMX8MM=y
CONFIG_PINCTRL_IMX8MN=y
CONFIG_PINCTRL_IMX8MP=y
CONFIG_PINCTRL_IMX8MQ=y
+CONFIG_PINCTRL_IMX8QM=y
CONFIG_PINCTRL_IMX8QXP=y
CONFIG_PINCTRL_IMX8DXL=y
CONFIG_PINCTRL_MSM=y
@@ -510,6 +519,7 @@ CONFIG_PINCTRL_SC7180=y
CONFIG_PINCTRL_SDM845=y
CONFIG_PINCTRL_SM8150=y
CONFIG_PINCTRL_SM8250=y
+CONFIG_PINCTRL_SM8350=y
CONFIG_PINCTRL_LPASS_LPI=m
CONFIG_GPIO_ALTERA=m
CONFIG_GPIO_DAVINCI=y
@@ -594,6 +604,7 @@ CONFIG_MFD_EXYNOS_LPASS=m
CONFIG_MFD_HI6421_PMIC=y
CONFIG_MFD_HI655X_PMIC=y
CONFIG_MFD_MAX77620=y
+CONFIG_MFD_MT6397=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_RK808=y
CONFIG_MFD_SEC_CORE=y
@@ -611,6 +622,8 @@ CONFIG_REGULATOR_HI655X=y
CONFIG_REGULATOR_MAX77620=y
CONFIG_REGULATOR_MAX8973=y
CONFIG_REGULATOR_MP8859=y
+CONFIG_REGULATOR_MT6358=y
+CONFIG_REGULATOR_MT6397=y
CONFIG_REGULATOR_PCA9450=y
CONFIG_REGULATOR_PF8X00=y
CONFIG_REGULATOR_PFUZE100=y
@@ -682,6 +695,7 @@ CONFIG_DRM_MSM=m
CONFIG_DRM_TEGRA=m
CONFIG_DRM_PANEL_LVDS=m
CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_PANEL_BOE_TV101WUM_NL6=m
CONFIG_DRM_PANEL_MANTIX_MLAF057WE51=m
CONFIG_DRM_PANEL_RAYDIUM_RM67191=m
CONFIG_DRM_PANEL_SITRONIX_ST7703=m
@@ -689,6 +703,7 @@ CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m
CONFIG_DRM_DISPLAY_CONNECTOR=m
CONFIG_DRM_NWL_MIPI_DSI=m
CONFIG_DRM_LONTIUM_LT9611=m
+CONFIG_DRM_PARADE_PS8640=m
CONFIG_DRM_SII902X=m
CONFIG_DRM_SIMPLE_BRIDGE=m
CONFIG_DRM_THINE_THC63LVD1024=m
@@ -703,6 +718,8 @@ CONFIG_DRM_VC4=m
CONFIG_DRM_ETNAVIV=m
CONFIG_DRM_HISI_HIBMC=m
CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MEDIATEK=m
+CONFIG_DRM_MEDIATEK_HDMI=m
CONFIG_DRM_MXSFB=m
CONFIG_DRM_MESON=m
CONFIG_DRM_PL111=m
@@ -760,9 +777,11 @@ CONFIG_SND_SOC_GTM601=m
CONFIG_SND_SOC_PCM3168A_I2C=m
CONFIG_SND_SOC_RT5659=m
CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m
+CONFIG_SND_SOC_SIMPLE_MUX=m
CONFIG_SND_SOC_TAS571X=m
CONFIG_SND_SOC_WCD934X=m
CONFIG_SND_SOC_WM8904=m
+CONFIG_SND_SOC_WM8960=m
CONFIG_SND_SOC_WM8962=m
CONFIG_SND_SOC_WSA881X=m
CONFIG_SND_SOC_LPASS_WSA_MACRO=m
@@ -787,6 +806,7 @@ CONFIG_USB_RENESAS_USBHS_HCD=m
CONFIG_USB_RENESAS_USBHS=m
CONFIG_USB_ACM=m
CONFIG_USB_STORAGE=y
+CONFIG_USB_MTU3=y
CONFIG_USB_MUSB_HDRC=y
CONFIG_USB_MUSB_SUNXI=y
CONFIG_USB_DWC3=y
@@ -879,6 +899,7 @@ CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_PCF2127=m
CONFIG_RTC_DRV_EFI=y
CONFIG_RTC_DRV_CROS_EC=y
+CONFIG_RTC_DRV_FSL_FTM_ALARM=m
CONFIG_RTC_DRV_S3C=y
CONFIG_RTC_DRV_PL031=y
CONFIG_RTC_DRV_SUN6I=y
@@ -915,6 +936,9 @@ CONFIG_VIRTIO_MMIO=y
CONFIG_XEN_GNTDEV=y
CONFIG_XEN_GRANT_DEV_ALLOC=y
CONFIG_MFD_CROS_EC_DEV=y
+CONFIG_STAGING=y
+CONFIG_STAGING_MEDIA=y
+CONFIG_VIDEO_HANTRO=m
CONFIG_CHROME_PLATFORMS=y
CONFIG_CROS_EC=y
CONFIG_CROS_EC_I2C=y
@@ -957,6 +981,7 @@ CONFIG_SDM_VIDEOCC_845=y
CONFIG_SDM_DISPCC_845=y
CONFIG_SM_GCC_8150=y
CONFIG_SM_GCC_8250=y
+CONFIG_SM_GCC_8350=y
CONFIG_SM_GPUCC_8150=y
CONFIG_SM_GPUCC_8250=y
CONFIG_SM_DISPCC_8250=y
@@ -974,6 +999,7 @@ CONFIG_ROCKCHIP_IOMMU=y
CONFIG_TEGRA_IOMMU_SMMU=y
CONFIG_ARM_SMMU=y
CONFIG_ARM_SMMU_V3=y
+CONFIG_MTK_IOMMU=y
CONFIG_QCOM_IOMMU=y
CONFIG_REMOTEPROC=y
CONFIG_QCOM_Q6V5_MSS=m
@@ -988,6 +1014,8 @@ CONFIG_OWL_PM_DOMAINS=y
CONFIG_RASPBERRYPI_POWER=y
CONFIG_FSL_DPAA=y
CONFIG_FSL_MC_DPIO=y
+CONFIG_FSL_RCPM=y
+CONFIG_MTK_PMIC_WRAP=y
CONFIG_QCOM_AOSS_QMP=y
CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_GENI_SE=y
@@ -1022,6 +1050,8 @@ CONFIG_ARCH_TEGRA_186_SOC=y
CONFIG_ARCH_TEGRA_194_SOC=y
CONFIG_ARCH_TEGRA_234_SOC=y
CONFIG_TI_SCI_PM_DOMAINS=y
+CONFIG_ARM_IMX_BUS_DEVFREQ=m
+CONFIG_ARM_IMX8M_DDRC_DEVFREQ=m
CONFIG_EXTCON_PTN5150=m
CONFIG_EXTCON_USB_GPIO=y
CONFIG_EXTCON_USBC_CROS_EC=y
@@ -1045,6 +1075,8 @@ CONFIG_PWM_BCM2835=m
CONFIG_PWM_CROS_EC=m
CONFIG_PWM_IMX27=m
CONFIG_PWM_MESON=m
+CONFIG_PWM_MTK_DISP=m
+CONFIG_PWM_MEDIATEK=m
CONFIG_PWM_RCAR=m
CONFIG_PWM_ROCKCHIP=y
CONFIG_PWM_SAMSUNG=y
@@ -1064,6 +1096,7 @@ CONFIG_PHY_HI6220_USB=y
CONFIG_PHY_HISTB_COMBPHY=y
CONFIG_PHY_HISI_INNO_USB2=y
CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_MTK_TPHY=y
CONFIG_PHY_QCOM_QMP=m
CONFIG_PHY_QCOM_QUSB2=m
CONFIG_PHY_QCOM_USB_HS=y
@@ -1088,6 +1121,7 @@ CONFIG_QCOM_L3_PMU=y
CONFIG_NVMEM_IMX_OCOTP=y
CONFIG_NVMEM_IMX_OCOTP_SCU=y
CONFIG_QCOM_QFPROM=y
+CONFIG_MTK_EFUSE=y
CONFIG_ROCKCHIP_EFUSE=y
CONFIG_NVMEM_SUNXI_SID=y
CONFIG_UNIPHIER_EFUSE=y
@@ -1156,6 +1190,7 @@ CONFIG_CRYPTO_DEV_HISI_TRNG=m
CONFIG_CMA_SIZE_MBYTES=32
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_REDUCED=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index d0901e610df3..09a805cc32d7 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -68,19 +68,13 @@ CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS
$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
$(call if_changed_rule,cc_o_c)
-ifdef REGENERATE_ARM64_CRYPTO
quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $(<) void $(@)
-$(src)/poly1305-core.S_shipped: $(src)/poly1305-armv8.pl
+$(obj)/%-core.S: $(src)/%-armv8.pl
$(call cmd,perlasm)
-$(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl
+$(obj)/sha256-core.S: $(src)/sha512-armv8.pl
$(call cmd,perlasm)
-$(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl
- $(call cmd,perlasm)
-
-endif
-
clean-files += poly1305-core.S sha256-core.S sha512-core.S
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index bbdb54702aa7..b495de22bb38 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -359,6 +359,7 @@ ST5( mov v4.16b, vctr.16b )
ins vctr.d[0], x8
/* apply carry to N counter blocks for N := x12 */
+ cbz x12, 2f
adr x16, 1f
sub x16, x16, x12, lsl #3
br x16
@@ -700,7 +701,7 @@ AES_FUNC_START(aes_mac_update)
cbz w5, .Lmacout
encrypt_block v0, w2, x1, x7, w8
st1 {v0.16b}, [x4] /* return dg */
- cond_yield .Lmacout, x7
+ cond_yield .Lmacout, x7, x8
b .Lmacloop4x
.Lmac1x:
add w3, w3, #4
diff --git a/arch/arm64/crypto/poly1305-core.S_shipped b/arch/arm64/crypto/poly1305-core.S_shipped
deleted file mode 100644
index fb2822abf63a..000000000000
--- a/arch/arm64/crypto/poly1305-core.S_shipped
+++ /dev/null
@@ -1,835 +0,0 @@
-#ifndef __KERNEL__
-# include "arm_arch.h"
-.extern OPENSSL_armcap_P
-#endif
-
-.text
-
-// forward "declarations" are required for Apple
-.globl poly1305_blocks
-.globl poly1305_emit
-
-.globl poly1305_init
-.type poly1305_init,%function
-.align 5
-poly1305_init:
- cmp x1,xzr
- stp xzr,xzr,[x0] // zero hash value
- stp xzr,xzr,[x0,#16] // [along with is_base2_26]
-
- csel x0,xzr,x0,eq
- b.eq .Lno_key
-
-#ifndef __KERNEL__
- adrp x17,OPENSSL_armcap_P
- ldr w17,[x17,#:lo12:OPENSSL_armcap_P]
-#endif
-
- ldp x7,x8,[x1] // load key
- mov x9,#0xfffffffc0fffffff
- movk x9,#0x0fff,lsl#48
-#ifdef __AARCH64EB__
- rev x7,x7 // flip bytes
- rev x8,x8
-#endif
- and x7,x7,x9 // &=0ffffffc0fffffff
- and x9,x9,#-4
- and x8,x8,x9 // &=0ffffffc0ffffffc
- mov w9,#-1
- stp x7,x8,[x0,#32] // save key value
- str w9,[x0,#48] // impossible key power value
-
-#ifndef __KERNEL__
- tst w17,#ARMV7_NEON
-
- adr x12,.Lpoly1305_blocks
- adr x7,.Lpoly1305_blocks_neon
- adr x13,.Lpoly1305_emit
-
- csel x12,x12,x7,eq
-
-# ifdef __ILP32__
- stp w12,w13,[x2]
-# else
- stp x12,x13,[x2]
-# endif
-#endif
- mov x0,#1
-.Lno_key:
- ret
-.size poly1305_init,.-poly1305_init
-
-.type poly1305_blocks,%function
-.align 5
-poly1305_blocks:
-.Lpoly1305_blocks:
- ands x2,x2,#-16
- b.eq .Lno_data
-
- ldp x4,x5,[x0] // load hash value
- ldp x6,x17,[x0,#16] // [along with is_base2_26]
- ldp x7,x8,[x0,#32] // load key value
-
-#ifdef __AARCH64EB__
- lsr x12,x4,#32
- mov w13,w4
- lsr x14,x5,#32
- mov w15,w5
- lsr x16,x6,#32
-#else
- mov w12,w4
- lsr x13,x4,#32
- mov w14,w5
- lsr x15,x5,#32
- mov w16,w6
-#endif
-
- add x12,x12,x13,lsl#26 // base 2^26 -> base 2^64
- lsr x13,x14,#12
- adds x12,x12,x14,lsl#52
- add x13,x13,x15,lsl#14
- adc x13,x13,xzr
- lsr x14,x16,#24
- adds x13,x13,x16,lsl#40
- adc x14,x14,xzr
-
- cmp x17,#0 // is_base2_26?
- add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
- csel x4,x4,x12,eq // choose between radixes
- csel x5,x5,x13,eq
- csel x6,x6,x14,eq
-
-.Loop:
- ldp x10,x11,[x1],#16 // load input
- sub x2,x2,#16
-#ifdef __AARCH64EB__
- rev x10,x10
- rev x11,x11
-#endif
- adds x4,x4,x10 // accumulate input
- adcs x5,x5,x11
-
- mul x12,x4,x7 // h0*r0
- adc x6,x6,x3
- umulh x13,x4,x7
-
- mul x10,x5,x9 // h1*5*r1
- umulh x11,x5,x9
-
- adds x12,x12,x10
- mul x10,x4,x8 // h0*r1
- adc x13,x13,x11
- umulh x14,x4,x8
-
- adds x13,x13,x10
- mul x10,x5,x7 // h1*r0
- adc x14,x14,xzr
- umulh x11,x5,x7
-
- adds x13,x13,x10
- mul x10,x6,x9 // h2*5*r1
- adc x14,x14,x11
- mul x11,x6,x7 // h2*r0
-
- adds x13,x13,x10
- adc x14,x14,x11
-
- and x10,x14,#-4 // final reduction
- and x6,x14,#3
- add x10,x10,x14,lsr#2
- adds x4,x12,x10
- adcs x5,x13,xzr
- adc x6,x6,xzr
-
- cbnz x2,.Loop
-
- stp x4,x5,[x0] // store hash value
- stp x6,xzr,[x0,#16] // [and clear is_base2_26]
-
-.Lno_data:
- ret
-.size poly1305_blocks,.-poly1305_blocks
-
-.type poly1305_emit,%function
-.align 5
-poly1305_emit:
-.Lpoly1305_emit:
- ldp x4,x5,[x0] // load hash base 2^64
- ldp x6,x7,[x0,#16] // [along with is_base2_26]
- ldp x10,x11,[x2] // load nonce
-
-#ifdef __AARCH64EB__
- lsr x12,x4,#32
- mov w13,w4
- lsr x14,x5,#32
- mov w15,w5
- lsr x16,x6,#32
-#else
- mov w12,w4
- lsr x13,x4,#32
- mov w14,w5
- lsr x15,x5,#32
- mov w16,w6
-#endif
-
- add x12,x12,x13,lsl#26 // base 2^26 -> base 2^64
- lsr x13,x14,#12
- adds x12,x12,x14,lsl#52
- add x13,x13,x15,lsl#14
- adc x13,x13,xzr
- lsr x14,x16,#24
- adds x13,x13,x16,lsl#40
- adc x14,x14,xzr
-
- cmp x7,#0 // is_base2_26?
- csel x4,x4,x12,eq // choose between radixes
- csel x5,x5,x13,eq
- csel x6,x6,x14,eq
-
- adds x12,x4,#5 // compare to modulus
- adcs x13,x5,xzr
- adc x14,x6,xzr
-
- tst x14,#-4 // see if it's carried/borrowed
-
- csel x4,x4,x12,eq
- csel x5,x5,x13,eq
-
-#ifdef __AARCH64EB__
- ror x10,x10,#32 // flip nonce words
- ror x11,x11,#32
-#endif
- adds x4,x4,x10 // accumulate nonce
- adc x5,x5,x11
-#ifdef __AARCH64EB__
- rev x4,x4 // flip output bytes
- rev x5,x5
-#endif
- stp x4,x5,[x1] // write result
-
- ret
-.size poly1305_emit,.-poly1305_emit
-.type poly1305_mult,%function
-.align 5
-poly1305_mult:
- mul x12,x4,x7 // h0*r0
- umulh x13,x4,x7
-
- mul x10,x5,x9 // h1*5*r1
- umulh x11,x5,x9
-
- adds x12,x12,x10
- mul x10,x4,x8 // h0*r1
- adc x13,x13,x11
- umulh x14,x4,x8
-
- adds x13,x13,x10
- mul x10,x5,x7 // h1*r0
- adc x14,x14,xzr
- umulh x11,x5,x7
-
- adds x13,x13,x10
- mul x10,x6,x9 // h2*5*r1
- adc x14,x14,x11
- mul x11,x6,x7 // h2*r0
-
- adds x13,x13,x10
- adc x14,x14,x11
-
- and x10,x14,#-4 // final reduction
- and x6,x14,#3
- add x10,x10,x14,lsr#2
- adds x4,x12,x10
- adcs x5,x13,xzr
- adc x6,x6,xzr
-
- ret
-.size poly1305_mult,.-poly1305_mult
-
-.type poly1305_splat,%function
-.align 4
-poly1305_splat:
- and x12,x4,#0x03ffffff // base 2^64 -> base 2^26
- ubfx x13,x4,#26,#26
- extr x14,x5,x4,#52
- and x14,x14,#0x03ffffff
- ubfx x15,x5,#14,#26
- extr x16,x6,x5,#40
-
- str w12,[x0,#16*0] // r0
- add w12,w13,w13,lsl#2 // r1*5
- str w13,[x0,#16*1] // r1
- add w13,w14,w14,lsl#2 // r2*5
- str w12,[x0,#16*2] // s1
- str w14,[x0,#16*3] // r2
- add w14,w15,w15,lsl#2 // r3*5
- str w13,[x0,#16*4] // s2
- str w15,[x0,#16*5] // r3
- add w15,w16,w16,lsl#2 // r4*5
- str w14,[x0,#16*6] // s3
- str w16,[x0,#16*7] // r4
- str w15,[x0,#16*8] // s4
-
- ret
-.size poly1305_splat,.-poly1305_splat
-
-#ifdef __KERNEL__
-.globl poly1305_blocks_neon
-#endif
-.type poly1305_blocks_neon,%function
-.align 5
-poly1305_blocks_neon:
-.Lpoly1305_blocks_neon:
- ldr x17,[x0,#24]
- cmp x2,#128
- b.lo .Lpoly1305_blocks
-
- .inst 0xd503233f // paciasp
- stp x29,x30,[sp,#-80]!
- add x29,sp,#0
-
- stp d8,d9,[sp,#16] // meet ABI requirements
- stp d10,d11,[sp,#32]
- stp d12,d13,[sp,#48]
- stp d14,d15,[sp,#64]
-
- cbz x17,.Lbase2_64_neon
-
- ldp w10,w11,[x0] // load hash value base 2^26
- ldp w12,w13,[x0,#8]
- ldr w14,[x0,#16]
-
- tst x2,#31
- b.eq .Leven_neon
-
- ldp x7,x8,[x0,#32] // load key value
-
- add x4,x10,x11,lsl#26 // base 2^26 -> base 2^64
- lsr x5,x12,#12
- adds x4,x4,x12,lsl#52
- add x5,x5,x13,lsl#14
- adc x5,x5,xzr
- lsr x6,x14,#24
- adds x5,x5,x14,lsl#40
- adc x14,x6,xzr // can be partially reduced...
-
- ldp x12,x13,[x1],#16 // load input
- sub x2,x2,#16
- add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
-
-#ifdef __AARCH64EB__
- rev x12,x12
- rev x13,x13
-#endif
- adds x4,x4,x12 // accumulate input
- adcs x5,x5,x13
- adc x6,x6,x3
-
- bl poly1305_mult
-
- and x10,x4,#0x03ffffff // base 2^64 -> base 2^26
- ubfx x11,x4,#26,#26
- extr x12,x5,x4,#52
- and x12,x12,#0x03ffffff
- ubfx x13,x5,#14,#26
- extr x14,x6,x5,#40
-
- b .Leven_neon
-
-.align 4
-.Lbase2_64_neon:
- ldp x7,x8,[x0,#32] // load key value
-
- ldp x4,x5,[x0] // load hash value base 2^64
- ldr x6,[x0,#16]
-
- tst x2,#31
- b.eq .Linit_neon
-
- ldp x12,x13,[x1],#16 // load input
- sub x2,x2,#16
- add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
-#ifdef __AARCH64EB__
- rev x12,x12
- rev x13,x13
-#endif
- adds x4,x4,x12 // accumulate input
- adcs x5,x5,x13
- adc x6,x6,x3
-
- bl poly1305_mult
-
-.Linit_neon:
- ldr w17,[x0,#48] // first table element
- and x10,x4,#0x03ffffff // base 2^64 -> base 2^26
- ubfx x11,x4,#26,#26
- extr x12,x5,x4,#52
- and x12,x12,#0x03ffffff
- ubfx x13,x5,#14,#26
- extr x14,x6,x5,#40
-
- cmp w17,#-1 // is value impossible?
- b.ne .Leven_neon
-
- fmov d24,x10
- fmov d25,x11
- fmov d26,x12
- fmov d27,x13
- fmov d28,x14
-
- ////////////////////////////////// initialize r^n table
- mov x4,x7 // r^1
- add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2)
- mov x5,x8
- mov x6,xzr
- add x0,x0,#48+12
- bl poly1305_splat
-
- bl poly1305_mult // r^2
- sub x0,x0,#4
- bl poly1305_splat
-
- bl poly1305_mult // r^3
- sub x0,x0,#4
- bl poly1305_splat
-
- bl poly1305_mult // r^4
- sub x0,x0,#4
- bl poly1305_splat
- sub x0,x0,#48 // restore original x0
- b .Ldo_neon
-
-.align 4
-.Leven_neon:
- fmov d24,x10
- fmov d25,x11
- fmov d26,x12
- fmov d27,x13
- fmov d28,x14
-
-.Ldo_neon:
- ldp x8,x12,[x1,#32] // inp[2:3]
- subs x2,x2,#64
- ldp x9,x13,[x1,#48]
- add x16,x1,#96
- adr x17,.Lzeros
-
- lsl x3,x3,#24
- add x15,x0,#48
-
-#ifdef __AARCH64EB__
- rev x8,x8
- rev x12,x12
- rev x9,x9
- rev x13,x13
-#endif
- and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
- and x5,x9,#0x03ffffff
- ubfx x6,x8,#26,#26
- ubfx x7,x9,#26,#26
- add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
- extr x8,x12,x8,#52
- extr x9,x13,x9,#52
- add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
- fmov d14,x4
- and x8,x8,#0x03ffffff
- and x9,x9,#0x03ffffff
- ubfx x10,x12,#14,#26
- ubfx x11,x13,#14,#26
- add x12,x3,x12,lsr#40
- add x13,x3,x13,lsr#40
- add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
- fmov d15,x6
- add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
- add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
- fmov d16,x8
- fmov d17,x10
- fmov d18,x12
-
- ldp x8,x12,[x1],#16 // inp[0:1]
- ldp x9,x13,[x1],#48
-
- ld1 {v0.4s,v1.4s,v2.4s,v3.4s},[x15],#64
- ld1 {v4.4s,v5.4s,v6.4s,v7.4s},[x15],#64
- ld1 {v8.4s},[x15]
-
-#ifdef __AARCH64EB__
- rev x8,x8
- rev x12,x12
- rev x9,x9
- rev x13,x13
-#endif
- and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
- and x5,x9,#0x03ffffff
- ubfx x6,x8,#26,#26
- ubfx x7,x9,#26,#26
- add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
- extr x8,x12,x8,#52
- extr x9,x13,x9,#52
- add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
- fmov d9,x4
- and x8,x8,#0x03ffffff
- and x9,x9,#0x03ffffff
- ubfx x10,x12,#14,#26
- ubfx x11,x13,#14,#26
- add x12,x3,x12,lsr#40
- add x13,x3,x13,lsr#40
- add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
- fmov d10,x6
- add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
- add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
- movi v31.2d,#-1
- fmov d11,x8
- fmov d12,x10
- fmov d13,x12
- ushr v31.2d,v31.2d,#38
-
- b.ls .Lskip_loop
-
-.align 4
-.Loop_neon:
- ////////////////////////////////////////////////////////////////
- // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
- // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
- // ___________________/
- // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
- // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
- // ___________________/ ____________________/
- //
- // Note that we start with inp[2:3]*r^2. This is because it
- // doesn't depend on reduction in previous iteration.
- ////////////////////////////////////////////////////////////////
- // d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0
- // d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4
- // d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3
- // d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2
- // d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1
-
- subs x2,x2,#64
- umull v23.2d,v14.2s,v7.s[2]
- csel x16,x17,x16,lo
- umull v22.2d,v14.2s,v5.s[2]
- umull v21.2d,v14.2s,v3.s[2]
- ldp x8,x12,[x16],#16 // inp[2:3] (or zero)
- umull v20.2d,v14.2s,v1.s[2]
- ldp x9,x13,[x16],#48
- umull v19.2d,v14.2s,v0.s[2]
-#ifdef __AARCH64EB__
- rev x8,x8
- rev x12,x12
- rev x9,x9
- rev x13,x13
-#endif
-
- umlal v23.2d,v15.2s,v5.s[2]
- and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
- umlal v22.2d,v15.2s,v3.s[2]
- and x5,x9,#0x03ffffff
- umlal v21.2d,v15.2s,v1.s[2]
- ubfx x6,x8,#26,#26
- umlal v20.2d,v15.2s,v0.s[2]
- ubfx x7,x9,#26,#26
- umlal v19.2d,v15.2s,v8.s[2]
- add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
-
- umlal v23.2d,v16.2s,v3.s[2]
- extr x8,x12,x8,#52
- umlal v22.2d,v16.2s,v1.s[2]
- extr x9,x13,x9,#52
- umlal v21.2d,v16.2s,v0.s[2]
- add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
- umlal v20.2d,v16.2s,v8.s[2]
- fmov d14,x4
- umlal v19.2d,v16.2s,v6.s[2]
- and x8,x8,#0x03ffffff
-
- umlal v23.2d,v17.2s,v1.s[2]
- and x9,x9,#0x03ffffff
- umlal v22.2d,v17.2s,v0.s[2]
- ubfx x10,x12,#14,#26
- umlal v21.2d,v17.2s,v8.s[2]
- ubfx x11,x13,#14,#26
- umlal v20.2d,v17.2s,v6.s[2]
- add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
- umlal v19.2d,v17.2s,v4.s[2]
- fmov d15,x6
-
- add v11.2s,v11.2s,v26.2s
- add x12,x3,x12,lsr#40
- umlal v23.2d,v18.2s,v0.s[2]
- add x13,x3,x13,lsr#40
- umlal v22.2d,v18.2s,v8.s[2]
- add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
- umlal v21.2d,v18.2s,v6.s[2]
- add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
- umlal v20.2d,v18.2s,v4.s[2]
- fmov d16,x8
- umlal v19.2d,v18.2s,v2.s[2]
- fmov d17,x10
-
- ////////////////////////////////////////////////////////////////
- // (hash+inp[0:1])*r^4 and accumulate
-
- add v9.2s,v9.2s,v24.2s
- fmov d18,x12
- umlal v22.2d,v11.2s,v1.s[0]
- ldp x8,x12,[x1],#16 // inp[0:1]
- umlal v19.2d,v11.2s,v6.s[0]
- ldp x9,x13,[x1],#48
- umlal v23.2d,v11.2s,v3.s[0]
- umlal v20.2d,v11.2s,v8.s[0]
- umlal v21.2d,v11.2s,v0.s[0]
-#ifdef __AARCH64EB__
- rev x8,x8
- rev x12,x12
- rev x9,x9
- rev x13,x13
-#endif
-
- add v10.2s,v10.2s,v25.2s
- umlal v22.2d,v9.2s,v5.s[0]
- umlal v23.2d,v9.2s,v7.s[0]
- and x4,x8,#0x03ffffff // base 2^64 -> base 2^26
- umlal v21.2d,v9.2s,v3.s[0]
- and x5,x9,#0x03ffffff
- umlal v19.2d,v9.2s,v0.s[0]
- ubfx x6,x8,#26,#26
- umlal v20.2d,v9.2s,v1.s[0]
- ubfx x7,x9,#26,#26
-
- add v12.2s,v12.2s,v27.2s
- add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32
- umlal v22.2d,v10.2s,v3.s[0]
- extr x8,x12,x8,#52
- umlal v23.2d,v10.2s,v5.s[0]
- extr x9,x13,x9,#52
- umlal v19.2d,v10.2s,v8.s[0]
- add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32
- umlal v21.2d,v10.2s,v1.s[0]
- fmov d9,x4
- umlal v20.2d,v10.2s,v0.s[0]
- and x8,x8,#0x03ffffff
-
- add v13.2s,v13.2s,v28.2s
- and x9,x9,#0x03ffffff
- umlal v22.2d,v12.2s,v0.s[0]
- ubfx x10,x12,#14,#26
- umlal v19.2d,v12.2s,v4.s[0]
- ubfx x11,x13,#14,#26
- umlal v23.2d,v12.2s,v1.s[0]
- add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32
- umlal v20.2d,v12.2s,v6.s[0]
- fmov d10,x6
- umlal v21.2d,v12.2s,v8.s[0]
- add x12,x3,x12,lsr#40
-
- umlal v22.2d,v13.2s,v8.s[0]
- add x13,x3,x13,lsr#40
- umlal v19.2d,v13.2s,v2.s[0]
- add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32
- umlal v23.2d,v13.2s,v0.s[0]
- add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32
- umlal v20.2d,v13.2s,v4.s[0]
- fmov d11,x8
- umlal v21.2d,v13.2s,v6.s[0]
- fmov d12,x10
- fmov d13,x12
-
- /////////////////////////////////////////////////////////////////
- // lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
- // and P. Schwabe
- //
- // [see discussion in poly1305-armv4 module]
-
- ushr v29.2d,v22.2d,#26
- xtn v27.2s,v22.2d
- ushr v30.2d,v19.2d,#26
- and v19.16b,v19.16b,v31.16b
- add v23.2d,v23.2d,v29.2d // h3 -> h4
- bic v27.2s,#0xfc,lsl#24 // &=0x03ffffff
- add v20.2d,v20.2d,v30.2d // h0 -> h1
-
- ushr v29.2d,v23.2d,#26
- xtn v28.2s,v23.2d
- ushr v30.2d,v20.2d,#26
- xtn v25.2s,v20.2d
- bic v28.2s,#0xfc,lsl#24
- add v21.2d,v21.2d,v30.2d // h1 -> h2
-
- add v19.2d,v19.2d,v29.2d
- shl v29.2d,v29.2d,#2
- shrn v30.2s,v21.2d,#26
- xtn v26.2s,v21.2d
- add v19.2d,v19.2d,v29.2d // h4 -> h0
- bic v25.2s,#0xfc,lsl#24
- add v27.2s,v27.2s,v30.2s // h2 -> h3
- bic v26.2s,#0xfc,lsl#24
-
- shrn v29.2s,v19.2d,#26
- xtn v24.2s,v19.2d
- ushr v30.2s,v27.2s,#26
- bic v27.2s,#0xfc,lsl#24
- bic v24.2s,#0xfc,lsl#24
- add v25.2s,v25.2s,v29.2s // h0 -> h1
- add v28.2s,v28.2s,v30.2s // h3 -> h4
-
- b.hi .Loop_neon
-
-.Lskip_loop:
- dup v16.2d,v16.d[0]
- add v11.2s,v11.2s,v26.2s
-
- ////////////////////////////////////////////////////////////////
- // multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
-
- adds x2,x2,#32
- b.ne .Long_tail
-
- dup v16.2d,v11.d[0]
- add v14.2s,v9.2s,v24.2s
- add v17.2s,v12.2s,v27.2s
- add v15.2s,v10.2s,v25.2s
- add v18.2s,v13.2s,v28.2s
-
-.Long_tail:
- dup v14.2d,v14.d[0]
- umull2 v19.2d,v16.4s,v6.4s
- umull2 v22.2d,v16.4s,v1.4s
- umull2 v23.2d,v16.4s,v3.4s
- umull2 v21.2d,v16.4s,v0.4s
- umull2 v20.2d,v16.4s,v8.4s
-
- dup v15.2d,v15.d[0]
- umlal2 v19.2d,v14.4s,v0.4s
- umlal2 v21.2d,v14.4s,v3.4s
- umlal2 v22.2d,v14.4s,v5.4s
- umlal2 v23.2d,v14.4s,v7.4s
- umlal2 v20.2d,v14.4s,v1.4s
-
- dup v17.2d,v17.d[0]
- umlal2 v19.2d,v15.4s,v8.4s
- umlal2 v22.2d,v15.4s,v3.4s
- umlal2 v21.2d,v15.4s,v1.4s
- umlal2 v23.2d,v15.4s,v5.4s
- umlal2 v20.2d,v15.4s,v0.4s
-
- dup v18.2d,v18.d[0]
- umlal2 v22.2d,v17.4s,v0.4s
- umlal2 v23.2d,v17.4s,v1.4s
- umlal2 v19.2d,v17.4s,v4.4s
- umlal2 v20.2d,v17.4s,v6.4s
- umlal2 v21.2d,v17.4s,v8.4s
-
- umlal2 v22.2d,v18.4s,v8.4s
- umlal2 v19.2d,v18.4s,v2.4s
- umlal2 v23.2d,v18.4s,v0.4s
- umlal2 v20.2d,v18.4s,v4.4s
- umlal2 v21.2d,v18.4s,v6.4s
-
- b.eq .Lshort_tail
-
- ////////////////////////////////////////////////////////////////
- // (hash+inp[0:1])*r^4:r^3 and accumulate
-
- add v9.2s,v9.2s,v24.2s
- umlal v22.2d,v11.2s,v1.2s
- umlal v19.2d,v11.2s,v6.2s
- umlal v23.2d,v11.2s,v3.2s
- umlal v20.2d,v11.2s,v8.2s
- umlal v21.2d,v11.2s,v0.2s
-
- add v10.2s,v10.2s,v25.2s
- umlal v22.2d,v9.2s,v5.2s
- umlal v19.2d,v9.2s,v0.2s
- umlal v23.2d,v9.2s,v7.2s
- umlal v20.2d,v9.2s,v1.2s
- umlal v21.2d,v9.2s,v3.2s
-
- add v12.2s,v12.2s,v27.2s
- umlal v22.2d,v10.2s,v3.2s
- umlal v19.2d,v10.2s,v8.2s
- umlal v23.2d,v10.2s,v5.2s
- umlal v20.2d,v10.2s,v0.2s
- umlal v21.2d,v10.2s,v1.2s
-
- add v13.2s,v13.2s,v28.2s
- umlal v22.2d,v12.2s,v0.2s
- umlal v19.2d,v12.2s,v4.2s
- umlal v23.2d,v12.2s,v1.2s
- umlal v20.2d,v12.2s,v6.2s
- umlal v21.2d,v12.2s,v8.2s
-
- umlal v22.2d,v13.2s,v8.2s
- umlal v19.2d,v13.2s,v2.2s
- umlal v23.2d,v13.2s,v0.2s
- umlal v20.2d,v13.2s,v4.2s
- umlal v21.2d,v13.2s,v6.2s
-
-.Lshort_tail:
- ////////////////////////////////////////////////////////////////
- // horizontal add
-
- addp v22.2d,v22.2d,v22.2d
- ldp d8,d9,[sp,#16] // meet ABI requirements
- addp v19.2d,v19.2d,v19.2d
- ldp d10,d11,[sp,#32]
- addp v23.2d,v23.2d,v23.2d
- ldp d12,d13,[sp,#48]
- addp v20.2d,v20.2d,v20.2d
- ldp d14,d15,[sp,#64]
- addp v21.2d,v21.2d,v21.2d
- ldr x30,[sp,#8]
-
- ////////////////////////////////////////////////////////////////
- // lazy reduction, but without narrowing
-
- ushr v29.2d,v22.2d,#26
- and v22.16b,v22.16b,v31.16b
- ushr v30.2d,v19.2d,#26
- and v19.16b,v19.16b,v31.16b
-
- add v23.2d,v23.2d,v29.2d // h3 -> h4
- add v20.2d,v20.2d,v30.2d // h0 -> h1
-
- ushr v29.2d,v23.2d,#26
- and v23.16b,v23.16b,v31.16b
- ushr v30.2d,v20.2d,#26
- and v20.16b,v20.16b,v31.16b
- add v21.2d,v21.2d,v30.2d // h1 -> h2
-
- add v19.2d,v19.2d,v29.2d
- shl v29.2d,v29.2d,#2
- ushr v30.2d,v21.2d,#26
- and v21.16b,v21.16b,v31.16b
- add v19.2d,v19.2d,v29.2d // h4 -> h0
- add v22.2d,v22.2d,v30.2d // h2 -> h3
-
- ushr v29.2d,v19.2d,#26
- and v19.16b,v19.16b,v31.16b
- ushr v30.2d,v22.2d,#26
- and v22.16b,v22.16b,v31.16b
- add v20.2d,v20.2d,v29.2d // h0 -> h1
- add v23.2d,v23.2d,v30.2d // h3 -> h4
-
- ////////////////////////////////////////////////////////////////
- // write the result, can be partially reduced
-
- st4 {v19.s,v20.s,v21.s,v22.s}[0],[x0],#16
- mov x4,#1
- st1 {v23.s}[0],[x0]
- str x4,[x0,#8] // set is_base2_26
-
- ldr x29,[sp],#80
- .inst 0xd50323bf // autiasp
- ret
-.size poly1305_blocks_neon,.-poly1305_blocks_neon
-
-.align 5
-.Lzeros:
-.long 0,0,0,0,0,0,0,0
-.asciz "Poly1305 for ARMv8, CRYPTOGAMS by @dot-asm"
-.align 2
-#if !defined(__KERNEL__) && !defined(_WIN64)
-.comm OPENSSL_armcap_P,4,4
-.hidden OPENSSL_armcap_P
-#endif
diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
index 683de671741a..9c3d86e397bf 100644
--- a/arch/arm64/crypto/poly1305-glue.c
+++ b/arch/arm64/crypto/poly1305-glue.c
@@ -25,7 +25,7 @@ asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
{
poly1305_init_arm64(&dctx->h, key);
dctx->s[0] = get_unaligned_le32(key + 16);
diff --git a/arch/arm64/crypto/sha1-ce-core.S b/arch/arm64/crypto/sha1-ce-core.S
index 8c02bbc2684e..889ca0f8972b 100644
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -121,7 +121,7 @@ CPU_LE( rev32 v11.16b, v11.16b )
add dgav.4s, dgav.4s, dg0v.4s
cbz w2, 2f
- cond_yield 3f, x5
+ cond_yield 3f, x5, x6
b 0b
/*
diff --git a/arch/arm64/crypto/sha2-ce-core.S b/arch/arm64/crypto/sha2-ce-core.S
index 6cdea7d56059..491179922f49 100644
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -129,7 +129,7 @@ CPU_LE( rev32 v19.16b, v19.16b )
/* handled all input blocks? */
cbz w2, 2f
- cond_yield 3f, x5
+ cond_yield 3f, x5, x6
b 0b
/*
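
The two cond_yield hunks above (and the matching ones in sha3-ce-core.S and sha512-ce-core.S below, where the sha3 save-state label is also renumbered from 3 to 4) only pass the macro a second scratch register; the behaviour is unchanged: when a reschedule is pending, the assembly bails out early with blocks still unprocessed and the C glue re-enters it after a preemption point. A self-contained sketch of that calling pattern, with illustrative names rather than the kernel's exact API:

    #include <stdio.h>

    /* stand-in for the asm routine: does at most 4 blocks per call and
     * returns how many blocks are still left to process */
    static size_t process_blocks(size_t nblocks)
    {
            size_t chunk = nblocks < 4 ? nblocks : 4;
            return nblocks - chunk;
    }

    int main(void)
    {
            size_t rem = 10;

            while (rem) {
                    /* kernel_neon_begin() would go here */
                    rem = process_blocks(rem);
                    /* kernel_neon_end() here is where preemption can occur */
                    printf("blocks remaining: %zu\n", rem);
            }
            return 0;
    }
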
diff --git a/arch/arm64/crypto/sha256-core.S_shipped b/arch/arm64/crypto/sha256-core.S_shipped
deleted file mode 100644
index 7c7ce2e3bad6..000000000000
--- a/arch/arm64/crypto/sha256-core.S_shipped
+++ /dev/null
@@ -1,2069 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// This code is taken from the OpenSSL project but the author (Andy Polyakov)
-// has relicensed it under the GPLv2. Therefore this program is free software;
-// you can redistribute it and/or modify it under the terms of the GNU General
-// Public License version 2 as published by the Free Software Foundation.
-//
-// The original headers, including the original license headers, are
-// included below for completeness.
-
-// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
-//
-// Licensed under the OpenSSL license (the "License"). You may not use
-// this file except in compliance with the License. You can obtain a copy
-// in the file LICENSE in the source distribution or at
-// https://www.openssl.org/source/license.html
-
-// ====================================================================
-// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-// project. The module is, however, dual licensed under OpenSSL and
-// CRYPTOGAMS licenses depending on where you obtain it. For further
-// details see http://www.openssl.org/~appro/cryptogams/.
-// ====================================================================
-//
-// SHA256/512 for ARMv8.
-//
-// Performance in cycles per processed byte and improvement coefficient
-// over code generated with "default" compiler:
-//
-// SHA256-hw SHA256(*) SHA512
-// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
-// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
-// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
-// Denver 2.01 10.5 (+26%) 6.70 (+8%)
-// X-Gene 20.0 (+100%) 12.8 (+300%(***))
-// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
-//
-// (*) Software SHA256 results are of lesser relevance, presented
-// mostly for informational purposes.
-// (**) The result is a trade-off: it's possible to improve it by
-// 10% (or by 1 cycle per round), but at the cost of 20% loss
-// on Cortex-A53 (or by 4 cycles per round).
-// (***) Super-impressive coefficients over gcc-generated code are
-// indication of some compiler "pathology", most notably code
-// generated with -mgeneral-regs-only is significantly faster
-// and the gap is only 40-90%.
-//
-// October 2016.
-//
-// Originally it was reckoned that it makes no sense to implement NEON
-// version of SHA256 for 64-bit processors. This is because performance
-// improvement on most wide-spread Cortex-A5x processors was observed
-// to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
-// observed that 32-bit NEON SHA256 performs significantly better than
-// 64-bit scalar version on *some* of the more recent processors. As a
-// result, a 64-bit NEON version of SHA256 was added to provide the best
-// all-round performance. For example it executes ~30% faster on X-Gene
-// and Mongoose. [For reference, NEON version of SHA512 is bound to
-// deliver much less improvement, likely *negative* on Cortex-A5x.
-// Which is why NEON support is limited to SHA256.]
-
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#endif
-
-.text
-
-.extern OPENSSL_armcap_P
-.globl sha256_block_data_order
-.type sha256_block_data_order,%function
-.align 6
-sha256_block_data_order:
-#ifndef __KERNEL__
-# ifdef __ILP32__
- ldrsw x16,.LOPENSSL_armcap_P
-# else
- ldr x16,.LOPENSSL_armcap_P
-# endif
- adr x17,.LOPENSSL_armcap_P
- add x16,x16,x17
- ldr w16,[x16]
- tst w16,#ARMV8_SHA256
- b.ne .Lv8_entry
- tst w16,#ARMV7_NEON
- b.ne .Lneon_entry
-#endif
- stp x29,x30,[sp,#-128]!
- add x29,sp,#0
-
- stp x19,x20,[sp,#16]
- stp x21,x22,[sp,#32]
- stp x23,x24,[sp,#48]
- stp x25,x26,[sp,#64]
- stp x27,x28,[sp,#80]
- sub sp,sp,#4*4
-
- ldp w20,w21,[x0] // load context
- ldp w22,w23,[x0,#2*4]
- ldp w24,w25,[x0,#4*4]
- add x2,x1,x2,lsl#6 // end of input
- ldp w26,w27,[x0,#6*4]
- adr x30,.LK256
- stp x0,x2,[x29,#96]
-
-.Loop:
- ldp w3,w4,[x1],#2*4
- ldr w19,[x30],#4 // *K++
- eor w28,w21,w22 // magic seed
- str x1,[x29,#112]
-#ifndef __AARCH64EB__
- rev w3,w3 // 0
-#endif
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- eor w6,w24,w24,ror#14
- and w17,w25,w24
- bic w19,w26,w24
- add w27,w27,w3 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w6,ror#11 // Sigma1(e)
- ror w6,w20,#2
- add w27,w27,w17 // h+=Ch(e,f,g)
- eor w17,w20,w20,ror#9
- add w27,w27,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w23,w23,w27 // d+=h
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w6,w17,ror#13 // Sigma0(a)
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w27,w27,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w4,w4 // 1
-#endif
- ldp w5,w6,[x1],#2*4
- add w27,w27,w17 // h+=Sigma0(a)
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- eor w7,w23,w23,ror#14
- and w17,w24,w23
- bic w28,w25,w23
- add w26,w26,w4 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w7,ror#11 // Sigma1(e)
- ror w7,w27,#2
- add w26,w26,w17 // h+=Ch(e,f,g)
- eor w17,w27,w27,ror#9
- add w26,w26,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w22,w22,w26 // d+=h
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w7,w17,ror#13 // Sigma0(a)
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w26,w26,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w5,w5 // 2
-#endif
- add w26,w26,w17 // h+=Sigma0(a)
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- eor w8,w22,w22,ror#14
- and w17,w23,w22
- bic w19,w24,w22
- add w25,w25,w5 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w8,ror#11 // Sigma1(e)
- ror w8,w26,#2
- add w25,w25,w17 // h+=Ch(e,f,g)
- eor w17,w26,w26,ror#9
- add w25,w25,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w21,w21,w25 // d+=h
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w8,w17,ror#13 // Sigma0(a)
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w25,w25,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w6,w6 // 3
-#endif
- ldp w7,w8,[x1],#2*4
- add w25,w25,w17 // h+=Sigma0(a)
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- eor w9,w21,w21,ror#14
- and w17,w22,w21
- bic w28,w23,w21
- add w24,w24,w6 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w9,ror#11 // Sigma1(e)
- ror w9,w25,#2
- add w24,w24,w17 // h+=Ch(e,f,g)
- eor w17,w25,w25,ror#9
- add w24,w24,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w20,w20,w24 // d+=h
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w9,w17,ror#13 // Sigma0(a)
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w24,w24,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w7,w7 // 4
-#endif
- add w24,w24,w17 // h+=Sigma0(a)
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- eor w10,w20,w20,ror#14
- and w17,w21,w20
- bic w19,w22,w20
- add w23,w23,w7 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w10,ror#11 // Sigma1(e)
- ror w10,w24,#2
- add w23,w23,w17 // h+=Ch(e,f,g)
- eor w17,w24,w24,ror#9
- add w23,w23,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w27,w27,w23 // d+=h
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w10,w17,ror#13 // Sigma0(a)
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w23,w23,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w8,w8 // 5
-#endif
- ldp w9,w10,[x1],#2*4
- add w23,w23,w17 // h+=Sigma0(a)
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- eor w11,w27,w27,ror#14
- and w17,w20,w27
- bic w28,w21,w27
- add w22,w22,w8 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w11,ror#11 // Sigma1(e)
- ror w11,w23,#2
- add w22,w22,w17 // h+=Ch(e,f,g)
- eor w17,w23,w23,ror#9
- add w22,w22,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w26,w26,w22 // d+=h
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w11,w17,ror#13 // Sigma0(a)
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w22,w22,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w9,w9 // 6
-#endif
- add w22,w22,w17 // h+=Sigma0(a)
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- eor w12,w26,w26,ror#14
- and w17,w27,w26
- bic w19,w20,w26
- add w21,w21,w9 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w12,ror#11 // Sigma1(e)
- ror w12,w22,#2
- add w21,w21,w17 // h+=Ch(e,f,g)
- eor w17,w22,w22,ror#9
- add w21,w21,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w25,w25,w21 // d+=h
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w12,w17,ror#13 // Sigma0(a)
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w21,w21,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w10,w10 // 7
-#endif
- ldp w11,w12,[x1],#2*4
- add w21,w21,w17 // h+=Sigma0(a)
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- eor w13,w25,w25,ror#14
- and w17,w26,w25
- bic w28,w27,w25
- add w20,w20,w10 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w13,ror#11 // Sigma1(e)
- ror w13,w21,#2
- add w20,w20,w17 // h+=Ch(e,f,g)
- eor w17,w21,w21,ror#9
- add w20,w20,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w24,w24,w20 // d+=h
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w13,w17,ror#13 // Sigma0(a)
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w20,w20,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w11,w11 // 8
-#endif
- add w20,w20,w17 // h+=Sigma0(a)
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- eor w14,w24,w24,ror#14
- and w17,w25,w24
- bic w19,w26,w24
- add w27,w27,w11 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w14,ror#11 // Sigma1(e)
- ror w14,w20,#2
- add w27,w27,w17 // h+=Ch(e,f,g)
- eor w17,w20,w20,ror#9
- add w27,w27,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w23,w23,w27 // d+=h
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w14,w17,ror#13 // Sigma0(a)
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w27,w27,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w12,w12 // 9
-#endif
- ldp w13,w14,[x1],#2*4
- add w27,w27,w17 // h+=Sigma0(a)
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- eor w15,w23,w23,ror#14
- and w17,w24,w23
- bic w28,w25,w23
- add w26,w26,w12 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w15,ror#11 // Sigma1(e)
- ror w15,w27,#2
- add w26,w26,w17 // h+=Ch(e,f,g)
- eor w17,w27,w27,ror#9
- add w26,w26,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w22,w22,w26 // d+=h
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w15,w17,ror#13 // Sigma0(a)
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w26,w26,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w13,w13 // 10
-#endif
- add w26,w26,w17 // h+=Sigma0(a)
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- eor w0,w22,w22,ror#14
- and w17,w23,w22
- bic w19,w24,w22
- add w25,w25,w13 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w0,ror#11 // Sigma1(e)
- ror w0,w26,#2
- add w25,w25,w17 // h+=Ch(e,f,g)
- eor w17,w26,w26,ror#9
- add w25,w25,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w21,w21,w25 // d+=h
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w0,w17,ror#13 // Sigma0(a)
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w25,w25,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w14,w14 // 11
-#endif
- ldp w15,w0,[x1],#2*4
- add w25,w25,w17 // h+=Sigma0(a)
- str w6,[sp,#12]
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- eor w6,w21,w21,ror#14
- and w17,w22,w21
- bic w28,w23,w21
- add w24,w24,w14 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w6,ror#11 // Sigma1(e)
- ror w6,w25,#2
- add w24,w24,w17 // h+=Ch(e,f,g)
- eor w17,w25,w25,ror#9
- add w24,w24,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w20,w20,w24 // d+=h
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w6,w17,ror#13 // Sigma0(a)
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w24,w24,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w15,w15 // 12
-#endif
- add w24,w24,w17 // h+=Sigma0(a)
- str w7,[sp,#0]
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- eor w7,w20,w20,ror#14
- and w17,w21,w20
- bic w19,w22,w20
- add w23,w23,w15 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w7,ror#11 // Sigma1(e)
- ror w7,w24,#2
- add w23,w23,w17 // h+=Ch(e,f,g)
- eor w17,w24,w24,ror#9
- add w23,w23,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w27,w27,w23 // d+=h
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w7,w17,ror#13 // Sigma0(a)
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w23,w23,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w0,w0 // 13
-#endif
- ldp w1,w2,[x1]
- add w23,w23,w17 // h+=Sigma0(a)
- str w8,[sp,#4]
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- eor w8,w27,w27,ror#14
- and w17,w20,w27
- bic w28,w21,w27
- add w22,w22,w0 // h+=X[i]
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w8,ror#11 // Sigma1(e)
- ror w8,w23,#2
- add w22,w22,w17 // h+=Ch(e,f,g)
- eor w17,w23,w23,ror#9
- add w22,w22,w16 // h+=Sigma1(e)
- and w19,w19,w28 // (b^c)&=(a^b)
- add w26,w26,w22 // d+=h
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w8,w17,ror#13 // Sigma0(a)
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- //add w22,w22,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w1,w1 // 14
-#endif
- ldr w6,[sp,#12]
- add w22,w22,w17 // h+=Sigma0(a)
- str w9,[sp,#8]
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- eor w9,w26,w26,ror#14
- and w17,w27,w26
- bic w19,w20,w26
- add w21,w21,w1 // h+=X[i]
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w9,ror#11 // Sigma1(e)
- ror w9,w22,#2
- add w21,w21,w17 // h+=Ch(e,f,g)
- eor w17,w22,w22,ror#9
- add w21,w21,w16 // h+=Sigma1(e)
- and w28,w28,w19 // (b^c)&=(a^b)
- add w25,w25,w21 // d+=h
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w9,w17,ror#13 // Sigma0(a)
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- //add w21,w21,w17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev w2,w2 // 15
-#endif
- ldr w7,[sp,#0]
- add w21,w21,w17 // h+=Sigma0(a)
- str w10,[sp,#12]
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- ror w9,w4,#7
- and w17,w26,w25
- ror w8,w1,#17
- bic w28,w27,w25
- ror w10,w21,#2
- add w20,w20,w2 // h+=X[i]
- eor w16,w16,w25,ror#11
- eor w9,w9,w4,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w25,ror#25 // Sigma1(e)
- eor w10,w10,w21,ror#13
- add w20,w20,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w8,w8,w1,ror#19
- eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
- add w20,w20,w16 // h+=Sigma1(e)
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w10,w21,ror#22 // Sigma0(a)
- eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
- add w3,w3,w12
- add w24,w24,w20 // d+=h
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w3,w3,w9
- add w20,w20,w17 // h+=Sigma0(a)
- add w3,w3,w8
-.Loop_16_xx:
- ldr w8,[sp,#4]
- str w11,[sp,#0]
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- ror w10,w5,#7
- and w17,w25,w24
- ror w9,w2,#17
- bic w19,w26,w24
- ror w11,w20,#2
- add w27,w27,w3 // h+=X[i]
- eor w16,w16,w24,ror#11
- eor w10,w10,w5,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w24,ror#25 // Sigma1(e)
- eor w11,w11,w20,ror#13
- add w27,w27,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w9,w9,w2,ror#19
- eor w10,w10,w5,lsr#3 // sigma0(X[i+1])
- add w27,w27,w16 // h+=Sigma1(e)
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w11,w20,ror#22 // Sigma0(a)
- eor w9,w9,w2,lsr#10 // sigma1(X[i+14])
- add w4,w4,w13
- add w23,w23,w27 // d+=h
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w4,w4,w10
- add w27,w27,w17 // h+=Sigma0(a)
- add w4,w4,w9
- ldr w9,[sp,#8]
- str w12,[sp,#4]
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- ror w11,w6,#7
- and w17,w24,w23
- ror w10,w3,#17
- bic w28,w25,w23
- ror w12,w27,#2
- add w26,w26,w4 // h+=X[i]
- eor w16,w16,w23,ror#11
- eor w11,w11,w6,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w23,ror#25 // Sigma1(e)
- eor w12,w12,w27,ror#13
- add w26,w26,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w10,w10,w3,ror#19
- eor w11,w11,w6,lsr#3 // sigma0(X[i+1])
- add w26,w26,w16 // h+=Sigma1(e)
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w12,w27,ror#22 // Sigma0(a)
- eor w10,w10,w3,lsr#10 // sigma1(X[i+14])
- add w5,w5,w14
- add w22,w22,w26 // d+=h
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w5,w5,w11
- add w26,w26,w17 // h+=Sigma0(a)
- add w5,w5,w10
- ldr w10,[sp,#12]
- str w13,[sp,#8]
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- ror w12,w7,#7
- and w17,w23,w22
- ror w11,w4,#17
- bic w19,w24,w22
- ror w13,w26,#2
- add w25,w25,w5 // h+=X[i]
- eor w16,w16,w22,ror#11
- eor w12,w12,w7,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w22,ror#25 // Sigma1(e)
- eor w13,w13,w26,ror#13
- add w25,w25,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w11,w11,w4,ror#19
- eor w12,w12,w7,lsr#3 // sigma0(X[i+1])
- add w25,w25,w16 // h+=Sigma1(e)
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w13,w26,ror#22 // Sigma0(a)
- eor w11,w11,w4,lsr#10 // sigma1(X[i+14])
- add w6,w6,w15
- add w21,w21,w25 // d+=h
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w6,w6,w12
- add w25,w25,w17 // h+=Sigma0(a)
- add w6,w6,w11
- ldr w11,[sp,#0]
- str w14,[sp,#12]
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- ror w13,w8,#7
- and w17,w22,w21
- ror w12,w5,#17
- bic w28,w23,w21
- ror w14,w25,#2
- add w24,w24,w6 // h+=X[i]
- eor w16,w16,w21,ror#11
- eor w13,w13,w8,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w21,ror#25 // Sigma1(e)
- eor w14,w14,w25,ror#13
- add w24,w24,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w12,w12,w5,ror#19
- eor w13,w13,w8,lsr#3 // sigma0(X[i+1])
- add w24,w24,w16 // h+=Sigma1(e)
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w14,w25,ror#22 // Sigma0(a)
- eor w12,w12,w5,lsr#10 // sigma1(X[i+14])
- add w7,w7,w0
- add w20,w20,w24 // d+=h
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w7,w7,w13
- add w24,w24,w17 // h+=Sigma0(a)
- add w7,w7,w12
- ldr w12,[sp,#4]
- str w15,[sp,#0]
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- ror w14,w9,#7
- and w17,w21,w20
- ror w13,w6,#17
- bic w19,w22,w20
- ror w15,w24,#2
- add w23,w23,w7 // h+=X[i]
- eor w16,w16,w20,ror#11
- eor w14,w14,w9,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w20,ror#25 // Sigma1(e)
- eor w15,w15,w24,ror#13
- add w23,w23,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w13,w13,w6,ror#19
- eor w14,w14,w9,lsr#3 // sigma0(X[i+1])
- add w23,w23,w16 // h+=Sigma1(e)
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w15,w24,ror#22 // Sigma0(a)
- eor w13,w13,w6,lsr#10 // sigma1(X[i+14])
- add w8,w8,w1
- add w27,w27,w23 // d+=h
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w8,w8,w14
- add w23,w23,w17 // h+=Sigma0(a)
- add w8,w8,w13
- ldr w13,[sp,#8]
- str w0,[sp,#4]
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- ror w15,w10,#7
- and w17,w20,w27
- ror w14,w7,#17
- bic w28,w21,w27
- ror w0,w23,#2
- add w22,w22,w8 // h+=X[i]
- eor w16,w16,w27,ror#11
- eor w15,w15,w10,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w27,ror#25 // Sigma1(e)
- eor w0,w0,w23,ror#13
- add w22,w22,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w14,w14,w7,ror#19
- eor w15,w15,w10,lsr#3 // sigma0(X[i+1])
- add w22,w22,w16 // h+=Sigma1(e)
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w0,w23,ror#22 // Sigma0(a)
- eor w14,w14,w7,lsr#10 // sigma1(X[i+14])
- add w9,w9,w2
- add w26,w26,w22 // d+=h
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w9,w9,w15
- add w22,w22,w17 // h+=Sigma0(a)
- add w9,w9,w14
- ldr w14,[sp,#12]
- str w1,[sp,#8]
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- ror w0,w11,#7
- and w17,w27,w26
- ror w15,w8,#17
- bic w19,w20,w26
- ror w1,w22,#2
- add w21,w21,w9 // h+=X[i]
- eor w16,w16,w26,ror#11
- eor w0,w0,w11,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w26,ror#25 // Sigma1(e)
- eor w1,w1,w22,ror#13
- add w21,w21,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w15,w15,w8,ror#19
- eor w0,w0,w11,lsr#3 // sigma0(X[i+1])
- add w21,w21,w16 // h+=Sigma1(e)
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w1,w22,ror#22 // Sigma0(a)
- eor w15,w15,w8,lsr#10 // sigma1(X[i+14])
- add w10,w10,w3
- add w25,w25,w21 // d+=h
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w10,w10,w0
- add w21,w21,w17 // h+=Sigma0(a)
- add w10,w10,w15
- ldr w15,[sp,#0]
- str w2,[sp,#12]
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- ror w1,w12,#7
- and w17,w26,w25
- ror w0,w9,#17
- bic w28,w27,w25
- ror w2,w21,#2
- add w20,w20,w10 // h+=X[i]
- eor w16,w16,w25,ror#11
- eor w1,w1,w12,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w25,ror#25 // Sigma1(e)
- eor w2,w2,w21,ror#13
- add w20,w20,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w0,w0,w9,ror#19
- eor w1,w1,w12,lsr#3 // sigma0(X[i+1])
- add w20,w20,w16 // h+=Sigma1(e)
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w2,w21,ror#22 // Sigma0(a)
- eor w0,w0,w9,lsr#10 // sigma1(X[i+14])
- add w11,w11,w4
- add w24,w24,w20 // d+=h
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w11,w11,w1
- add w20,w20,w17 // h+=Sigma0(a)
- add w11,w11,w0
- ldr w0,[sp,#4]
- str w3,[sp,#0]
- ror w16,w24,#6
- add w27,w27,w19 // h+=K[i]
- ror w2,w13,#7
- and w17,w25,w24
- ror w1,w10,#17
- bic w19,w26,w24
- ror w3,w20,#2
- add w27,w27,w11 // h+=X[i]
- eor w16,w16,w24,ror#11
- eor w2,w2,w13,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w20,w21 // a^b, b^c in next round
- eor w16,w16,w24,ror#25 // Sigma1(e)
- eor w3,w3,w20,ror#13
- add w27,w27,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w1,w1,w10,ror#19
- eor w2,w2,w13,lsr#3 // sigma0(X[i+1])
- add w27,w27,w16 // h+=Sigma1(e)
- eor w28,w28,w21 // Maj(a,b,c)
- eor w17,w3,w20,ror#22 // Sigma0(a)
- eor w1,w1,w10,lsr#10 // sigma1(X[i+14])
- add w12,w12,w5
- add w23,w23,w27 // d+=h
- add w27,w27,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w12,w12,w2
- add w27,w27,w17 // h+=Sigma0(a)
- add w12,w12,w1
- ldr w1,[sp,#8]
- str w4,[sp,#4]
- ror w16,w23,#6
- add w26,w26,w28 // h+=K[i]
- ror w3,w14,#7
- and w17,w24,w23
- ror w2,w11,#17
- bic w28,w25,w23
- ror w4,w27,#2
- add w26,w26,w12 // h+=X[i]
- eor w16,w16,w23,ror#11
- eor w3,w3,w14,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w27,w20 // a^b, b^c in next round
- eor w16,w16,w23,ror#25 // Sigma1(e)
- eor w4,w4,w27,ror#13
- add w26,w26,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w2,w2,w11,ror#19
- eor w3,w3,w14,lsr#3 // sigma0(X[i+1])
- add w26,w26,w16 // h+=Sigma1(e)
- eor w19,w19,w20 // Maj(a,b,c)
- eor w17,w4,w27,ror#22 // Sigma0(a)
- eor w2,w2,w11,lsr#10 // sigma1(X[i+14])
- add w13,w13,w6
- add w22,w22,w26 // d+=h
- add w26,w26,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w13,w13,w3
- add w26,w26,w17 // h+=Sigma0(a)
- add w13,w13,w2
- ldr w2,[sp,#12]
- str w5,[sp,#8]
- ror w16,w22,#6
- add w25,w25,w19 // h+=K[i]
- ror w4,w15,#7
- and w17,w23,w22
- ror w3,w12,#17
- bic w19,w24,w22
- ror w5,w26,#2
- add w25,w25,w13 // h+=X[i]
- eor w16,w16,w22,ror#11
- eor w4,w4,w15,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w26,w27 // a^b, b^c in next round
- eor w16,w16,w22,ror#25 // Sigma1(e)
- eor w5,w5,w26,ror#13
- add w25,w25,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w3,w3,w12,ror#19
- eor w4,w4,w15,lsr#3 // sigma0(X[i+1])
- add w25,w25,w16 // h+=Sigma1(e)
- eor w28,w28,w27 // Maj(a,b,c)
- eor w17,w5,w26,ror#22 // Sigma0(a)
- eor w3,w3,w12,lsr#10 // sigma1(X[i+14])
- add w14,w14,w7
- add w21,w21,w25 // d+=h
- add w25,w25,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w14,w14,w4
- add w25,w25,w17 // h+=Sigma0(a)
- add w14,w14,w3
- ldr w3,[sp,#0]
- str w6,[sp,#12]
- ror w16,w21,#6
- add w24,w24,w28 // h+=K[i]
- ror w5,w0,#7
- and w17,w22,w21
- ror w4,w13,#17
- bic w28,w23,w21
- ror w6,w25,#2
- add w24,w24,w14 // h+=X[i]
- eor w16,w16,w21,ror#11
- eor w5,w5,w0,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w25,w26 // a^b, b^c in next round
- eor w16,w16,w21,ror#25 // Sigma1(e)
- eor w6,w6,w25,ror#13
- add w24,w24,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w4,w4,w13,ror#19
- eor w5,w5,w0,lsr#3 // sigma0(X[i+1])
- add w24,w24,w16 // h+=Sigma1(e)
- eor w19,w19,w26 // Maj(a,b,c)
- eor w17,w6,w25,ror#22 // Sigma0(a)
- eor w4,w4,w13,lsr#10 // sigma1(X[i+14])
- add w15,w15,w8
- add w20,w20,w24 // d+=h
- add w24,w24,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w15,w15,w5
- add w24,w24,w17 // h+=Sigma0(a)
- add w15,w15,w4
- ldr w4,[sp,#4]
- str w7,[sp,#0]
- ror w16,w20,#6
- add w23,w23,w19 // h+=K[i]
- ror w6,w1,#7
- and w17,w21,w20
- ror w5,w14,#17
- bic w19,w22,w20
- ror w7,w24,#2
- add w23,w23,w15 // h+=X[i]
- eor w16,w16,w20,ror#11
- eor w6,w6,w1,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w24,w25 // a^b, b^c in next round
- eor w16,w16,w20,ror#25 // Sigma1(e)
- eor w7,w7,w24,ror#13
- add w23,w23,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w5,w5,w14,ror#19
- eor w6,w6,w1,lsr#3 // sigma0(X[i+1])
- add w23,w23,w16 // h+=Sigma1(e)
- eor w28,w28,w25 // Maj(a,b,c)
- eor w17,w7,w24,ror#22 // Sigma0(a)
- eor w5,w5,w14,lsr#10 // sigma1(X[i+14])
- add w0,w0,w9
- add w27,w27,w23 // d+=h
- add w23,w23,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w0,w0,w6
- add w23,w23,w17 // h+=Sigma0(a)
- add w0,w0,w5
- ldr w5,[sp,#8]
- str w8,[sp,#4]
- ror w16,w27,#6
- add w22,w22,w28 // h+=K[i]
- ror w7,w2,#7
- and w17,w20,w27
- ror w6,w15,#17
- bic w28,w21,w27
- ror w8,w23,#2
- add w22,w22,w0 // h+=X[i]
- eor w16,w16,w27,ror#11
- eor w7,w7,w2,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w23,w24 // a^b, b^c in next round
- eor w16,w16,w27,ror#25 // Sigma1(e)
- eor w8,w8,w23,ror#13
- add w22,w22,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w6,w6,w15,ror#19
- eor w7,w7,w2,lsr#3 // sigma0(X[i+1])
- add w22,w22,w16 // h+=Sigma1(e)
- eor w19,w19,w24 // Maj(a,b,c)
- eor w17,w8,w23,ror#22 // Sigma0(a)
- eor w6,w6,w15,lsr#10 // sigma1(X[i+14])
- add w1,w1,w10
- add w26,w26,w22 // d+=h
- add w22,w22,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w1,w1,w7
- add w22,w22,w17 // h+=Sigma0(a)
- add w1,w1,w6
- ldr w6,[sp,#12]
- str w9,[sp,#8]
- ror w16,w26,#6
- add w21,w21,w19 // h+=K[i]
- ror w8,w3,#7
- and w17,w27,w26
- ror w7,w0,#17
- bic w19,w20,w26
- ror w9,w22,#2
- add w21,w21,w1 // h+=X[i]
- eor w16,w16,w26,ror#11
- eor w8,w8,w3,ror#18
- orr w17,w17,w19 // Ch(e,f,g)
- eor w19,w22,w23 // a^b, b^c in next round
- eor w16,w16,w26,ror#25 // Sigma1(e)
- eor w9,w9,w22,ror#13
- add w21,w21,w17 // h+=Ch(e,f,g)
- and w28,w28,w19 // (b^c)&=(a^b)
- eor w7,w7,w0,ror#19
- eor w8,w8,w3,lsr#3 // sigma0(X[i+1])
- add w21,w21,w16 // h+=Sigma1(e)
- eor w28,w28,w23 // Maj(a,b,c)
- eor w17,w9,w22,ror#22 // Sigma0(a)
- eor w7,w7,w0,lsr#10 // sigma1(X[i+14])
- add w2,w2,w11
- add w25,w25,w21 // d+=h
- add w21,w21,w28 // h+=Maj(a,b,c)
- ldr w28,[x30],#4 // *K++, w19 in next round
- add w2,w2,w8
- add w21,w21,w17 // h+=Sigma0(a)
- add w2,w2,w7
- ldr w7,[sp,#0]
- str w10,[sp,#12]
- ror w16,w25,#6
- add w20,w20,w28 // h+=K[i]
- ror w9,w4,#7
- and w17,w26,w25
- ror w8,w1,#17
- bic w28,w27,w25
- ror w10,w21,#2
- add w20,w20,w2 // h+=X[i]
- eor w16,w16,w25,ror#11
- eor w9,w9,w4,ror#18
- orr w17,w17,w28 // Ch(e,f,g)
- eor w28,w21,w22 // a^b, b^c in next round
- eor w16,w16,w25,ror#25 // Sigma1(e)
- eor w10,w10,w21,ror#13
- add w20,w20,w17 // h+=Ch(e,f,g)
- and w19,w19,w28 // (b^c)&=(a^b)
- eor w8,w8,w1,ror#19
- eor w9,w9,w4,lsr#3 // sigma0(X[i+1])
- add w20,w20,w16 // h+=Sigma1(e)
- eor w19,w19,w22 // Maj(a,b,c)
- eor w17,w10,w21,ror#22 // Sigma0(a)
- eor w8,w8,w1,lsr#10 // sigma1(X[i+14])
- add w3,w3,w12
- add w24,w24,w20 // d+=h
- add w20,w20,w19 // h+=Maj(a,b,c)
- ldr w19,[x30],#4 // *K++, w28 in next round
- add w3,w3,w9
- add w20,w20,w17 // h+=Sigma0(a)
- add w3,w3,w8
- cbnz w19,.Loop_16_xx
-
- ldp x0,x2,[x29,#96]
- ldr x1,[x29,#112]
- sub x30,x30,#260 // rewind
-
- ldp w3,w4,[x0]
- ldp w5,w6,[x0,#2*4]
- add x1,x1,#14*4 // advance input pointer
- ldp w7,w8,[x0,#4*4]
- add w20,w20,w3
- ldp w9,w10,[x0,#6*4]
- add w21,w21,w4
- add w22,w22,w5
- add w23,w23,w6
- stp w20,w21,[x0]
- add w24,w24,w7
- add w25,w25,w8
- stp w22,w23,[x0,#2*4]
- add w26,w26,w9
- add w27,w27,w10
- cmp x1,x2
- stp w24,w25,[x0,#4*4]
- stp w26,w27,[x0,#6*4]
- b.ne .Loop
-
- ldp x19,x20,[x29,#16]
- add sp,sp,#4*4
- ldp x21,x22,[x29,#32]
- ldp x23,x24,[x29,#48]
- ldp x25,x26,[x29,#64]
- ldp x27,x28,[x29,#80]
- ldp x29,x30,[sp],#128
- ret
-.size sha256_block_data_order,.-sha256_block_data_order
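
The inline comments in the deleted scalar loop above (Ch(e,f,g), Maj(a,b,c), Sigma1(e), h+=K[i], d+=h, and the "h+=Sigma0(a) from the past" deferral used for instruction scheduling) map one-to-one onto the standard SHA-256 round; the .Loop_16_xx expansion additionally computes sigma0(X[i+1]) = ror7 ^ ror18 ^ shr3 and sigma1(X[i+14]) = ror17 ^ ror19 ^ shr10. A compact C rendering of one round, for reference only, with the deferred Sigma0 folded back in:

    #include <stdint.h>

    static inline uint32_t ror32(uint32_t x, unsigned int n)
    {
            return (x >> n) | (x << (32 - n));
    }

    void sha256_round(uint32_t s[8], uint32_t k, uint32_t x)
    {
            uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
            uint32_t e = s[4], f = s[5], g = s[6], h = s[7];

            uint32_t ch   = (e & f) | (~e & g);                        /* bic/orr: Ch(e,f,g) */
            uint32_t maj  = (a & b) ^ (a & c) ^ (b & c);               /* Maj(a,b,c) */
            uint32_t sig1 = ror32(e, 6) ^ ror32(e, 11) ^ ror32(e, 25); /* Sigma1(e) */
            uint32_t sig0 = ror32(a, 2) ^ ror32(a, 13) ^ ror32(a, 22); /* Sigma0(a) */

            h += k + x + ch + sig1;  /* h+=K[i]; h+=X[i]; h+=Ch(e,f,g); h+=Sigma1(e) */
            d += h;                  /* d+=h */
            h += maj + sig0;         /* h+=Maj(a,b,c); h+=Sigma0(a) */

            s[0] = h; s[1] = a; s[2] = b; s[3] = c;  /* rotate working variables */
            s[4] = d; s[5] = e; s[6] = f; s[7] = g;
    }
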
-
-.align 6
-.type .LK256,%object
-.LK256:
- .long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
- .long 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
- .long 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
- .long 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
- .long 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
- .long 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
- .long 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
- .long 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
- .long 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
- .long 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
- .long 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
- .long 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
- .long 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
- .long 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
- .long 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
- .long 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
- .long 0 //terminator
-.size .LK256,.-.LK256
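
Note the `.long 0 //terminator` appended to .LK256: each round loads *K++, and `cbnz w19,.Loop_16_xx` keeps iterating until the zero terminator is fetched, so no separate round counter is needed; that is also why the epilogue rewinds x30 by 260 bytes (64 constants plus the terminator, 65 words of 4 bytes). The same trick in miniature, with a deliberately truncated table standing in for the real 64 entries:

    #include <stdint.h>
    #include <stdio.h>

    /* two real K256 entries stand in for the 64-word table; the trailing
     * zero plays the role of the .long 0 terminator */
    static const uint32_t K[] = { 0x428a2f98, 0x71374491, 0 };

    int main(void)
    {
            const uint32_t *k = K;
            uint32_t next;

            while ((next = *k++) != 0) {
                    /* a round would consume `next` here; the real loop only
                     * tests once per 16-round block */
            }
            printf("rewind by %zu bytes\n", (size_t)(k - K) * sizeof(K[0]));
            return 0;
    }
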
-#ifndef __KERNEL__
-.align 3
-.LOPENSSL_armcap_P:
-# ifdef __ILP32__
- .long OPENSSL_armcap_P-.
-# else
- .quad OPENSSL_armcap_P-.
-# endif
-#endif
-.asciz "SHA256 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-#ifndef __KERNEL__
-.type sha256_block_armv8,%function
-.align 6
-sha256_block_armv8:
-.Lv8_entry:
- stp x29,x30,[sp,#-16]!
- add x29,sp,#0
-
- ld1 {v0.4s,v1.4s},[x0]
- adr x3,.LK256
-
-.Loop_hw:
- ld1 {v4.16b-v7.16b},[x1],#64
- sub x2,x2,#1
- ld1 {v16.4s},[x3],#16
- rev32 v4.16b,v4.16b
- rev32 v5.16b,v5.16b
- rev32 v6.16b,v6.16b
- rev32 v7.16b,v7.16b
- orr v18.16b,v0.16b,v0.16b // offload
- orr v19.16b,v1.16b,v1.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v6.4s
- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v7.4s
- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v6.4s
- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v7.4s
- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- .inst 0x5e2828a4 //sha256su0 v4.16b,v5.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e0760c4 //sha256su1 v4.16b,v6.16b,v7.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- .inst 0x5e2828c5 //sha256su0 v5.16b,v6.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0460e5 //sha256su1 v5.16b,v7.16b,v4.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v6.4s
- .inst 0x5e2828e6 //sha256su0 v6.16b,v7.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
- .inst 0x5e056086 //sha256su1 v6.16b,v4.16b,v5.16b
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v7.4s
- .inst 0x5e282887 //sha256su0 v7.16b,v4.16b
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
- .inst 0x5e0660a7 //sha256su1 v7.16b,v5.16b,v6.16b
- ld1 {v17.4s},[x3],#16
- add v16.4s,v16.4s,v4.4s
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
-
- ld1 {v16.4s},[x3],#16
- add v17.4s,v17.4s,v5.4s
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
-
- ld1 {v17.4s},[x3]
- add v16.4s,v16.4s,v6.4s
- sub x3,x3,#64*4-16 // rewind
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e104020 //sha256h v0.16b,v1.16b,v16.4s
- .inst 0x5e105041 //sha256h2 v1.16b,v2.16b,v16.4s
-
- add v17.4s,v17.4s,v7.4s
- orr v2.16b,v0.16b,v0.16b
- .inst 0x5e114020 //sha256h v0.16b,v1.16b,v17.4s
- .inst 0x5e115041 //sha256h2 v1.16b,v2.16b,v17.4s
-
- add v0.4s,v0.4s,v18.4s
- add v1.4s,v1.4s,v19.4s
-
- cbnz x2,.Loop_hw
-
- st1 {v0.4s,v1.4s},[x0]
-
- ldr x29,[sp],#16
- ret
-.size sha256_block_armv8,.-sha256_block_armv8
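
The .inst words in sha256_block_armv8 are the ARMv8 SHA-2 extension instructions (sha256h, sha256h2, sha256su0, sha256su1) spelled out numerically for assemblers that predate them, as the trailing comments indicate. In C the same steps are available as ACLE intrinsics; a hedged sketch of one four-round step and one message-schedule update, assuming a compiler targeting armv8-a with the crypto/sha2 extension:

    #include <arm_neon.h>

    /* four rounds: matches the orr / sha256h / sha256h2 triple in the asm */
    void quad_round(uint32x4_t *abcd, uint32x4_t *efgh, uint32x4_t wk)
    {
            uint32x4_t save = *abcd;                  /* orr v2.16b,v0.16b,v0.16b */

            *abcd = vsha256hq_u32(*abcd, *efgh, wk);  /* sha256h  v0,v1,vK */
            *efgh = vsha256h2q_u32(*efgh, save, wk);  /* sha256h2 v1,v2,vK */
    }

    /* message schedule: sha256su0 followed by sha256su1 expands the next
     * four input words */
    uint32x4_t schedule(uint32x4_t w0, uint32x4_t w1,
                        uint32x4_t w2, uint32x4_t w3)
    {
            return vsha256su1q_u32(vsha256su0q_u32(w0, w1), w2, w3);
    }
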
-#endif
-#ifdef __KERNEL__
-.globl sha256_block_neon
-#endif
-.type sha256_block_neon,%function
-.align 4
-sha256_block_neon:
-.Lneon_entry:
- stp x29, x30, [sp, #-16]!
- mov x29, sp
- sub sp,sp,#16*4
-
- adr x16,.LK256
- add x2,x1,x2,lsl#6 // len to point at the end of inp
-
- ld1 {v0.16b},[x1], #16
- ld1 {v1.16b},[x1], #16
- ld1 {v2.16b},[x1], #16
- ld1 {v3.16b},[x1], #16
- ld1 {v4.4s},[x16], #16
- ld1 {v5.4s},[x16], #16
- ld1 {v6.4s},[x16], #16
- ld1 {v7.4s},[x16], #16
- rev32 v0.16b,v0.16b // yes, even on
- rev32 v1.16b,v1.16b // big-endian
- rev32 v2.16b,v2.16b
- rev32 v3.16b,v3.16b
- mov x17,sp
- add v4.4s,v4.4s,v0.4s
- add v5.4s,v5.4s,v1.4s
- add v6.4s,v6.4s,v2.4s
- st1 {v4.4s-v5.4s},[x17], #32
- add v7.4s,v7.4s,v3.4s
- st1 {v6.4s-v7.4s},[x17]
- sub x17,x17,#32
-
- ldp w3,w4,[x0]
- ldp w5,w6,[x0,#8]
- ldp w7,w8,[x0,#16]
- ldp w9,w10,[x0,#24]
- ldr w12,[sp,#0]
- mov w13,wzr
- eor w14,w4,w5
- mov w15,wzr
- b .L_00_48
-
-.align 4
-.L_00_48:
- ext v4.16b,v0.16b,v1.16b,#4
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- bic w15,w9,w7
- ext v7.16b,v2.16b,v3.16b,#4
- eor w11,w7,w7,ror#5
- add w3,w3,w13
- mov d19,v3.d[1]
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w3,w3,ror#11
- ushr v5.4s,v4.4s,#3
- add w10,w10,w12
- add v0.4s,v0.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- ushr v7.4s,v4.4s,#18
- add w10,w10,w11
- ldr w12,[sp,#4]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w6,w6,w10
- sli v7.4s,v4.4s,#14
- eor w14,w14,w4
- ushr v16.4s,v19.4s,#17
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- eor v5.16b,v5.16b,v7.16b
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- sli v16.4s,v19.4s,#15
- add w10,w10,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- ushr v7.4s,v19.4s,#19
- add w9,w9,w12
- ror w11,w11,#6
- add v0.4s,v0.4s,v5.4s
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- sli v7.4s,v19.4s,#13
- add w9,w9,w11
- ldr w12,[sp,#8]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- eor v17.16b,v17.16b,v7.16b
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- add v0.4s,v0.4s,v17.4s
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- ushr v18.4s,v0.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v0.4s,#10
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- sli v18.4s,v0.4s,#15
- add w8,w8,w12
- ushr v17.4s,v0.4s,#19
- ror w11,w11,#6
- eor w13,w9,w10
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- sli v17.4s,v0.4s,#13
- ldr w12,[sp,#12]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w4,w4,w8
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w10
- eor v17.16b,v17.16b,v17.16b
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- mov v17.d[1],v19.d[0]
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- add v0.4s,v0.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add v4.4s,v4.4s,v0.4s
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#16]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- ext v4.16b,v1.16b,v2.16b,#4
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- bic w15,w5,w3
- ext v7.16b,v3.16b,v0.16b,#4
- eor w11,w3,w3,ror#5
- add w7,w7,w13
- mov d19,v0.d[1]
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w7,w7,ror#11
- ushr v5.4s,v4.4s,#3
- add w6,w6,w12
- add v1.4s,v1.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- ushr v7.4s,v4.4s,#18
- add w6,w6,w11
- ldr w12,[sp,#20]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w10,w10,w6
- sli v7.4s,v4.4s,#14
- eor w14,w14,w8
- ushr v16.4s,v19.4s,#17
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- eor v5.16b,v5.16b,v7.16b
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- sli v16.4s,v19.4s,#15
- add w6,w6,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- ushr v7.4s,v19.4s,#19
- add w5,w5,w12
- ror w11,w11,#6
- add v1.4s,v1.4s,v5.4s
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- sli v7.4s,v19.4s,#13
- add w5,w5,w11
- ldr w12,[sp,#24]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- eor v17.16b,v17.16b,v7.16b
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- add v1.4s,v1.4s,v17.4s
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- ushr v18.4s,v1.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v1.4s,#10
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- sli v18.4s,v1.4s,#15
- add w4,w4,w12
- ushr v17.4s,v1.4s,#19
- ror w11,w11,#6
- eor w13,w5,w6
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- sli v17.4s,v1.4s,#13
- ldr w12,[sp,#28]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w8,w8,w4
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w6
- eor v17.16b,v17.16b,v17.16b
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- mov v17.d[1],v19.d[0]
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- add v1.4s,v1.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add v4.4s,v4.4s,v1.4s
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- ldr w12,[sp,#32]
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- ext v4.16b,v2.16b,v3.16b,#4
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- bic w15,w9,w7
- ext v7.16b,v0.16b,v1.16b,#4
- eor w11,w7,w7,ror#5
- add w3,w3,w13
- mov d19,v1.d[1]
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w3,w3,ror#11
- ushr v5.4s,v4.4s,#3
- add w10,w10,w12
- add v2.4s,v2.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- ushr v7.4s,v4.4s,#18
- add w10,w10,w11
- ldr w12,[sp,#36]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w6,w6,w10
- sli v7.4s,v4.4s,#14
- eor w14,w14,w4
- ushr v16.4s,v19.4s,#17
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- eor v5.16b,v5.16b,v7.16b
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- sli v16.4s,v19.4s,#15
- add w10,w10,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- ushr v7.4s,v19.4s,#19
- add w9,w9,w12
- ror w11,w11,#6
- add v2.4s,v2.4s,v5.4s
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- sli v7.4s,v19.4s,#13
- add w9,w9,w11
- ldr w12,[sp,#40]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- eor v17.16b,v17.16b,v7.16b
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- add v2.4s,v2.4s,v17.4s
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- ushr v18.4s,v2.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v2.4s,#10
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- sli v18.4s,v2.4s,#15
- add w8,w8,w12
- ushr v17.4s,v2.4s,#19
- ror w11,w11,#6
- eor w13,w9,w10
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- sli v17.4s,v2.4s,#13
- ldr w12,[sp,#44]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w4,w4,w8
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w10
- eor v17.16b,v17.16b,v17.16b
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- mov v17.d[1],v19.d[0]
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- add v2.4s,v2.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add v4.4s,v4.4s,v2.4s
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#48]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- ext v4.16b,v3.16b,v0.16b,#4
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- bic w15,w5,w3
- ext v7.16b,v1.16b,v2.16b,#4
- eor w11,w3,w3,ror#5
- add w7,w7,w13
- mov d19,v2.d[1]
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- ushr v6.4s,v4.4s,#7
- eor w15,w7,w7,ror#11
- ushr v5.4s,v4.4s,#3
- add w6,w6,w12
- add v3.4s,v3.4s,v7.4s
- ror w11,w11,#6
- sli v6.4s,v4.4s,#25
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- ushr v7.4s,v4.4s,#18
- add w6,w6,w11
- ldr w12,[sp,#52]
- and w14,w14,w13
- eor v5.16b,v5.16b,v6.16b
- ror w15,w15,#2
- add w10,w10,w6
- sli v7.4s,v4.4s,#14
- eor w14,w14,w8
- ushr v16.4s,v19.4s,#17
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- eor v5.16b,v5.16b,v7.16b
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- sli v16.4s,v19.4s,#15
- add w6,w6,w14
- orr w12,w12,w15
- ushr v17.4s,v19.4s,#10
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- ushr v7.4s,v19.4s,#19
- add w5,w5,w12
- ror w11,w11,#6
- add v3.4s,v3.4s,v5.4s
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- sli v7.4s,v19.4s,#13
- add w5,w5,w11
- ldr w12,[sp,#56]
- and w13,w13,w14
- eor v17.16b,v17.16b,v16.16b
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- eor v17.16b,v17.16b,v7.16b
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- add v3.4s,v3.4s,v17.4s
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- ushr v18.4s,v3.4s,#17
- orr w12,w12,w15
- ushr v19.4s,v3.4s,#10
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- sli v18.4s,v3.4s,#15
- add w4,w4,w12
- ushr v17.4s,v3.4s,#19
- ror w11,w11,#6
- eor w13,w5,w6
- eor v19.16b,v19.16b,v18.16b
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- sli v17.4s,v3.4s,#13
- ldr w12,[sp,#60]
- and w14,w14,w13
- ror w15,w15,#2
- ld1 {v4.4s},[x16], #16
- add w8,w8,w4
- eor v19.16b,v19.16b,v17.16b
- eor w14,w14,w6
- eor v17.16b,v17.16b,v17.16b
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- mov v17.d[1],v19.d[0]
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- add v3.4s,v3.4s,v17.4s
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add v4.4s,v4.4s,v3.4s
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- ldr w12,[x16]
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- cmp w12,#0 // check for K256 terminator
- ldr w12,[sp,#0]
- sub x17,x17,#64
- bne .L_00_48
-
- sub x16,x16,#256 // rewind x16
- cmp x1,x2
- mov x17, #64
- csel x17, x17, xzr, eq
- sub x1,x1,x17 // avoid SEGV
- mov x17,sp
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- ld1 {v0.16b},[x1],#16
- bic w15,w9,w7
- eor w11,w7,w7,ror#5
- ld1 {v4.4s},[x16],#16
- add w3,w3,w13
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- eor w15,w3,w3,ror#11
- rev32 v0.16b,v0.16b
- add w10,w10,w12
- ror w11,w11,#6
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- add v4.4s,v4.4s,v0.4s
- add w10,w10,w11
- ldr w12,[sp,#4]
- and w14,w14,w13
- ror w15,w15,#2
- add w6,w6,w10
- eor w14,w14,w4
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- add w10,w10,w14
- orr w12,w12,w15
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- add w9,w9,w12
- ror w11,w11,#6
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- add w9,w9,w11
- ldr w12,[sp,#8]
- and w13,w13,w14
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- orr w12,w12,w15
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- add w8,w8,w12
- ror w11,w11,#6
- eor w13,w9,w10
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- ldr w12,[sp,#12]
- and w14,w14,w13
- ror w15,w15,#2
- add w4,w4,w8
- eor w14,w14,w10
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#16]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- ld1 {v1.16b},[x1],#16
- bic w15,w5,w3
- eor w11,w3,w3,ror#5
- ld1 {v4.4s},[x16],#16
- add w7,w7,w13
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- eor w15,w7,w7,ror#11
- rev32 v1.16b,v1.16b
- add w6,w6,w12
- ror w11,w11,#6
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- add v4.4s,v4.4s,v1.4s
- add w6,w6,w11
- ldr w12,[sp,#20]
- and w14,w14,w13
- ror w15,w15,#2
- add w10,w10,w6
- eor w14,w14,w8
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- add w6,w6,w14
- orr w12,w12,w15
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- add w5,w5,w12
- ror w11,w11,#6
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- add w5,w5,w11
- ldr w12,[sp,#24]
- and w13,w13,w14
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- orr w12,w12,w15
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- add w4,w4,w12
- ror w11,w11,#6
- eor w13,w5,w6
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- ldr w12,[sp,#28]
- and w14,w14,w13
- ror w15,w15,#2
- add w8,w8,w4
- eor w14,w14,w6
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- ldr w12,[sp,#32]
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- add w10,w10,w12
- add w3,w3,w15
- and w12,w8,w7
- ld1 {v2.16b},[x1],#16
- bic w15,w9,w7
- eor w11,w7,w7,ror#5
- ld1 {v4.4s},[x16],#16
- add w3,w3,w13
- orr w12,w12,w15
- eor w11,w11,w7,ror#19
- eor w15,w3,w3,ror#11
- rev32 v2.16b,v2.16b
- add w10,w10,w12
- ror w11,w11,#6
- eor w13,w3,w4
- eor w15,w15,w3,ror#20
- add v4.4s,v4.4s,v2.4s
- add w10,w10,w11
- ldr w12,[sp,#36]
- and w14,w14,w13
- ror w15,w15,#2
- add w6,w6,w10
- eor w14,w14,w4
- add w9,w9,w12
- add w10,w10,w15
- and w12,w7,w6
- bic w15,w8,w6
- eor w11,w6,w6,ror#5
- add w10,w10,w14
- orr w12,w12,w15
- eor w11,w11,w6,ror#19
- eor w15,w10,w10,ror#11
- add w9,w9,w12
- ror w11,w11,#6
- eor w14,w10,w3
- eor w15,w15,w10,ror#20
- add w9,w9,w11
- ldr w12,[sp,#40]
- and w13,w13,w14
- ror w15,w15,#2
- add w5,w5,w9
- eor w13,w13,w3
- add w8,w8,w12
- add w9,w9,w15
- and w12,w6,w5
- bic w15,w7,w5
- eor w11,w5,w5,ror#5
- add w9,w9,w13
- orr w12,w12,w15
- eor w11,w11,w5,ror#19
- eor w15,w9,w9,ror#11
- add w8,w8,w12
- ror w11,w11,#6
- eor w13,w9,w10
- eor w15,w15,w9,ror#20
- add w8,w8,w11
- ldr w12,[sp,#44]
- and w14,w14,w13
- ror w15,w15,#2
- add w4,w4,w8
- eor w14,w14,w10
- add w7,w7,w12
- add w8,w8,w15
- and w12,w5,w4
- bic w15,w6,w4
- eor w11,w4,w4,ror#5
- add w8,w8,w14
- orr w12,w12,w15
- eor w11,w11,w4,ror#19
- eor w15,w8,w8,ror#11
- add w7,w7,w12
- ror w11,w11,#6
- eor w14,w8,w9
- eor w15,w15,w8,ror#20
- add w7,w7,w11
- ldr w12,[sp,#48]
- and w13,w13,w14
- ror w15,w15,#2
- add w3,w3,w7
- eor w13,w13,w9
- st1 {v4.4s},[x17], #16
- add w6,w6,w12
- add w7,w7,w15
- and w12,w4,w3
- ld1 {v3.16b},[x1],#16
- bic w15,w5,w3
- eor w11,w3,w3,ror#5
- ld1 {v4.4s},[x16],#16
- add w7,w7,w13
- orr w12,w12,w15
- eor w11,w11,w3,ror#19
- eor w15,w7,w7,ror#11
- rev32 v3.16b,v3.16b
- add w6,w6,w12
- ror w11,w11,#6
- eor w13,w7,w8
- eor w15,w15,w7,ror#20
- add v4.4s,v4.4s,v3.4s
- add w6,w6,w11
- ldr w12,[sp,#52]
- and w14,w14,w13
- ror w15,w15,#2
- add w10,w10,w6
- eor w14,w14,w8
- add w5,w5,w12
- add w6,w6,w15
- and w12,w3,w10
- bic w15,w4,w10
- eor w11,w10,w10,ror#5
- add w6,w6,w14
- orr w12,w12,w15
- eor w11,w11,w10,ror#19
- eor w15,w6,w6,ror#11
- add w5,w5,w12
- ror w11,w11,#6
- eor w14,w6,w7
- eor w15,w15,w6,ror#20
- add w5,w5,w11
- ldr w12,[sp,#56]
- and w13,w13,w14
- ror w15,w15,#2
- add w9,w9,w5
- eor w13,w13,w7
- add w4,w4,w12
- add w5,w5,w15
- and w12,w10,w9
- bic w15,w3,w9
- eor w11,w9,w9,ror#5
- add w5,w5,w13
- orr w12,w12,w15
- eor w11,w11,w9,ror#19
- eor w15,w5,w5,ror#11
- add w4,w4,w12
- ror w11,w11,#6
- eor w13,w5,w6
- eor w15,w15,w5,ror#20
- add w4,w4,w11
- ldr w12,[sp,#60]
- and w14,w14,w13
- ror w15,w15,#2
- add w8,w8,w4
- eor w14,w14,w6
- add w3,w3,w12
- add w4,w4,w15
- and w12,w9,w8
- bic w15,w10,w8
- eor w11,w8,w8,ror#5
- add w4,w4,w14
- orr w12,w12,w15
- eor w11,w11,w8,ror#19
- eor w15,w4,w4,ror#11
- add w3,w3,w12
- ror w11,w11,#6
- eor w14,w4,w5
- eor w15,w15,w4,ror#20
- add w3,w3,w11
- and w13,w13,w14
- ror w15,w15,#2
- add w7,w7,w3
- eor w13,w13,w5
- st1 {v4.4s},[x17], #16
- add w3,w3,w15 // h+=Sigma0(a) from the past
- ldp w11,w12,[x0,#0]
- add w3,w3,w13 // h+=Maj(a,b,c) from the past
- ldp w13,w14,[x0,#8]
- add w3,w3,w11 // accumulate
- add w4,w4,w12
- ldp w11,w12,[x0,#16]
- add w5,w5,w13
- add w6,w6,w14
- ldp w13,w14,[x0,#24]
- add w7,w7,w11
- add w8,w8,w12
- ldr w12,[sp,#0]
- stp w3,w4,[x0,#0]
- add w9,w9,w13
- mov w13,wzr
- stp w5,w6,[x0,#8]
- add w10,w10,w14
- stp w7,w8,[x0,#16]
- eor w14,w4,w5
- stp w9,w10,[x0,#24]
- mov w15,wzr
- mov x17,sp
- b.ne .L_00_48
-
- ldr x29,[x29]
- add sp,sp,#16*4+16
- ret
-.size sha256_block_neon,.-sha256_block_neon
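
AdvSIMD has no rotate instruction for 32-bit lanes, so the sha256_block_neon schedule above synthesizes each rotation from a shift pair: ushr #n followed by sli #(32-n) (shift left and insert). A hedged intrinsics rendering of the sigma0 computation those ushr/sli/eor sequences implement:

    #include <arm_neon.h>

    /* sigma0(x) = ror7(x) ^ ror18(x) ^ (x >> 3), vectorized as above */
    uint32x4_t sigma0(uint32x4_t x)
    {
            uint32x4_t r7  = vsliq_n_u32(vshrq_n_u32(x, 7),  x, 25); /* ushr #7  + sli #25 => ror 7  */
            uint32x4_t r18 = vsliq_n_u32(vshrq_n_u32(x, 18), x, 14); /* ushr #18 + sli #14 => ror 18 */
            uint32x4_t s3  = vshrq_n_u32(x, 3);                      /* ushr #3 */

            return veorq_u32(veorq_u32(r7, r18), s3);
    }
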
-#ifndef __KERNEL__
-.comm OPENSSL_armcap_P,4,4
-#endif
diff --git a/arch/arm64/crypto/sha3-ce-core.S b/arch/arm64/crypto/sha3-ce-core.S
index 6f5208414fe3..9c77313f5a60 100644
--- a/arch/arm64/crypto/sha3-ce-core.S
+++ b/arch/arm64/crypto/sha3-ce-core.S
@@ -184,11 +184,11 @@ SYM_FUNC_START(sha3_ce_transform)
eor v0.16b, v0.16b, v31.16b
cbnz w8, 3b
- cond_yield 3f, x8
+ cond_yield 4f, x8, x9
cbnz w2, 0b
/* save state */
-3: st1 { v0.1d- v3.1d}, [x0], #32
+4: st1 { v0.1d- v3.1d}, [x0], #32
st1 { v4.1d- v7.1d}, [x0], #32
st1 { v8.1d-v11.1d}, [x0], #32
st1 {v12.1d-v15.1d}, [x0], #32
diff --git a/arch/arm64/crypto/sha512-ce-core.S b/arch/arm64/crypto/sha512-ce-core.S
index d6e7f6c95fa6..b6a3a36e15f5 100644
--- a/arch/arm64/crypto/sha512-ce-core.S
+++ b/arch/arm64/crypto/sha512-ce-core.S
@@ -195,7 +195,7 @@ CPU_LE( rev64 v19.16b, v19.16b )
add v10.2d, v10.2d, v2.2d
add v11.2d, v11.2d, v3.2d
- cond_yield 3f, x4
+ cond_yield 3f, x4, x5
/* handled all input blocks? */
cbnz w2, 0b
diff --git a/arch/arm64/crypto/sha512-core.S_shipped b/arch/arm64/crypto/sha512-core.S_shipped
deleted file mode 100644
index e063a6106720..000000000000
--- a/arch/arm64/crypto/sha512-core.S_shipped
+++ /dev/null
@@ -1,1093 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-// This code is taken from the OpenSSL project but the author (Andy Polyakov)
-// has relicensed it under the GPLv2. Therefore this program is free software;
-// you can redistribute it and/or modify it under the terms of the GNU General
-// Public License version 2 as published by the Free Software Foundation.
-//
-// The original headers, including the original license headers, are
-// included below for completeness.
-
-// Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
-//
-// Licensed under the OpenSSL license (the "License"). You may not use
-// this file except in compliance with the License. You can obtain a copy
-// in the file LICENSE in the source distribution or at
-// https://www.openssl.org/source/license.html
-
-// ====================================================================
-// Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
-// project. The module is, however, dual licensed under OpenSSL and
-// CRYPTOGAMS licenses depending on where you obtain it. For further
-// details see http://www.openssl.org/~appro/cryptogams/.
-// ====================================================================
-//
-// SHA256/512 for ARMv8.
-//
-// Performance in cycles per processed byte and improvement coefficient
-// over code generated with "default" compiler:
-//
-// SHA256-hw SHA256(*) SHA512
-// Apple A7 1.97 10.5 (+33%) 6.73 (-1%(**))
-// Cortex-A53 2.38 15.5 (+115%) 10.0 (+150%(***))
-// Cortex-A57 2.31 11.6 (+86%) 7.51 (+260%(***))
-// Denver 2.01 10.5 (+26%) 6.70 (+8%)
-// X-Gene 20.0 (+100%) 12.8 (+300%(***))
-// Mongoose 2.36 13.0 (+50%) 8.36 (+33%)
-//
-// (*) Software SHA256 results are of lesser relevance, presented
-// mostly for informational purposes.
-// (**) The result is a trade-off: it's possible to improve it by
-// 10% (or by 1 cycle per round), but at the cost of 20% loss
-// on Cortex-A53 (or by 4 cycles per round).
-// (***) Super-impressive coefficients over gcc-generated code are
-// indication of some compiler "pathology", most notably code
-// generated with -mgeneral-regs-only is significantly faster
-// and the gap is only 40-90%.
-//
-// October 2016.
-//
-// Originally it was reckoned that it makes no sense to implement NEON
-// version of SHA256 for 64-bit processors. This is because performance
-// improvement on most wide-spread Cortex-A5x processors was observed
-// to be marginal, same on Cortex-A53 and ~10% on A57. But then it was
-// observed that 32-bit NEON SHA256 performs significantly better than
-// 64-bit scalar version on *some* of the more recent processors. As a
-// result, a 64-bit NEON version of SHA256 was added to provide the best
-// all-round performance. For example it executes ~30% faster on X-Gene
-// and Mongoose. [For reference, NEON version of SHA512 is bound to
-// deliver much less improvement, likely *negative* on Cortex-A5x.
-// Which is why NEON support is limited to SHA256.]
-
-#ifndef __KERNEL__
-# include "arm_arch.h"
-#endif
-
-.text
-
-.extern OPENSSL_armcap_P
-.globl sha512_block_data_order
-.type sha512_block_data_order,%function
-.align 6
-sha512_block_data_order:
- stp x29,x30,[sp,#-128]!
- add x29,sp,#0
-
- stp x19,x20,[sp,#16]
- stp x21,x22,[sp,#32]
- stp x23,x24,[sp,#48]
- stp x25,x26,[sp,#64]
- stp x27,x28,[sp,#80]
- sub sp,sp,#4*8
-
- ldp x20,x21,[x0] // load context
- ldp x22,x23,[x0,#2*8]
- ldp x24,x25,[x0,#4*8]
- add x2,x1,x2,lsl#7 // end of input
- ldp x26,x27,[x0,#6*8]
- adr x30,.LK512
- stp x0,x2,[x29,#96]
-
-.Loop:
- ldp x3,x4,[x1],#2*8
- ldr x19,[x30],#8 // *K++
- eor x28,x21,x22 // magic seed
- str x1,[x29,#112]
-#ifndef __AARCH64EB__
- rev x3,x3 // 0
-#endif
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- eor x6,x24,x24,ror#23
- and x17,x25,x24
- bic x19,x26,x24
- add x27,x27,x3 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x6,ror#18 // Sigma1(e)
- ror x6,x20,#28
- add x27,x27,x17 // h+=Ch(e,f,g)
- eor x17,x20,x20,ror#5
- add x27,x27,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x23,x23,x27 // d+=h
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x6,x17,ror#34 // Sigma0(a)
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x27,x27,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x4,x4 // 1
-#endif
- ldp x5,x6,[x1],#2*8
- add x27,x27,x17 // h+=Sigma0(a)
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- eor x7,x23,x23,ror#23
- and x17,x24,x23
- bic x28,x25,x23
- add x26,x26,x4 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x7,ror#18 // Sigma1(e)
- ror x7,x27,#28
- add x26,x26,x17 // h+=Ch(e,f,g)
- eor x17,x27,x27,ror#5
- add x26,x26,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x22,x22,x26 // d+=h
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x7,x17,ror#34 // Sigma0(a)
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x26,x26,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x5,x5 // 2
-#endif
- add x26,x26,x17 // h+=Sigma0(a)
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- eor x8,x22,x22,ror#23
- and x17,x23,x22
- bic x19,x24,x22
- add x25,x25,x5 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x8,ror#18 // Sigma1(e)
- ror x8,x26,#28
- add x25,x25,x17 // h+=Ch(e,f,g)
- eor x17,x26,x26,ror#5
- add x25,x25,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x21,x21,x25 // d+=h
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x8,x17,ror#34 // Sigma0(a)
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x25,x25,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x6,x6 // 3
-#endif
- ldp x7,x8,[x1],#2*8
- add x25,x25,x17 // h+=Sigma0(a)
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- eor x9,x21,x21,ror#23
- and x17,x22,x21
- bic x28,x23,x21
- add x24,x24,x6 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x9,ror#18 // Sigma1(e)
- ror x9,x25,#28
- add x24,x24,x17 // h+=Ch(e,f,g)
- eor x17,x25,x25,ror#5
- add x24,x24,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x20,x20,x24 // d+=h
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x9,x17,ror#34 // Sigma0(a)
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x24,x24,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x7,x7 // 4
-#endif
- add x24,x24,x17 // h+=Sigma0(a)
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- eor x10,x20,x20,ror#23
- and x17,x21,x20
- bic x19,x22,x20
- add x23,x23,x7 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x10,ror#18 // Sigma1(e)
- ror x10,x24,#28
- add x23,x23,x17 // h+=Ch(e,f,g)
- eor x17,x24,x24,ror#5
- add x23,x23,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x27,x27,x23 // d+=h
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x10,x17,ror#34 // Sigma0(a)
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x23,x23,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x8,x8 // 5
-#endif
- ldp x9,x10,[x1],#2*8
- add x23,x23,x17 // h+=Sigma0(a)
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- eor x11,x27,x27,ror#23
- and x17,x20,x27
- bic x28,x21,x27
- add x22,x22,x8 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x11,ror#18 // Sigma1(e)
- ror x11,x23,#28
- add x22,x22,x17 // h+=Ch(e,f,g)
- eor x17,x23,x23,ror#5
- add x22,x22,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x26,x26,x22 // d+=h
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x11,x17,ror#34 // Sigma0(a)
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x22,x22,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x9,x9 // 6
-#endif
- add x22,x22,x17 // h+=Sigma0(a)
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- eor x12,x26,x26,ror#23
- and x17,x27,x26
- bic x19,x20,x26
- add x21,x21,x9 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x12,ror#18 // Sigma1(e)
- ror x12,x22,#28
- add x21,x21,x17 // h+=Ch(e,f,g)
- eor x17,x22,x22,ror#5
- add x21,x21,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x25,x25,x21 // d+=h
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x12,x17,ror#34 // Sigma0(a)
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x21,x21,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x10,x10 // 7
-#endif
- ldp x11,x12,[x1],#2*8
- add x21,x21,x17 // h+=Sigma0(a)
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- eor x13,x25,x25,ror#23
- and x17,x26,x25
- bic x28,x27,x25
- add x20,x20,x10 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x13,ror#18 // Sigma1(e)
- ror x13,x21,#28
- add x20,x20,x17 // h+=Ch(e,f,g)
- eor x17,x21,x21,ror#5
- add x20,x20,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x24,x24,x20 // d+=h
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x13,x17,ror#34 // Sigma0(a)
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x20,x20,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x11,x11 // 8
-#endif
- add x20,x20,x17 // h+=Sigma0(a)
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- eor x14,x24,x24,ror#23
- and x17,x25,x24
- bic x19,x26,x24
- add x27,x27,x11 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x14,ror#18 // Sigma1(e)
- ror x14,x20,#28
- add x27,x27,x17 // h+=Ch(e,f,g)
- eor x17,x20,x20,ror#5
- add x27,x27,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x23,x23,x27 // d+=h
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x14,x17,ror#34 // Sigma0(a)
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x27,x27,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x12,x12 // 9
-#endif
- ldp x13,x14,[x1],#2*8
- add x27,x27,x17 // h+=Sigma0(a)
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- eor x15,x23,x23,ror#23
- and x17,x24,x23
- bic x28,x25,x23
- add x26,x26,x12 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x15,ror#18 // Sigma1(e)
- ror x15,x27,#28
- add x26,x26,x17 // h+=Ch(e,f,g)
- eor x17,x27,x27,ror#5
- add x26,x26,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x22,x22,x26 // d+=h
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x15,x17,ror#34 // Sigma0(a)
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x26,x26,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x13,x13 // 10
-#endif
- add x26,x26,x17 // h+=Sigma0(a)
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- eor x0,x22,x22,ror#23
- and x17,x23,x22
- bic x19,x24,x22
- add x25,x25,x13 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x0,ror#18 // Sigma1(e)
- ror x0,x26,#28
- add x25,x25,x17 // h+=Ch(e,f,g)
- eor x17,x26,x26,ror#5
- add x25,x25,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x21,x21,x25 // d+=h
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x0,x17,ror#34 // Sigma0(a)
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x25,x25,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x14,x14 // 11
-#endif
- ldp x15,x0,[x1],#2*8
- add x25,x25,x17 // h+=Sigma0(a)
- str x6,[sp,#24]
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- eor x6,x21,x21,ror#23
- and x17,x22,x21
- bic x28,x23,x21
- add x24,x24,x14 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x6,ror#18 // Sigma1(e)
- ror x6,x25,#28
- add x24,x24,x17 // h+=Ch(e,f,g)
- eor x17,x25,x25,ror#5
- add x24,x24,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x20,x20,x24 // d+=h
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x6,x17,ror#34 // Sigma0(a)
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x24,x24,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x15,x15 // 12
-#endif
- add x24,x24,x17 // h+=Sigma0(a)
- str x7,[sp,#0]
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- eor x7,x20,x20,ror#23
- and x17,x21,x20
- bic x19,x22,x20
- add x23,x23,x15 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x7,ror#18 // Sigma1(e)
- ror x7,x24,#28
- add x23,x23,x17 // h+=Ch(e,f,g)
- eor x17,x24,x24,ror#5
- add x23,x23,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x27,x27,x23 // d+=h
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x7,x17,ror#34 // Sigma0(a)
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x23,x23,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x0,x0 // 13
-#endif
- ldp x1,x2,[x1]
- add x23,x23,x17 // h+=Sigma0(a)
- str x8,[sp,#8]
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- eor x8,x27,x27,ror#23
- and x17,x20,x27
- bic x28,x21,x27
- add x22,x22,x0 // h+=X[i]
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x8,ror#18 // Sigma1(e)
- ror x8,x23,#28
- add x22,x22,x17 // h+=Ch(e,f,g)
- eor x17,x23,x23,ror#5
- add x22,x22,x16 // h+=Sigma1(e)
- and x19,x19,x28 // (b^c)&=(a^b)
- add x26,x26,x22 // d+=h
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x8,x17,ror#34 // Sigma0(a)
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- //add x22,x22,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x1,x1 // 14
-#endif
- ldr x6,[sp,#24]
- add x22,x22,x17 // h+=Sigma0(a)
- str x9,[sp,#16]
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- eor x9,x26,x26,ror#23
- and x17,x27,x26
- bic x19,x20,x26
- add x21,x21,x1 // h+=X[i]
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x9,ror#18 // Sigma1(e)
- ror x9,x22,#28
- add x21,x21,x17 // h+=Ch(e,f,g)
- eor x17,x22,x22,ror#5
- add x21,x21,x16 // h+=Sigma1(e)
- and x28,x28,x19 // (b^c)&=(a^b)
- add x25,x25,x21 // d+=h
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x9,x17,ror#34 // Sigma0(a)
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- //add x21,x21,x17 // h+=Sigma0(a)
-#ifndef __AARCH64EB__
- rev x2,x2 // 15
-#endif
- ldr x7,[sp,#0]
- add x21,x21,x17 // h+=Sigma0(a)
- str x10,[sp,#24]
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- ror x9,x4,#1
- and x17,x26,x25
- ror x8,x1,#19
- bic x28,x27,x25
- ror x10,x21,#28
- add x20,x20,x2 // h+=X[i]
- eor x16,x16,x25,ror#18
- eor x9,x9,x4,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x25,ror#41 // Sigma1(e)
- eor x10,x10,x21,ror#34
- add x20,x20,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x8,x8,x1,ror#61
- eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
- add x20,x20,x16 // h+=Sigma1(e)
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x10,x21,ror#39 // Sigma0(a)
- eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
- add x3,x3,x12
- add x24,x24,x20 // d+=h
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x3,x3,x9
- add x20,x20,x17 // h+=Sigma0(a)
- add x3,x3,x8
-.Loop_16_xx:
- ldr x8,[sp,#8]
- str x11,[sp,#0]
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- ror x10,x5,#1
- and x17,x25,x24
- ror x9,x2,#19
- bic x19,x26,x24
- ror x11,x20,#28
- add x27,x27,x3 // h+=X[i]
- eor x16,x16,x24,ror#18
- eor x10,x10,x5,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x24,ror#41 // Sigma1(e)
- eor x11,x11,x20,ror#34
- add x27,x27,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x9,x9,x2,ror#61
- eor x10,x10,x5,lsr#7 // sigma0(X[i+1])
- add x27,x27,x16 // h+=Sigma1(e)
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x11,x20,ror#39 // Sigma0(a)
- eor x9,x9,x2,lsr#6 // sigma1(X[i+14])
- add x4,x4,x13
- add x23,x23,x27 // d+=h
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x4,x4,x10
- add x27,x27,x17 // h+=Sigma0(a)
- add x4,x4,x9
- ldr x9,[sp,#16]
- str x12,[sp,#8]
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- ror x11,x6,#1
- and x17,x24,x23
- ror x10,x3,#19
- bic x28,x25,x23
- ror x12,x27,#28
- add x26,x26,x4 // h+=X[i]
- eor x16,x16,x23,ror#18
- eor x11,x11,x6,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x23,ror#41 // Sigma1(e)
- eor x12,x12,x27,ror#34
- add x26,x26,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x10,x10,x3,ror#61
- eor x11,x11,x6,lsr#7 // sigma0(X[i+1])
- add x26,x26,x16 // h+=Sigma1(e)
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x12,x27,ror#39 // Sigma0(a)
- eor x10,x10,x3,lsr#6 // sigma1(X[i+14])
- add x5,x5,x14
- add x22,x22,x26 // d+=h
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x5,x5,x11
- add x26,x26,x17 // h+=Sigma0(a)
- add x5,x5,x10
- ldr x10,[sp,#24]
- str x13,[sp,#16]
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- ror x12,x7,#1
- and x17,x23,x22
- ror x11,x4,#19
- bic x19,x24,x22
- ror x13,x26,#28
- add x25,x25,x5 // h+=X[i]
- eor x16,x16,x22,ror#18
- eor x12,x12,x7,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x22,ror#41 // Sigma1(e)
- eor x13,x13,x26,ror#34
- add x25,x25,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x11,x11,x4,ror#61
- eor x12,x12,x7,lsr#7 // sigma0(X[i+1])
- add x25,x25,x16 // h+=Sigma1(e)
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x13,x26,ror#39 // Sigma0(a)
- eor x11,x11,x4,lsr#6 // sigma1(X[i+14])
- add x6,x6,x15
- add x21,x21,x25 // d+=h
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x6,x6,x12
- add x25,x25,x17 // h+=Sigma0(a)
- add x6,x6,x11
- ldr x11,[sp,#0]
- str x14,[sp,#24]
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- ror x13,x8,#1
- and x17,x22,x21
- ror x12,x5,#19
- bic x28,x23,x21
- ror x14,x25,#28
- add x24,x24,x6 // h+=X[i]
- eor x16,x16,x21,ror#18
- eor x13,x13,x8,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x21,ror#41 // Sigma1(e)
- eor x14,x14,x25,ror#34
- add x24,x24,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x12,x12,x5,ror#61
- eor x13,x13,x8,lsr#7 // sigma0(X[i+1])
- add x24,x24,x16 // h+=Sigma1(e)
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x14,x25,ror#39 // Sigma0(a)
- eor x12,x12,x5,lsr#6 // sigma1(X[i+14])
- add x7,x7,x0
- add x20,x20,x24 // d+=h
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x7,x7,x13
- add x24,x24,x17 // h+=Sigma0(a)
- add x7,x7,x12
- ldr x12,[sp,#8]
- str x15,[sp,#0]
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- ror x14,x9,#1
- and x17,x21,x20
- ror x13,x6,#19
- bic x19,x22,x20
- ror x15,x24,#28
- add x23,x23,x7 // h+=X[i]
- eor x16,x16,x20,ror#18
- eor x14,x14,x9,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x20,ror#41 // Sigma1(e)
- eor x15,x15,x24,ror#34
- add x23,x23,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x13,x13,x6,ror#61
- eor x14,x14,x9,lsr#7 // sigma0(X[i+1])
- add x23,x23,x16 // h+=Sigma1(e)
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x15,x24,ror#39 // Sigma0(a)
- eor x13,x13,x6,lsr#6 // sigma1(X[i+14])
- add x8,x8,x1
- add x27,x27,x23 // d+=h
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x8,x8,x14
- add x23,x23,x17 // h+=Sigma0(a)
- add x8,x8,x13
- ldr x13,[sp,#16]
- str x0,[sp,#8]
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- ror x15,x10,#1
- and x17,x20,x27
- ror x14,x7,#19
- bic x28,x21,x27
- ror x0,x23,#28
- add x22,x22,x8 // h+=X[i]
- eor x16,x16,x27,ror#18
- eor x15,x15,x10,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x27,ror#41 // Sigma1(e)
- eor x0,x0,x23,ror#34
- add x22,x22,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x14,x14,x7,ror#61
- eor x15,x15,x10,lsr#7 // sigma0(X[i+1])
- add x22,x22,x16 // h+=Sigma1(e)
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x0,x23,ror#39 // Sigma0(a)
- eor x14,x14,x7,lsr#6 // sigma1(X[i+14])
- add x9,x9,x2
- add x26,x26,x22 // d+=h
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x9,x9,x15
- add x22,x22,x17 // h+=Sigma0(a)
- add x9,x9,x14
- ldr x14,[sp,#24]
- str x1,[sp,#16]
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- ror x0,x11,#1
- and x17,x27,x26
- ror x15,x8,#19
- bic x19,x20,x26
- ror x1,x22,#28
- add x21,x21,x9 // h+=X[i]
- eor x16,x16,x26,ror#18
- eor x0,x0,x11,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x26,ror#41 // Sigma1(e)
- eor x1,x1,x22,ror#34
- add x21,x21,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x15,x15,x8,ror#61
- eor x0,x0,x11,lsr#7 // sigma0(X[i+1])
- add x21,x21,x16 // h+=Sigma1(e)
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x1,x22,ror#39 // Sigma0(a)
- eor x15,x15,x8,lsr#6 // sigma1(X[i+14])
- add x10,x10,x3
- add x25,x25,x21 // d+=h
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x10,x10,x0
- add x21,x21,x17 // h+=Sigma0(a)
- add x10,x10,x15
- ldr x15,[sp,#0]
- str x2,[sp,#24]
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- ror x1,x12,#1
- and x17,x26,x25
- ror x0,x9,#19
- bic x28,x27,x25
- ror x2,x21,#28
- add x20,x20,x10 // h+=X[i]
- eor x16,x16,x25,ror#18
- eor x1,x1,x12,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x25,ror#41 // Sigma1(e)
- eor x2,x2,x21,ror#34
- add x20,x20,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x0,x0,x9,ror#61
- eor x1,x1,x12,lsr#7 // sigma0(X[i+1])
- add x20,x20,x16 // h+=Sigma1(e)
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x2,x21,ror#39 // Sigma0(a)
- eor x0,x0,x9,lsr#6 // sigma1(X[i+14])
- add x11,x11,x4
- add x24,x24,x20 // d+=h
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x11,x11,x1
- add x20,x20,x17 // h+=Sigma0(a)
- add x11,x11,x0
- ldr x0,[sp,#8]
- str x3,[sp,#0]
- ror x16,x24,#14
- add x27,x27,x19 // h+=K[i]
- ror x2,x13,#1
- and x17,x25,x24
- ror x1,x10,#19
- bic x19,x26,x24
- ror x3,x20,#28
- add x27,x27,x11 // h+=X[i]
- eor x16,x16,x24,ror#18
- eor x2,x2,x13,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x20,x21 // a^b, b^c in next round
- eor x16,x16,x24,ror#41 // Sigma1(e)
- eor x3,x3,x20,ror#34
- add x27,x27,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x1,x1,x10,ror#61
- eor x2,x2,x13,lsr#7 // sigma0(X[i+1])
- add x27,x27,x16 // h+=Sigma1(e)
- eor x28,x28,x21 // Maj(a,b,c)
- eor x17,x3,x20,ror#39 // Sigma0(a)
- eor x1,x1,x10,lsr#6 // sigma1(X[i+14])
- add x12,x12,x5
- add x23,x23,x27 // d+=h
- add x27,x27,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x12,x12,x2
- add x27,x27,x17 // h+=Sigma0(a)
- add x12,x12,x1
- ldr x1,[sp,#16]
- str x4,[sp,#8]
- ror x16,x23,#14
- add x26,x26,x28 // h+=K[i]
- ror x3,x14,#1
- and x17,x24,x23
- ror x2,x11,#19
- bic x28,x25,x23
- ror x4,x27,#28
- add x26,x26,x12 // h+=X[i]
- eor x16,x16,x23,ror#18
- eor x3,x3,x14,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x27,x20 // a^b, b^c in next round
- eor x16,x16,x23,ror#41 // Sigma1(e)
- eor x4,x4,x27,ror#34
- add x26,x26,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x2,x2,x11,ror#61
- eor x3,x3,x14,lsr#7 // sigma0(X[i+1])
- add x26,x26,x16 // h+=Sigma1(e)
- eor x19,x19,x20 // Maj(a,b,c)
- eor x17,x4,x27,ror#39 // Sigma0(a)
- eor x2,x2,x11,lsr#6 // sigma1(X[i+14])
- add x13,x13,x6
- add x22,x22,x26 // d+=h
- add x26,x26,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x13,x13,x3
- add x26,x26,x17 // h+=Sigma0(a)
- add x13,x13,x2
- ldr x2,[sp,#24]
- str x5,[sp,#16]
- ror x16,x22,#14
- add x25,x25,x19 // h+=K[i]
- ror x4,x15,#1
- and x17,x23,x22
- ror x3,x12,#19
- bic x19,x24,x22
- ror x5,x26,#28
- add x25,x25,x13 // h+=X[i]
- eor x16,x16,x22,ror#18
- eor x4,x4,x15,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x26,x27 // a^b, b^c in next round
- eor x16,x16,x22,ror#41 // Sigma1(e)
- eor x5,x5,x26,ror#34
- add x25,x25,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x3,x3,x12,ror#61
- eor x4,x4,x15,lsr#7 // sigma0(X[i+1])
- add x25,x25,x16 // h+=Sigma1(e)
- eor x28,x28,x27 // Maj(a,b,c)
- eor x17,x5,x26,ror#39 // Sigma0(a)
- eor x3,x3,x12,lsr#6 // sigma1(X[i+14])
- add x14,x14,x7
- add x21,x21,x25 // d+=h
- add x25,x25,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x14,x14,x4
- add x25,x25,x17 // h+=Sigma0(a)
- add x14,x14,x3
- ldr x3,[sp,#0]
- str x6,[sp,#24]
- ror x16,x21,#14
- add x24,x24,x28 // h+=K[i]
- ror x5,x0,#1
- and x17,x22,x21
- ror x4,x13,#19
- bic x28,x23,x21
- ror x6,x25,#28
- add x24,x24,x14 // h+=X[i]
- eor x16,x16,x21,ror#18
- eor x5,x5,x0,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x25,x26 // a^b, b^c in next round
- eor x16,x16,x21,ror#41 // Sigma1(e)
- eor x6,x6,x25,ror#34
- add x24,x24,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x4,x4,x13,ror#61
- eor x5,x5,x0,lsr#7 // sigma0(X[i+1])
- add x24,x24,x16 // h+=Sigma1(e)
- eor x19,x19,x26 // Maj(a,b,c)
- eor x17,x6,x25,ror#39 // Sigma0(a)
- eor x4,x4,x13,lsr#6 // sigma1(X[i+14])
- add x15,x15,x8
- add x20,x20,x24 // d+=h
- add x24,x24,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x15,x15,x5
- add x24,x24,x17 // h+=Sigma0(a)
- add x15,x15,x4
- ldr x4,[sp,#8]
- str x7,[sp,#0]
- ror x16,x20,#14
- add x23,x23,x19 // h+=K[i]
- ror x6,x1,#1
- and x17,x21,x20
- ror x5,x14,#19
- bic x19,x22,x20
- ror x7,x24,#28
- add x23,x23,x15 // h+=X[i]
- eor x16,x16,x20,ror#18
- eor x6,x6,x1,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x24,x25 // a^b, b^c in next round
- eor x16,x16,x20,ror#41 // Sigma1(e)
- eor x7,x7,x24,ror#34
- add x23,x23,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x5,x5,x14,ror#61
- eor x6,x6,x1,lsr#7 // sigma0(X[i+1])
- add x23,x23,x16 // h+=Sigma1(e)
- eor x28,x28,x25 // Maj(a,b,c)
- eor x17,x7,x24,ror#39 // Sigma0(a)
- eor x5,x5,x14,lsr#6 // sigma1(X[i+14])
- add x0,x0,x9
- add x27,x27,x23 // d+=h
- add x23,x23,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x0,x0,x6
- add x23,x23,x17 // h+=Sigma0(a)
- add x0,x0,x5
- ldr x5,[sp,#16]
- str x8,[sp,#8]
- ror x16,x27,#14
- add x22,x22,x28 // h+=K[i]
- ror x7,x2,#1
- and x17,x20,x27
- ror x6,x15,#19
- bic x28,x21,x27
- ror x8,x23,#28
- add x22,x22,x0 // h+=X[i]
- eor x16,x16,x27,ror#18
- eor x7,x7,x2,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x23,x24 // a^b, b^c in next round
- eor x16,x16,x27,ror#41 // Sigma1(e)
- eor x8,x8,x23,ror#34
- add x22,x22,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x6,x6,x15,ror#61
- eor x7,x7,x2,lsr#7 // sigma0(X[i+1])
- add x22,x22,x16 // h+=Sigma1(e)
- eor x19,x19,x24 // Maj(a,b,c)
- eor x17,x8,x23,ror#39 // Sigma0(a)
- eor x6,x6,x15,lsr#6 // sigma1(X[i+14])
- add x1,x1,x10
- add x26,x26,x22 // d+=h
- add x22,x22,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x1,x1,x7
- add x22,x22,x17 // h+=Sigma0(a)
- add x1,x1,x6
- ldr x6,[sp,#24]
- str x9,[sp,#16]
- ror x16,x26,#14
- add x21,x21,x19 // h+=K[i]
- ror x8,x3,#1
- and x17,x27,x26
- ror x7,x0,#19
- bic x19,x20,x26
- ror x9,x22,#28
- add x21,x21,x1 // h+=X[i]
- eor x16,x16,x26,ror#18
- eor x8,x8,x3,ror#8
- orr x17,x17,x19 // Ch(e,f,g)
- eor x19,x22,x23 // a^b, b^c in next round
- eor x16,x16,x26,ror#41 // Sigma1(e)
- eor x9,x9,x22,ror#34
- add x21,x21,x17 // h+=Ch(e,f,g)
- and x28,x28,x19 // (b^c)&=(a^b)
- eor x7,x7,x0,ror#61
- eor x8,x8,x3,lsr#7 // sigma0(X[i+1])
- add x21,x21,x16 // h+=Sigma1(e)
- eor x28,x28,x23 // Maj(a,b,c)
- eor x17,x9,x22,ror#39 // Sigma0(a)
- eor x7,x7,x0,lsr#6 // sigma1(X[i+14])
- add x2,x2,x11
- add x25,x25,x21 // d+=h
- add x21,x21,x28 // h+=Maj(a,b,c)
- ldr x28,[x30],#8 // *K++, x19 in next round
- add x2,x2,x8
- add x21,x21,x17 // h+=Sigma0(a)
- add x2,x2,x7
- ldr x7,[sp,#0]
- str x10,[sp,#24]
- ror x16,x25,#14
- add x20,x20,x28 // h+=K[i]
- ror x9,x4,#1
- and x17,x26,x25
- ror x8,x1,#19
- bic x28,x27,x25
- ror x10,x21,#28
- add x20,x20,x2 // h+=X[i]
- eor x16,x16,x25,ror#18
- eor x9,x9,x4,ror#8
- orr x17,x17,x28 // Ch(e,f,g)
- eor x28,x21,x22 // a^b, b^c in next round
- eor x16,x16,x25,ror#41 // Sigma1(e)
- eor x10,x10,x21,ror#34
- add x20,x20,x17 // h+=Ch(e,f,g)
- and x19,x19,x28 // (b^c)&=(a^b)
- eor x8,x8,x1,ror#61
- eor x9,x9,x4,lsr#7 // sigma0(X[i+1])
- add x20,x20,x16 // h+=Sigma1(e)
- eor x19,x19,x22 // Maj(a,b,c)
- eor x17,x10,x21,ror#39 // Sigma0(a)
- eor x8,x8,x1,lsr#6 // sigma1(X[i+14])
- add x3,x3,x12
- add x24,x24,x20 // d+=h
- add x20,x20,x19 // h+=Maj(a,b,c)
- ldr x19,[x30],#8 // *K++, x28 in next round
- add x3,x3,x9
- add x20,x20,x17 // h+=Sigma0(a)
- add x3,x3,x8
- cbnz x19,.Loop_16_xx
-
- ldp x0,x2,[x29,#96]
- ldr x1,[x29,#112]
- sub x30,x30,#648 // rewind
-
- ldp x3,x4,[x0]
- ldp x5,x6,[x0,#2*8]
- add x1,x1,#14*8 // advance input pointer
- ldp x7,x8,[x0,#4*8]
- add x20,x20,x3
- ldp x9,x10,[x0,#6*8]
- add x21,x21,x4
- add x22,x22,x5
- add x23,x23,x6
- stp x20,x21,[x0]
- add x24,x24,x7
- add x25,x25,x8
- stp x22,x23,[x0,#2*8]
- add x26,x26,x9
- add x27,x27,x10
- cmp x1,x2
- stp x24,x25,[x0,#4*8]
- stp x26,x27,[x0,#6*8]
- b.ne .Loop
-
- ldp x19,x20,[x29,#16]
- add sp,sp,#4*8
- ldp x21,x22,[x29,#32]
- ldp x23,x24,[x29,#48]
- ldp x25,x26,[x29,#64]
- ldp x27,x28,[x29,#80]
- ldp x29,x30,[sp],#128
- ret
-.size sha512_block_data_order,.-sha512_block_data_order
-
-.align 6
-.type .LK512,%object
-.LK512:
- .quad 0x428a2f98d728ae22,0x7137449123ef65cd
- .quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
- .quad 0x3956c25bf348b538,0x59f111f1b605d019
- .quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
- .quad 0xd807aa98a3030242,0x12835b0145706fbe
- .quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
- .quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
- .quad 0x9bdc06a725c71235,0xc19bf174cf692694
- .quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
- .quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
- .quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
- .quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
- .quad 0x983e5152ee66dfab,0xa831c66d2db43210
- .quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
- .quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
- .quad 0x06ca6351e003826f,0x142929670a0e6e70
- .quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
- .quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
- .quad 0x650a73548baf63de,0x766a0abb3c77b2a8
- .quad 0x81c2c92e47edaee6,0x92722c851482353b
- .quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
- .quad 0xc24b8b70d0f89791,0xc76c51a30654be30
- .quad 0xd192e819d6ef5218,0xd69906245565a910
- .quad 0xf40e35855771202a,0x106aa07032bbd1b8
- .quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
- .quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
- .quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
- .quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
- .quad 0x748f82ee5defb2fc,0x78a5636f43172f60
- .quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
- .quad 0x90befffa23631e28,0xa4506cebde82bde9
- .quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
- .quad 0xca273eceea26619c,0xd186b8c721c0c207
- .quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
- .quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
- .quad 0x113f9804bef90dae,0x1b710b35131c471b
- .quad 0x28db77f523047d84,0x32caab7b40c72493
- .quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
- .quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
- .quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817
- .quad 0 // terminator
-.size .LK512,.-.LK512
-#ifndef __KERNEL__
-.align 3
-.LOPENSSL_armcap_P:
-# ifdef __ILP32__
- .long OPENSSL_armcap_P-.
-# else
- .quad OPENSSL_armcap_P-.
-# endif
-#endif
-.asciz "SHA512 block transform for ARMv8, CRYPTOGAMS by <appro@openssl.org>"
-.align 2
-#ifndef __KERNEL__
-.comm OPENSSL_armcap_P,4,4
-#endif
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 07ac208edc89..26889dbfe904 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -5,3 +5,5 @@ generic-y += qrwlock.h
generic-y += qspinlock.h
generic-y += set_memory.h
generic-y += user.h
+
+generated-y += cpucaps.h
diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h
index 5df500dcc627..7e157ab6cd50 100644
--- a/arch/arm64/include/asm/alternative-macros.h
+++ b/arch/arm64/include/asm/alternative-macros.h
@@ -3,12 +3,10 @@
#define __ASM_ALTERNATIVE_MACROS_H
#include <asm/cpucaps.h>
+#include <asm/insn-def.h>
#define ARM64_CB_PATCH ARM64_NCAPS
-/* A64 instructions are always 32 bits. */
-#define AARCH64_INSN_SIZE 4
-
#ifndef __ASSEMBLY__
#include <linux/stringify.h>
@@ -97,9 +95,9 @@
.popsection
.subsection 1
663: \insn2
-664: .previous
- .org . - (664b-663b) + (662b-661b)
+664: .org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
+ .previous
.endif
.endm
@@ -169,11 +167,11 @@
*/
.macro alternative_endif
664:
+ .org . - (664b-663b) + (662b-661b)
+ .org . - (662b-661b) + (664b-663b)
.if .Lasm_alt_mode==0
.previous
.endif
- .org . - (664b-663b) + (662b-661b)
- .org . - (662b-661b) + (664b-663b)
.endm
/*
@@ -197,11 +195,6 @@ alternative_endif
#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
-.macro user_alt, label, oldinstr, newinstr, cond
-9999: alternative_insn "\oldinstr", "\newinstr", \cond
- _asm_extable 9999b, \label
-.endm
-
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 880b9054d75c..4ad22c3135db 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -124,7 +124,8 @@ static inline u32 gic_read_rpr(void)
#define gic_read_lpir(c) readq_relaxed(c)
#define gic_write_lpir(v, c) writeq_relaxed(v, c)
-#define gic_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
+#define gic_flush_dcache_to_poc(a,l) \
+ dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
#define gits_read_baser(c) readq_relaxed(c)
#define gits_write_baser(v, c) writeq_relaxed(v, c)
@@ -173,7 +174,7 @@ static inline void gic_pmr_mask_irqs(void)
static inline void gic_arch_enable_irqs(void)
{
- asm volatile ("msr daifclr, #2" : : : "memory");
+ asm volatile ("msr daifclr, #3" : : : "memory");
}
#endif /* __ASSEMBLY__ */
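The change from "#2" to "#3" above widens the unmask from IRQ only to IRQ plus FIQ. For reference, the daifset/daifclr immediate encodes one architectural mask bit per position; the macro names in this sketch are invented for illustration and are not kernel identifiers.

/* Hedged sketch: layout of the daifset/daifclr immediate (names invented). */
#define DAIF_IMM_D	(1 << 3)	/* debug exceptions */
#define DAIF_IMM_A	(1 << 2)	/* SError (asynchronous abort) */
#define DAIF_IMM_I	(1 << 1)	/* IRQ */
#define DAIF_IMM_F	(1 << 0)	/* FIQ */
/* "msr daifclr, #3" therefore clears both the IRQ and FIQ mask bits. */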
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 9f0ec21d6327..88d20f04c64a 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -165,25 +165,6 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
isb();
}
-/*
- * Ensure that reads of the counter are treated the same as memory reads
- * for the purposes of ordering by subsequent memory barriers.
- *
- * This insanity brought to you by speculative system register reads,
- * out-of-order memory accesses, sequence locks and Thomas Gleixner.
- *
- * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
- */
-#define arch_counter_enforce_ordering(val) do { \
- u64 tmp, _val = (val); \
- \
- asm volatile( \
- " eor %0, %1, %1\n" \
- " add %0, sp, %0\n" \
- " ldr xzr, [%0]" \
- : "=r" (tmp) : "r" (_val)); \
-} while (0)
-
static __always_inline u64 __arch_counter_get_cntpct_stable(void)
{
u64 cnt;
@@ -224,8 +205,6 @@ static __always_inline u64 __arch_counter_get_cntvct(void)
return cnt;
}
-#undef arch_counter_enforce_ordering
-
static inline int arch_timer_arch_init(void)
{
return 0;
diff --git a/arch/arm64/include/asm/asm-prototypes.h b/arch/arm64/include/asm/asm-prototypes.h
index 1c9a3a0c5fa5..ec1d9655f885 100644
--- a/arch/arm64/include/asm/asm-prototypes.h
+++ b/arch/arm64/include/asm/asm-prototypes.h
@@ -23,4 +23,10 @@ long long __ashlti3(long long a, int b);
long long __ashrti3(long long a, int b);
long long __lshrti3(long long a, int b);
+/*
+ * This function uses a custom calling convention and cannot be called from C so
+ * this prototype is not entirely accurate.
+ */
+void __hwasan_tag_mismatch(unsigned long addr, unsigned long access_info);
+
#endif /* __ASM_PROTOTYPES_H */
diff --git a/arch/arm64/include/asm/asm_pointer_auth.h b/arch/arm64/include/asm/asm_pointer_auth.h
index 52dead2a8640..f1bba5fc61c4 100644
--- a/arch/arm64/include/asm/asm_pointer_auth.h
+++ b/arch/arm64/include/asm/asm_pointer_auth.h
@@ -7,37 +7,7 @@
#include <asm/cpufeature.h>
#include <asm/sysreg.h>
-#ifdef CONFIG_ARM64_PTR_AUTH
-/*
- * thread.keys_user.ap* as offset exceeds the #imm offset range
- * so use the base value of ldp as thread.keys_user and offset as
- * thread.keys_user.ap*.
- */
- .macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
- mov \tmp1, #THREAD_KEYS_USER
- add \tmp1, \tsk, \tmp1
-alternative_if_not ARM64_HAS_ADDRESS_AUTH
- b .Laddr_auth_skip_\@
-alternative_else_nop_endif
- ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
- msr_s SYS_APIAKEYLO_EL1, \tmp2
- msr_s SYS_APIAKEYHI_EL1, \tmp3
- ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIB]
- msr_s SYS_APIBKEYLO_EL1, \tmp2
- msr_s SYS_APIBKEYHI_EL1, \tmp3
- ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDA]
- msr_s SYS_APDAKEYLO_EL1, \tmp2
- msr_s SYS_APDAKEYHI_EL1, \tmp3
- ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APDB]
- msr_s SYS_APDBKEYLO_EL1, \tmp2
- msr_s SYS_APDBKEYHI_EL1, \tmp3
-.Laddr_auth_skip_\@:
-alternative_if ARM64_HAS_GENERIC_AUTH
- ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APGA]
- msr_s SYS_APGAKEYLO_EL1, \tmp2
- msr_s SYS_APGAKEYHI_EL1, \tmp3
-alternative_else_nop_endif
- .endm
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
.macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
mov \tmp1, #THREAD_KEYS_KERNEL
@@ -60,6 +30,33 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
alternative_else_nop_endif
.endm
+#else /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+ .macro __ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
+ .endm
+
+ .macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
+ .endm
+
+ .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
+ .endm
+
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+#ifdef CONFIG_ARM64_PTR_AUTH
+/*
+ * thread.keys_user.ap* as offset exceeds the #imm offset range
+ * so use the base value of ldp as thread.keys_user and offset as
+ * thread.keys_user.ap*.
+ */
+ .macro __ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
+ mov \tmp1, #THREAD_KEYS_USER
+ add \tmp1, \tsk, \tmp1
+ ldp \tmp2, \tmp3, [\tmp1, #PTRAUTH_USER_KEY_APIA]
+ msr_s SYS_APIAKEYLO_EL1, \tmp2
+ msr_s SYS_APIAKEYHI_EL1, \tmp3
+ .endm
+
.macro __ptrauth_keys_init_cpu tsk, tmp1, tmp2, tmp3
mrs \tmp1, id_aa64isar1_el1
ubfx \tmp1, \tmp1, #ID_AA64ISAR1_APA_SHIFT, #8
@@ -82,17 +79,11 @@ alternative_else_nop_endif
.Lno_addr_auth\@:
.endm
-#else /* CONFIG_ARM64_PTR_AUTH */
+#else /* !CONFIG_ARM64_PTR_AUTH */
.macro ptrauth_keys_install_user tsk, tmp1, tmp2, tmp3
.endm
- .macro ptrauth_keys_install_kernel_nosync tsk, tmp1, tmp2, tmp3
- .endm
-
- .macro ptrauth_keys_install_kernel tsk, tmp1, tmp2, tmp3
- .endm
-
#endif /* CONFIG_ARM64_PTR_AUTH */
#endif /* __ASM_ASM_POINTER_AUTH_H */
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index ca31594d3d6c..89faca0e740d 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -15,6 +15,8 @@
#include <asm-generic/export.h>
#include <asm/asm-offsets.h>
+#include <asm/alternative.h>
+#include <asm/asm-bug.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
@@ -23,6 +25,14 @@
#include <asm/ptrace.h>
#include <asm/thread_info.h>
+ /*
+	 * Provide a wxN alias for each wN register so that we can paste an xN
+ * reference after a 'w' to obtain the 32-bit version.
+ */
+ .irp n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
+ wx\n .req w\n
+ .endr
+
.macro save_and_disable_daif, flags
mrs \flags, daif
msr daifset, #0xf
@@ -40,9 +50,9 @@
msr daif, \flags
.endm
- /* IRQ is the lowest priority flag, unconditionally unmask the rest. */
- .macro enable_da_f
- msr daifclr, #(8 | 4 | 1)
+ /* IRQ/FIQ are the lowest priority flags, unconditionally unmask the rest. */
+ .macro enable_da
+ msr daifclr, #(8 | 4)
.endm
/*
@@ -50,7 +60,7 @@
*/
.macro save_and_disable_irq, flags
mrs \flags, daif
- msr daifset, #2
+ msr daifset, #3
.endm
.macro restore_irq, flags
@@ -120,15 +130,27 @@ alternative_endif
.endm
/*
- * Emit an entry into the exception table
+ * Create an exception table entry for `insn`, which will branch to `fixup`
+ * when an unhandled fault is taken.
*/
- .macro _asm_extable, from, to
+ .macro _asm_extable, insn, fixup
.pushsection __ex_table, "a"
.align 3
- .long (\from - .), (\to - .)
+ .long (\insn - .), (\fixup - .)
.popsection
.endm
+/*
+ * Create an exception table entry for `insn` if `fixup` is provided. Otherwise
+ * do nothing.
+ */
+ .macro _cond_extable, insn, fixup
+ .ifnc \fixup,
+ _asm_extable \insn, \fixup
+ .endif
+ .endm
+
+
#define USER(l, x...) \
9999: x; \
_asm_extable 9999b, l
@@ -222,17 +244,25 @@ lr .req x30 // link register
* @dst: destination register
*/
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
- .macro this_cpu_offset, dst
+ .macro get_this_cpu_offset, dst
mrs \dst, tpidr_el2
.endm
#else
- .macro this_cpu_offset, dst
+ .macro get_this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \dst, tpidr_el1
alternative_else
mrs \dst, tpidr_el2
alternative_endif
.endm
+
+ .macro set_this_cpu_offset, src
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ msr tpidr_el1, \src
+alternative_else
+ msr tpidr_el2, \src
+alternative_endif
+ .endm
#endif
/*
@@ -243,7 +273,7 @@ alternative_endif
.macro adr_this_cpu, dst, sym, tmp
adrp \tmp, \sym
add \dst, \tmp, #:lo12:\sym
- this_cpu_offset \tmp
+ get_this_cpu_offset \tmp
add \dst, \dst, \tmp
.endm
@@ -254,7 +284,7 @@ alternative_endif
*/
.macro ldr_this_cpu dst, sym, tmp
adr_l \dst, \sym
- this_cpu_offset \tmp
+ get_this_cpu_offset \tmp
ldr \dst, [\dst, \tmp]
.endm
@@ -270,12 +300,24 @@ alternative_endif
* provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
*/
.macro read_ctr, reg
+#ifndef __KVM_NVHE_HYPERVISOR__
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
mrs \reg, ctr_el0 // read CTR
nop
alternative_else
ldr_l \reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
+#else
+alternative_if_not ARM64_KVM_PROTECTED_MODE
+ ASM_BUG()
+alternative_else_nop_endif
+alternative_cb kvm_compute_final_ctr_el0
+ movz \reg, #0
+ movk \reg, #0, lsl #16
+ movk \reg, #0, lsl #32
+ movk \reg, #0, lsl #48
+alternative_cb_end
+#endif
.endm
@@ -353,51 +395,53 @@ alternative_endif
bfi \tcr, \tmp0, \pos, #3
.endm
+ .macro __dcache_op_workaround_clean_cache, op, addr
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+ dc \op, \addr
+alternative_else
+ dc civac, \addr
+alternative_endif
+ .endm
+
/*
* Macro to perform a data cache maintenance for the interval
- * [kaddr, kaddr + size)
+ * [start, end)
*
* op: operation passed to dc instruction
 * domain:	domain used in dsb instruction
- * kaddr: starting virtual address of the region
- * size: size of the region
- * Corrupts: kaddr, size, tmp1, tmp2
+ * start: starting virtual address of the region
+ * end: end virtual address of the region
+ * fixup: optional label to branch to on user fault
+ * Corrupts: start, end, tmp1, tmp2
*/
- .macro __dcache_op_workaround_clean_cache, op, kaddr
-alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
- dc \op, \kaddr
-alternative_else
- dc civac, \kaddr
-alternative_endif
- .endm
-
- .macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
+ .macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
dcache_line_size \tmp1, \tmp2
- add \size, \kaddr, \size
sub \tmp2, \tmp1, #1
- bic \kaddr, \kaddr, \tmp2
-9998:
+ bic \start, \start, \tmp2
+.Ldcache_op\@:
.ifc \op, cvau
- __dcache_op_workaround_clean_cache \op, \kaddr
+ __dcache_op_workaround_clean_cache \op, \start
.else
.ifc \op, cvac
- __dcache_op_workaround_clean_cache \op, \kaddr
+ __dcache_op_workaround_clean_cache \op, \start
.else
.ifc \op, cvap
- sys 3, c7, c12, 1, \kaddr // dc cvap
+ sys 3, c7, c12, 1, \start // dc cvap
.else
.ifc \op, cvadp
- sys 3, c7, c13, 1, \kaddr // dc cvadp
+ sys 3, c7, c13, 1, \start // dc cvadp
.else
- dc \op, \kaddr
+ dc \op, \start
.endif
.endif
.endif
.endif
- add \kaddr, \kaddr, \tmp1
- cmp \kaddr, \size
- b.lo 9998b
+ add \start, \start, \tmp1
+ cmp \start, \end
+ b.lo .Ldcache_op\@
dsb \domain
+
+ _cond_extable .Ldcache_op\@, \fixup
.endm
/*
@@ -405,20 +449,22 @@ alternative_endif
* [start, end)
*
* start, end: virtual addresses describing the region
- * label: A label to branch to on user fault.
+ * fixup: optional label to branch to on user fault
* Corrupts: tmp1, tmp2
*/
- .macro invalidate_icache_by_line start, end, tmp1, tmp2, label
+ .macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
icache_line_size \tmp1, \tmp2
sub \tmp2, \tmp1, #1
bic \tmp2, \start, \tmp2
-9997:
-USER(\label, ic ivau, \tmp2) // invalidate I line PoU
+.Licache_op\@:
+ ic ivau, \tmp2 // invalidate I line PoU
add \tmp2, \tmp2, \tmp1
cmp \tmp2, \end
- b.lo 9997b
+ b.lo .Licache_op\@
dsb ish
isb
+
+ _cond_extable .Licache_op\@, \fixup
.endm
/*
@@ -676,11 +722,11 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
.endm
/*
- * Set SCTLR_EL1 to the passed value, and invalidate the local icache
+ * Set SCTLR_ELx to the @reg value, and invalidate the local icache
* in the process. This is called when setting the MMU on.
*/
-.macro set_sctlr_el1, reg
- msr sctlr_el1, \reg
+.macro set_sctlr, sreg, reg
+ msr \sreg, \reg
isb
/*
* Invalidate the local I-cache so that any instructions fetched
@@ -692,90 +738,41 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU
isb
.endm
-/*
- * Check whether to yield to another runnable task from kernel mode NEON code
- * (which runs with preemption disabled).
- *
- * if_will_cond_yield_neon
- * // pre-yield patchup code
- * do_cond_yield_neon
- * // post-yield patchup code
- * endif_yield_neon <label>
- *
- * where <label> is optional, and marks the point where execution will resume
- * after a yield has been performed. If omitted, execution resumes right after
- * the endif_yield_neon invocation. Note that the entire sequence, including
- * the provided patchup code, will be omitted from the image if
- * CONFIG_PREEMPTION is not defined.
- *
- * As a convenience, in the case where no patchup code is required, the above
- * sequence may be abbreviated to
- *
- * cond_yield_neon <label>
- *
- * Note that the patchup code does not support assembler directives that change
- * the output section, any use of such directives is undefined.
- *
- * The yield itself consists of the following:
- * - Check whether the preempt count is exactly 1 and a reschedule is also
- * needed. If so, calling of preempt_enable() in kernel_neon_end() will
- * trigger a reschedule. If it is not the case, yielding is pointless.
- * - Disable and re-enable kernel mode NEON, and branch to the yield fixup
- * code.
- *
- * This macro sequence may clobber all CPU state that is not guaranteed by the
- * AAPCS to be preserved across an ordinary function call.
- */
-
- .macro cond_yield_neon, lbl
- if_will_cond_yield_neon
- do_cond_yield_neon
- endif_yield_neon \lbl
- .endm
-
- .macro if_will_cond_yield_neon
-#ifdef CONFIG_PREEMPTION
- get_current_task x0
- ldr x0, [x0, #TSK_TI_PREEMPT]
- sub x0, x0, #PREEMPT_DISABLE_OFFSET
- cbz x0, .Lyield_\@
- /* fall through to endif_yield_neon */
- .subsection 1
-.Lyield_\@ :
-#else
- .section ".discard.cond_yield_neon", "ax"
-#endif
- .endm
-
- .macro do_cond_yield_neon
- bl kernel_neon_end
- bl kernel_neon_begin
- .endm
+.macro set_sctlr_el1, reg
+ set_sctlr sctlr_el1, \reg
+.endm
- .macro endif_yield_neon, lbl
- .ifnb \lbl
- b \lbl
- .else
- b .Lyield_out_\@
- .endif
- .previous
-.Lyield_out_\@ :
- .endm
+.macro set_sctlr_el2, reg
+ set_sctlr sctlr_el2, \reg
+.endm
/*
- * Check whether preempt-disabled code should yield as soon as it
- * is able. This is the case if re-enabling preemption a single
- * time results in a preempt count of zero, and the TIF_NEED_RESCHED
- * flag is set. (Note that the latter is stored negated in the
- * top word of the thread_info::preempt_count field)
+ * Check whether preempt/bh-disabled asm code should yield as soon as
+ * it is able. This is the case if we are currently running in task
+ * context, and either a softirq is pending, or the TIF_NEED_RESCHED
+ * flag is set and re-enabling preemption a single time would result in
+ * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
+ * stored negated in the top word of the thread_info::preempt_count
+ * field)
*/
- .macro cond_yield, lbl:req, tmp:req
-#ifdef CONFIG_PREEMPTION
+ .macro cond_yield, lbl:req, tmp:req, tmp2:req
get_current_task \tmp
ldr \tmp, [\tmp, #TSK_TI_PREEMPT]
+ /*
+ * If we are serving a softirq, there is no point in yielding: the
+ * softirq will not be preempted no matter what we do, so we should
+ * run to completion as quickly as we can.
+ */
+ tbnz \tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
+#ifdef CONFIG_PREEMPTION
sub \tmp, \tmp, #PREEMPT_DISABLE_OFFSET
cbz \tmp, \lbl
#endif
+ adr_l \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
+ get_this_cpu_offset \tmp2
+ ldr w\tmp, [\tmp, \tmp2]
+ cbnz w\tmp, \lbl // yield on pending softirq in task context
+.Lnoyield_\@:
.endm
/*
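The cond_yield decision above is easier to follow in C. The sketch below mirrors the assembly under the assumption stated in its comment (TIF_NEED_RESCHED stored negated in the top word of thread_info::preempt_count); the helper name and the pending_softirqs parameter are illustrative, not kernel API.

/* Minimal C rendering of the cond_yield decision; illustrative only. */
static bool should_yield(u64 ti_preempt_count, u32 pending_softirqs)
{
	/* Serving a softirq: it cannot be preempted, so run to completion. */
	if (ti_preempt_count & (1UL << SOFTIRQ_SHIFT))
		return false;

	/*
	 * TIF_NEED_RESCHED is stored negated in the top word, so the value
	 * hits zero only when the preempt count would drop to zero *and* a
	 * reschedule is pending.
	 */
	if (IS_ENABLED(CONFIG_PREEMPTION) &&
	    ti_preempt_count - PREEMPT_DISABLE_OFFSET == 0)
		return true;

	/* In task context, also yield so a pending softirq can run. */
	return pending_softirqs != 0;
}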
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index b56a4b2bc248..c9979273d389 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -223,6 +223,4 @@ static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#define ARCH_ATOMIC
-
#endif /* __ASM_ATOMIC_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index c3009b0e5239..451e11e5fd23 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -23,12 +23,9 @@
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
#define psb_csync() asm volatile("hint #17" : : : "memory")
+#define tsb_csync() asm volatile("hint #18" : : : "memory")
#define csdb() asm volatile("hint #20" : : : "memory")
-#define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \
- SB_BARRIER_INSN"nop\n", \
- ARM64_HAS_SB))
-
#ifdef CONFIG_ARM64_PSEUDO_NMI
#define pmr_sync() \
do { \
@@ -70,6 +67,25 @@ static inline unsigned long array_index_mask_nospec(unsigned long idx,
return mask;
}
+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ *
+ * This insanity brought to you by speculative system register reads,
+ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+ *
+ * https://lore.kernel.org/r/alpine.DEB.2.21.1902081950260.1662@nanos.tec.linutronix.de/
+ */
+#define arch_counter_enforce_ordering(val) do { \
+ u64 tmp, _val = (val); \
+ \
+ asm volatile( \
+ " eor %0, %1, %1\n" \
+ " add %0, sp, %0\n" \
+ " ldr xzr, [%0]" \
+ : "=r" (tmp) : "r" (_val)); \
+} while (0)
+
#define __smp_mb() dmb(ish)
#define __smp_rmb() dmb(ishld)
#define __smp_wmb() dmb(ishst)
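A minimal usage sketch for the relocated arch_counter_enforce_ordering(), assuming the usual read_sysreg() accessor; the wrapper function itself is illustrative:

static inline u64 counter_read_ordered(void)
{
	u64 cnt = read_sysreg(cntvct_el0);

	/* The zero-result EOR plus a dummy load off SP gives the counter
	 * read a fake address dependency, so later barriers order it. */
	arch_counter_enforce_ordering(cnt);
	return cnt;
}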
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index a074459f8f2f..a9c0716e7440 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -47,7 +47,7 @@
* cache before the transfer is done, causing old data to be seen by
* the CPU.
*/
-#define ARCH_DMA_MINALIGN (128)
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#ifdef CONFIG_KASAN_SW_TAGS
#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 52e5c1623224..543c997eb3b7 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -30,45 +30,58 @@
* the implementation assumes non-aliasing VIPT D-cache and (aliasing)
* VIPT I-cache.
*
- * flush_icache_range(start, end)
+ * All functions below apply to the interval [start, end)
+ * - start - virtual start address (inclusive)
+ * - end - virtual end address (exclusive)
*
- * Ensure coherency between the I-cache and the D-cache in the
- * region described by start, end.
- * - start - virtual start address
- * - end - virtual end address
+ * caches_clean_inval_pou(start, end)
*
- * invalidate_icache_range(start, end)
+ * Ensure coherency between the I-cache and the D-cache region to
+ * the Point of Unification.
*
- * Invalidate the I-cache in the region described by start, end.
- * - start - virtual start address
- * - end - virtual end address
+ * caches_clean_inval_user_pou(start, end)
*
- * __flush_cache_user_range(start, end)
+ * Ensure coherency between the I-cache and the D-cache region to
+ * the Point of Unification.
+ * Use only if the region might access user memory.
*
- * Ensure coherency between the I-cache and the D-cache in the
- * region described by start, end.
- * - start - virtual start address
- * - end - virtual end address
+ * icache_inval_pou(start, end)
*
- * __flush_dcache_area(kaddr, size)
+ * Invalidate I-cache region to the Point of Unification.
*
- * Ensure that the data held in page is written back.
- * - kaddr - page address
- * - size - region size
+ * dcache_clean_inval_poc(start, end)
+ *
+ * Clean and invalidate D-cache region to the Point of Coherency.
+ *
+ * dcache_inval_poc(start, end)
+ *
+ * Invalidate D-cache region to the Point of Coherency.
+ *
+ * dcache_clean_poc(start, end)
+ *
+ * Clean D-cache region to the Point of Coherency.
+ *
+ * dcache_clean_pop(start, end)
+ *
+ * Clean D-cache region to the Point of Persistence.
+ *
+ * dcache_clean_pou(start, end)
+ *
+ * Clean D-cache region to the Point of Unification.
*/
-extern void __flush_icache_range(unsigned long start, unsigned long end);
-extern int invalidate_icache_range(unsigned long start, unsigned long end);
-extern void __flush_dcache_area(void *addr, size_t len);
-extern void __inval_dcache_area(void *addr, size_t len);
-extern void __clean_dcache_area_poc(void *addr, size_t len);
-extern void __clean_dcache_area_pop(void *addr, size_t len);
-extern void __clean_dcache_area_pou(void *addr, size_t len);
-extern long __flush_cache_user_range(unsigned long start, unsigned long end);
-extern void sync_icache_aliases(void *kaddr, unsigned long len);
+extern void caches_clean_inval_pou(unsigned long start, unsigned long end);
+extern void icache_inval_pou(unsigned long start, unsigned long end);
+extern void dcache_clean_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_inval_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_poc(unsigned long start, unsigned long end);
+extern void dcache_clean_pop(unsigned long start, unsigned long end);
+extern void dcache_clean_pou(unsigned long start, unsigned long end);
+extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
+extern void sync_icache_aliases(unsigned long start, unsigned long end);
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
- __flush_icache_range(start, end);
+ caches_clean_inval_pou(start, end);
/*
* IPI all online CPUs so that they undergo a context synchronization
@@ -122,7 +135,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
-static __always_inline void __flush_icache_all(void)
+static __always_inline void icache_inval_all_pou(void)
{
if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
return;
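A hedged sketch of how the renamed [start, end) interfaces are called; real drivers would normally reach these through the DMA API rather than directly:

static void dma_buffer_example(void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf;
	unsigned long end = start + len;	/* exclusive */

	dcache_clean_poc(start, end);	/* push dirty lines to PoC */
	/* ... device writes into buf via DMA ... */
	dcache_inval_poc(start, end);	/* drop stale cached copies */
}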
diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
index 93a161b3bf3f..dc52b733675d 100644
--- a/arch/arm64/include/asm/checksum.h
+++ b/arch/arm64/include/asm/checksum.h
@@ -37,7 +37,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
} while (--n > 0);
sum += ((sum >> 32) | (sum << 32));
- return csum_fold((__force u32)(sum >> 32));
+ return csum_fold((__force __wsum)(sum >> 32));
}
#define ip_fast_csum ip_fast_csum
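The rotate-and-add line above performs a ones'-complement fold of the 64-bit accumulator; a standalone illustration, with an invented helper name:

static inline u32 fold_to_32(u64 sum)
{
	/* Adding the word rotated by 32 sums the two halves with
	 * end-around carry; the folded result lands in the top half. */
	sum += (sum >> 32) | (sum << 32);
	return (u32)(sum >> 32);
}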
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
index 6fb2e6bcc392..dc3ea4080e2e 100644
--- a/arch/arm64/include/asm/compiler.h
+++ b/arch/arm64/include/asm/compiler.h
@@ -23,4 +23,20 @@
#define __builtin_return_address(val) \
(void *)(ptrauth_clear_pac((unsigned long)__builtin_return_address(val)))
+#ifdef CONFIG_CFI_CLANG
+/*
+ * With CONFIG_CFI_CLANG, the compiler replaces function address
+ * references with the address of the function's CFI jump table
+ * entry. The function_nocfi macro always returns the address of the
+ * actual function instead.
+ */
+#define function_nocfi(x) ({ \
+ void *addr; \
+ asm("adrp %0, " __stringify(x) "\n\t" \
+ "add %0, %0, :lo12:" __stringify(x) \
+ : "=r" (addr)); \
+ addr; \
+})
+#endif
+
#endif /* __ASM_COMPILER_H */
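A minimal sketch of the intended use, assuming a bare assembly entry point named secondary_entry; both the symbol and the wrapper are illustrative:

extern void secondary_entry(void);	/* assumed asm entry point */

static void *secondary_entry_addr(void)
{
	/* Return the real function body, not its CFI jump-table slot,
	 * for code (e.g. firmware) that will branch to it directly. */
	return function_nocfi(secondary_entry);
}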
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 7faae6ff3ab4..0f6d16faa540 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -12,26 +12,7 @@
/*
* Records attributes of an individual CPU.
*/
-struct cpuinfo_arm64 {
- struct cpu cpu;
- struct kobject kobj;
- u32 reg_ctr;
- u32 reg_cntfrq;
- u32 reg_dczid;
- u32 reg_midr;
- u32 reg_revidr;
-
- u64 reg_id_aa64dfr0;
- u64 reg_id_aa64dfr1;
- u64 reg_id_aa64isar0;
- u64 reg_id_aa64isar1;
- u64 reg_id_aa64mmfr0;
- u64 reg_id_aa64mmfr1;
- u64 reg_id_aa64mmfr2;
- u64 reg_id_aa64pfr0;
- u64 reg_id_aa64pfr1;
- u64 reg_id_aa64zfr0;
-
+struct cpuinfo_32bit {
u32 reg_id_dfr0;
u32 reg_id_dfr1;
u32 reg_id_isar0;
@@ -54,6 +35,30 @@ struct cpuinfo_arm64 {
u32 reg_mvfr0;
u32 reg_mvfr1;
u32 reg_mvfr2;
+};
+
+struct cpuinfo_arm64 {
+ struct cpu cpu;
+ struct kobject kobj;
+ u64 reg_ctr;
+ u64 reg_cntfrq;
+ u64 reg_dczid;
+ u64 reg_midr;
+ u64 reg_revidr;
+ u64 reg_gmid;
+
+ u64 reg_id_aa64dfr0;
+ u64 reg_id_aa64dfr1;
+ u64 reg_id_aa64isar0;
+ u64 reg_id_aa64isar1;
+ u64 reg_id_aa64mmfr0;
+ u64 reg_id_aa64mmfr1;
+ u64 reg_id_aa64mmfr2;
+ u64 reg_id_aa64pfr0;
+ u64 reg_id_aa64pfr1;
+ u64 reg_id_aa64zfr0;
+
+ struct cpuinfo_32bit aarch32;
/* pseudo-ZCR for recording maximum ZCR_EL1 LEN value: */
u64 reg_zcr;
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
deleted file mode 100644
index b77d997b173b..000000000000
--- a/arch/arm64/include/asm/cpucaps.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm64/include/asm/cpucaps.h
- *
- * Copyright (C) 2016 ARM Ltd.
- */
-#ifndef __ASM_CPUCAPS_H
-#define __ASM_CPUCAPS_H
-
-#define ARM64_WORKAROUND_CLEAN_CACHE 0
-#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
-#define ARM64_WORKAROUND_845719 2
-#define ARM64_HAS_SYSREG_GIC_CPUIF 3
-#define ARM64_HAS_PAN 4
-#define ARM64_HAS_LSE_ATOMICS 5
-#define ARM64_WORKAROUND_CAVIUM_23154 6
-#define ARM64_WORKAROUND_834220 7
-#define ARM64_HAS_NO_HW_PREFETCH 8
-#define ARM64_HAS_VIRT_HOST_EXTN 11
-#define ARM64_WORKAROUND_CAVIUM_27456 12
-#define ARM64_HAS_32BIT_EL0 13
-#define ARM64_SPECTRE_V3A 14
-#define ARM64_HAS_CNP 15
-#define ARM64_HAS_NO_FPSIMD 16
-#define ARM64_WORKAROUND_REPEAT_TLBI 17
-#define ARM64_WORKAROUND_QCOM_FALKOR_E1003 18
-#define ARM64_WORKAROUND_858921 19
-#define ARM64_WORKAROUND_CAVIUM_30115 20
-#define ARM64_HAS_DCPOP 21
-#define ARM64_SVE 22
-#define ARM64_UNMAP_KERNEL_AT_EL0 23
-#define ARM64_SPECTRE_V2 24
-#define ARM64_HAS_RAS_EXTN 25
-#define ARM64_WORKAROUND_843419 26
-#define ARM64_HAS_CACHE_IDC 27
-#define ARM64_HAS_CACHE_DIC 28
-#define ARM64_HW_DBM 29
-#define ARM64_SPECTRE_V4 30
-#define ARM64_MISMATCHED_CACHE_TYPE 31
-#define ARM64_HAS_STAGE2_FWB 32
-#define ARM64_HAS_CRC32 33
-#define ARM64_SSBS 34
-#define ARM64_WORKAROUND_1418040 35
-#define ARM64_HAS_SB 36
-#define ARM64_WORKAROUND_SPECULATIVE_AT 37
-#define ARM64_HAS_ADDRESS_AUTH_ARCH 38
-#define ARM64_HAS_ADDRESS_AUTH_IMP_DEF 39
-#define ARM64_HAS_GENERIC_AUTH_ARCH 40
-#define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
-#define ARM64_HAS_IRQ_PRIO_MASKING 42
-#define ARM64_HAS_DCPODP 43
-#define ARM64_WORKAROUND_1463225 44
-#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM 45
-#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
-#define ARM64_WORKAROUND_1542419 47
-#define ARM64_HAS_E0PD 48
-#define ARM64_HAS_RNG 49
-#define ARM64_HAS_AMU_EXTN 50
-#define ARM64_HAS_ADDRESS_AUTH 51
-#define ARM64_HAS_GENERIC_AUTH 52
-#define ARM64_HAS_32BIT_EL1 53
-#define ARM64_BTI 54
-#define ARM64_HAS_ARMv8_4_TTL 55
-#define ARM64_HAS_TLB_RANGE 56
-#define ARM64_MTE 57
-#define ARM64_WORKAROUND_1508412 58
-#define ARM64_HAS_LDAPR 59
-#define ARM64_KVM_PROTECTED_MODE 60
-
-#define ARM64_NCAPS 61
-
-#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 61177bac49fa..9bb9d11750d7 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -63,6 +63,23 @@ struct arm64_ftr_bits {
s64 safe_val; /* safe value for FTR_EXACT features */
};
+/*
+ * Describe the early feature override to the core override code:
+ *
+ * @val Values that are to be merged into the final
+ * sanitised value of the register. Only the bitfields
+ * set to 1 in @mask are valid.
+ * @mask Mask of the features that are overridden by @val
+ *
+ * A @mask field set to full-1 indicates that the corresponding field
+ * in @val is a valid override.
+ *
+ * A @mask field set to full-0 with the corresponding @val field set
+ * to full-0 denotes that this field has no override.
+ *
+ * A @mask field set to full-0 with the corresponding @val field set
+ * to full-1 denotes that this field has an invalid override.
+ */
struct arm64_ftr_override {
u64 val;
u64 mask;
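For orientation, a minimal sketch of how such an override folds into a sanitised register value; apply_ftr_override() is a hypothetical helper, not the in-tree code:

static u64 apply_ftr_override(u64 sanitised,
			      const struct arm64_ftr_override *ovr)
{
	/* Keep unaffected bits; substitute the fields marked valid in @mask. */
	return (sanitised & ~ovr->mask) | (ovr->val & ovr->mask);
}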
@@ -602,6 +619,13 @@ static inline bool id_aa64pfr0_sve(u64 pfr0)
return val > 0;
}
+static inline bool id_aa64pfr1_mte(u64 pfr1)
+{
+ u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_MTE_SHIFT);
+
+ return val >= ID_AA64PFR1_MTE;
+}
+
void __init setup_cpu_features(void);
void check_local_cpu_capabilities(void);
@@ -613,9 +637,15 @@ static inline bool cpu_supports_mixed_endian_el0(void)
return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
+const struct cpumask *system_32bit_el0_cpumask(void);
+DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
static inline bool system_supports_32bit_el0(void)
{
- return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
+ u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+
+ return static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
+ id_aa64pfr0_32bit_el0(pfr0);
}
static inline bool system_supports_4kb_granule(void)
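A hedged sketch of how a caller might honour asymmetric 32-bit EL0 support after this change; place_compat_task() and the scheduler glue around it are illustrative, not the in-tree code:

static void place_compat_task(struct task_struct *tsk)
{
	if (!system_supports_32bit_el0())
		return;

	/* On mismatched systems only a subset of CPUs can run 32-bit EL0. */
	if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
		set_cpus_allowed_ptr(tsk, system_32bit_el0_cpumask());
}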
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
index 3c5ddb429ea2..14a19d1141bd 100644
--- a/arch/arm64/include/asm/cpuidle.h
+++ b/arch/arm64/include/asm/cpuidle.h
@@ -18,4 +18,39 @@ static inline int arm_cpuidle_suspend(int index)
return -EOPNOTSUPP;
}
#endif
+
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+#include <asm/arch_gicv3.h>
+
+struct arm_cpuidle_irq_context {
+ unsigned long pmr;
+ unsigned long daif_bits;
+};
+
+#define arm_cpuidle_save_irq_context(__c) \
+ do { \
+ struct arm_cpuidle_irq_context *c = __c; \
+ if (system_uses_irq_prio_masking()) { \
+ c->daif_bits = read_sysreg(daif); \
+ write_sysreg(c->daif_bits | PSR_I_BIT | PSR_F_BIT, \
+ daif); \
+ c->pmr = gic_read_pmr(); \
+ gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET); \
+ } \
+ } while (0)
+
+#define arm_cpuidle_restore_irq_context(__c) \
+ do { \
+ struct arm_cpuidle_irq_context *c = __c; \
+ if (system_uses_irq_prio_masking()) { \
+ gic_write_pmr(c->pmr); \
+ write_sysreg(c->daif_bits, daif); \
+ } \
+ } while (0)
+#else
+struct arm_cpuidle_irq_context { };
+
+#define arm_cpuidle_save_irq_context(c) (void)c
+#define arm_cpuidle_restore_irq_context(c) (void)c
+#endif
#endif
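A usage sketch for the new helpers: an idle entry path brackets the low-power call so pseudo-NMI priority masking is saved and restored around it; enter_low_power_state() is a hypothetical stand-in:

static int my_enter_idle(int index)
{
	struct arm_cpuidle_irq_context context;
	int ret;

	arm_cpuidle_save_irq_context(&context);
	ret = enter_low_power_state(index);	/* hypothetical */
	arm_cpuidle_restore_irq_context(&context);

	return ret;
}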
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index ef5b040dee44..6231e1f0abe7 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -59,6 +59,7 @@
#define ARM_CPU_IMP_NVIDIA 0x4E
#define ARM_CPU_IMP_FUJITSU 0x46
#define ARM_CPU_IMP_HISI 0x48
+#define ARM_CPU_IMP_APPLE 0x61
#define ARM_CPU_PART_AEM_V8 0xD0F
#define ARM_CPU_PART_FOUNDATION 0xD00
@@ -99,6 +100,9 @@
#define HISI_CPU_PART_TSV110 0xD01
+#define APPLE_CPU_PART_M1_ICESTORM 0x022
+#define APPLE_CPU_PART_M1_FIRESTORM 0x023
+
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
@@ -127,6 +131,8 @@
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
+#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
+#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1 (v0r0 and v1r0) */
#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 1c26d7baa67f..55f57dfa8e2f 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -13,8 +13,8 @@
#include <asm/ptrace.h>
#define DAIF_PROCCTX 0
-#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
-#define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT)
+#define DAIF_PROCCTX_NOIRQ (PSR_I_BIT | PSR_F_BIT)
+#define DAIF_ERRCTX (PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
@@ -47,7 +47,7 @@ static inline unsigned long local_daif_save_flags(void)
if (system_uses_irq_prio_masking()) {
/* If IRQs are masked with PMR, reflect it in the flags */
if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON)
- flags |= PSR_I_BIT;
+ flags |= PSR_I_BIT | PSR_F_BIT;
}
return flags;
@@ -69,7 +69,7 @@ static inline void local_daif_restore(unsigned long flags)
bool irq_disabled = flags & PSR_I_BIT;
WARN_ON(system_has_prio_mask_debugging() &&
- !(read_sysreg(daif) & PSR_I_BIT));
+ (read_sysreg(daif) & (PSR_I_BIT | PSR_F_BIT)) != (PSR_I_BIT | PSR_F_BIT));
if (!irq_disabled) {
trace_hardirqs_on();
@@ -86,7 +86,7 @@ static inline void local_daif_restore(unsigned long flags)
* If interrupts are disabled but we can take
* asynchronous errors, we can take NMIs
*/
- flags &= ~PSR_I_BIT;
+ flags &= ~(PSR_I_BIT | PSR_F_BIT);
pmr = GIC_PRIO_IRQOFF;
} else {
pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;
@@ -131,6 +131,9 @@ static inline void local_daif_inherit(struct pt_regs *regs)
if (interrupts_enabled(regs))
trace_hardirqs_on();
+ if (system_uses_irq_prio_masking())
+ gic_write_pmr(regs->pmr_save);
+
/*
* We can't use local_daif_restore(regs->pstate) here as
* system_has_prio_mask_debugging() won't restore the I bit if it can
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 3578aba9c608..1bed37eb013a 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -137,7 +137,7 @@ void efi_virtmap_unload(void);
static inline void efi_capsule_flush_cache_range(void *addr, int size)
{
- __flush_dcache_area(addr, size);
+ dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}
#endif /* _ASM_EFI_H */
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index d77d358f9395..21fa330f498d 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -65,6 +65,19 @@
// use EL1&0 translation.
.Lskip_spe_\@:
+ /* Trace buffer */
+ ubfx x0, x1, #ID_AA64DFR0_TRBE_SHIFT, #4
+ cbz x0, .Lskip_trace_\@ // Skip if TraceBuffer is not present
+
+ mrs_s x0, SYS_TRBIDR_EL1
+ and x0, x0, TRBIDR_PROG
+ cbnz x0, .Lskip_trace_\@ // If TRBE is available at EL2
+
+ mov x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
+ orr x2, x2, x0 // allow the EL1&0 translation
+ // to own it.
+
+.Lskip_trace_\@:
msr mdcr_el2, x2 // Configure debug traps
.endm
@@ -131,6 +144,26 @@
.Lskip_sve_\@:
.endm
+/* Disable any fine-grained traps */
+.macro __init_el2_fgt
+ mrs x1, id_aa64mmfr0_el1
+ ubfx x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
+ cbz x1, .Lskip_fgt_\@
+
+ msr_s SYS_HDFGRTR_EL2, xzr
+ msr_s SYS_HDFGWTR_EL2, xzr
+ msr_s SYS_HFGRTR_EL2, xzr
+ msr_s SYS_HFGWTR_EL2, xzr
+ msr_s SYS_HFGITR_EL2, xzr
+
+ mrs x1, id_aa64pfr0_el1 // AMU traps UNDEF without AMU
+ ubfx x1, x1, #ID_AA64PFR0_AMU_SHIFT, #4
+ cbz x1, .Lskip_fgt_\@
+
+ msr_s SYS_HAFGRTR_EL2, xzr
+.Lskip_fgt_\@:
+.endm
+
.macro __init_el2_nvhe_prepare_eret
mov x0, #INIT_PSTATE_EL1
msr spsr_el2, x0
@@ -155,6 +188,7 @@
__init_el2_nvhe_idregs
__init_el2_nvhe_cptr
__init_el2_nvhe_sve
+ __init_el2_fgt
__init_el2_nvhe_prepare_eret
.endm
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 6546158d2f2d..4afbc45b8bb0 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -31,20 +31,35 @@ static inline u32 disr_to_esr(u64 disr)
return esr;
}
-asmlinkage void el1_sync_handler(struct pt_regs *regs);
-asmlinkage void el0_sync_handler(struct pt_regs *regs);
-asmlinkage void el0_sync_compat_handler(struct pt_regs *regs);
+asmlinkage void handle_bad_stack(struct pt_regs *regs);
-asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
-asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
+asmlinkage void el1t_64_sync_handler(struct pt_regs *regs);
+asmlinkage void el1t_64_irq_handler(struct pt_regs *regs);
+asmlinkage void el1t_64_fiq_handler(struct pt_regs *regs);
+asmlinkage void el1t_64_error_handler(struct pt_regs *regs);
+
+asmlinkage void el1h_64_sync_handler(struct pt_regs *regs);
+asmlinkage void el1h_64_irq_handler(struct pt_regs *regs);
+asmlinkage void el1h_64_fiq_handler(struct pt_regs *regs);
+asmlinkage void el1h_64_error_handler(struct pt_regs *regs);
+
+asmlinkage void el0t_64_sync_handler(struct pt_regs *regs);
+asmlinkage void el0t_64_irq_handler(struct pt_regs *regs);
+asmlinkage void el0t_64_fiq_handler(struct pt_regs *regs);
+asmlinkage void el0t_64_error_handler(struct pt_regs *regs);
+
+asmlinkage void el0t_32_sync_handler(struct pt_regs *regs);
+asmlinkage void el0t_32_irq_handler(struct pt_regs *regs);
+asmlinkage void el0t_32_fiq_handler(struct pt_regs *regs);
+asmlinkage void el0t_32_error_handler(struct pt_regs *regs);
+
+asmlinkage void call_on_irq_stack(struct pt_regs *regs,
+ void (*func)(struct pt_regs *));
asmlinkage void enter_from_user_mode(void);
asmlinkage void exit_to_user_mode(void);
-void arm64_enter_nmi(struct pt_regs *regs);
-void arm64_exit_nmi(struct pt_regs *regs);
void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
void do_bti(struct pt_regs *regs);
-asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
struct pt_regs *regs);
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs);
@@ -57,4 +72,7 @@ void do_cp15instr(unsigned int esr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
+void do_serror(struct pt_regs *regs, unsigned int esr);
+
+void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index bec5f14b622a..c072161d5c65 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -69,10 +69,11 @@ static inline void *sve_pffr(struct thread_struct *thread)
extern void sve_save_state(void *state, u32 *pfpsr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
unsigned long vq_minus_1);
-extern void sve_flush_live(void);
+extern void sve_flush_live(unsigned long vq_minus_1);
extern void sve_load_from_fpsimd_state(struct user_fpsimd_state const *state,
unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
+extern void sve_set_vq(unsigned long vq_minus_1);
struct arm64_cpu_capabilities;
extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
@@ -130,6 +131,15 @@ static inline void sve_user_enable(void)
sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
}
+#define sve_cond_update_zcr_vq(val, reg) \
+ do { \
+ u64 __zcr = read_sysreg_s((reg)); \
+ u64 __new = __zcr & ~ZCR_ELx_LEN_MASK; \
+ __new |= (val) & ZCR_ELx_LEN_MASK; \
+ if (__zcr != __new) \
+ write_sysreg_s(__new, (reg)); \
+ } while (0)
+
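A hedged usage sketch: the macro only writes the register when the LEN field actually changes, so callers can invoke it unconditionally on hot paths (vq_minus_1 is illustrative):

	sve_cond_update_zcr_vq(vq_minus_1, SYS_ZCR_EL1);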
/*
* Probing and setup functions.
* Calls to these functions must be serialised with one another.
@@ -159,6 +169,8 @@ static inline int sve_get_current_vl(void)
static inline void sve_user_disable(void) { BUILD_BUG(); }
static inline void sve_user_enable(void) { BUILD_BUG(); }
+#define sve_cond_update_zcr_vq(val, reg) do { } while (0)
+
static inline void sve_init_vq_map(void) { }
static inline void sve_update_vq_map(void) { }
static inline int sve_verify_vq_map(void) { return 0; }
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index af43367534c7..059204477ce6 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -6,6 +6,8 @@
* Author: Catalin Marinas <catalin.marinas@arm.com>
*/
+#include <asm/assembler.h>
+
.macro fpsimd_save state, tmpnr
stp q0, q1, [\state, #16 * 0]
stp q2, q3, [\state, #16 * 2]
@@ -211,8 +213,10 @@
mov v\nz\().16b, v\nz\().16b
.endm
-.macro sve_flush
+.macro sve_flush_z
_for n, 0, 31, _sve_flush_z \n
+.endm
+.macro sve_flush_p_ffr
_for n, 0, 15, _sve_pfalse \n
_sve_wrffr 0
.endm
@@ -230,8 +234,7 @@
str w\nxtmp, [\xpfpsr, #4]
.endm
-.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
- sve_load_vq \xvqminus1, x\nxtmp, \xtmp2
+.macro __sve_load nxbase, xpfpsr, nxtmp
_for n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34
_sve_ldr_p 0, \nxbase
_sve_wrffr 0
@@ -242,3 +245,8 @@
ldr w\nxtmp, [\xpfpsr, #4]
msr fpcr, x\nxtmp
.endm
+
+.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2
+ sve_load_vq \xvqminus1, x\nxtmp, \xtmp2
+ __sve_load \nxbase, \xpfpsr, \nxtmp
+.endm
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 5abf91e3494c..1242f71937f8 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -23,8 +23,7 @@ static inline void arch_clear_hugepage_flags(struct page *page)
}
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
-extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable);
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/arm64/include/asm/hyp_image.h b/arch/arm64/include/asm/hyp_image.h
index 737ded6b6d0d..b4b3076a76fb 100644
--- a/arch/arm64/include/asm/hyp_image.h
+++ b/arch/arm64/include/asm/hyp_image.h
@@ -10,11 +10,15 @@
#define __HYP_CONCAT(a, b) a ## b
#define HYP_CONCAT(a, b) __HYP_CONCAT(a, b)
+#ifndef __KVM_NVHE_HYPERVISOR__
/*
* KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_,
* to separate it from the kernel proper.
*/
#define kvm_nvhe_sym(sym) __kvm_nvhe_##sym
+#else
+#define kvm_nvhe_sym(sym) sym
+#endif
#ifdef LINKER_SCRIPT
@@ -56,6 +60,9 @@
*/
#define KVM_NVHE_ALIAS(sym) kvm_nvhe_sym(sym) = sym;
+/* Defines a linker script alias for KVM nVHE hyp symbols */
+#define KVM_NVHE_ALIAS_HYP(first, sec) kvm_nvhe_sym(first) = kvm_nvhe_sym(sec);
+
#endif /* LINKER_SCRIPT */
#endif /* __ARM64_HYP_IMAGE_H__ */
diff --git a/arch/arm64/include/asm/hypervisor.h b/arch/arm64/include/asm/hypervisor.h
index f9cc1d021791..0ae427f352c8 100644
--- a/arch/arm64/include/asm/hypervisor.h
+++ b/arch/arm64/include/asm/hypervisor.h
@@ -4,4 +4,7 @@
#include <asm/xen/hypervisor.h>
+void kvm_init_hyp_services(void);
+bool kvm_arm_hyp_service_available(u32 func_id);
+
#endif
diff --git a/arch/arm64/include/asm/insn-def.h b/arch/arm64/include/asm/insn-def.h
new file mode 100644
index 000000000000..2c075f615c6a
--- /dev/null
+++ b/arch/arm64/include/asm/insn-def.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_INSN_DEF_H
+#define __ASM_INSN_DEF_H
+
+/* A64 instructions are always 32 bits. */
+#define AARCH64_INSN_SIZE 4
+
+#endif /* __ASM_INSN_DEF_H */
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 4ebb9c054ccc..6b776c8667b2 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -10,7 +10,7 @@
#include <linux/build_bug.h>
#include <linux/types.h>
-#include <asm/alternative.h>
+#include <asm/insn-def.h>
#ifndef __ASSEMBLY__
/*
@@ -30,6 +30,7 @@
*/
enum aarch64_insn_encoding_class {
AARCH64_INSN_CLS_UNKNOWN, /* UNALLOCATED */
+ AARCH64_INSN_CLS_SVE, /* SVE instructions */
AARCH64_INSN_CLS_DP_IMM, /* Data processing - immediate */
AARCH64_INSN_CLS_DP_REG, /* Data processing - register */
AARCH64_INSN_CLS_DP_FPSIMD, /* Data processing - SIMD and FP */
@@ -294,6 +295,12 @@ __AARCH64_INSN_FUNCS(adr, 0x9F000000, 0x10000000)
__AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
__AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
+__AARCH64_INSN_FUNCS(store_imm, 0x3FC00000, 0x39000000)
+__AARCH64_INSN_FUNCS(load_imm, 0x3FC00000, 0x39400000)
+__AARCH64_INSN_FUNCS(store_pre, 0x3FE00C00, 0x38000C00)
+__AARCH64_INSN_FUNCS(load_pre, 0x3FE00C00, 0x38400C00)
+__AARCH64_INSN_FUNCS(store_post, 0x3FE00C00, 0x38000400)
+__AARCH64_INSN_FUNCS(load_post, 0x3FE00C00, 0x38400400)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
@@ -302,6 +309,8 @@ __AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
__AARCH64_INSN_FUNCS(exclusive, 0x3F800000, 0x08000000)
__AARCH64_INSN_FUNCS(load_ex, 0x3F400000, 0x08400000)
__AARCH64_INSN_FUNCS(store_ex, 0x3F400000, 0x08000000)
+__AARCH64_INSN_FUNCS(stp, 0x7FC00000, 0x29000000)
+__AARCH64_INSN_FUNCS(ldp, 0x7FC00000, 0x29400000)
__AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000)
__AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000)
__AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000)
@@ -334,6 +343,7 @@ __AARCH64_INSN_FUNCS(rev64, 0x7FFFFC00, 0x5AC00C00)
__AARCH64_INSN_FUNCS(and, 0x7F200000, 0x0A000000)
__AARCH64_INSN_FUNCS(bic, 0x7F200000, 0x0A200000)
__AARCH64_INSN_FUNCS(orr, 0x7F200000, 0x2A000000)
+__AARCH64_INSN_FUNCS(mov_reg, 0x7FE0FFE0, 0x2A0003E0)
__AARCH64_INSN_FUNCS(orn, 0x7F200000, 0x2A200000)
__AARCH64_INSN_FUNCS(eor, 0x7F200000, 0x4A000000)
__AARCH64_INSN_FUNCS(eon, 0x7F200000, 0x4A200000)
@@ -368,6 +378,14 @@ __AARCH64_INSN_FUNCS(eret_auth, 0xFFFFFBFF, 0xD69F0BFF)
__AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000)
__AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F)
__AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000)
+__AARCH64_INSN_FUNCS(dmb, 0xFFFFF0FF, 0xD50330BF)
+__AARCH64_INSN_FUNCS(dsb_base, 0xFFFFF0FF, 0xD503309F)
+__AARCH64_INSN_FUNCS(dsb_nxs, 0xFFFFF3FF, 0xD503323F)
+__AARCH64_INSN_FUNCS(isb, 0xFFFFF0FF, 0xD50330DF)
+__AARCH64_INSN_FUNCS(sb, 0xFFFFFFFF, 0xD50330FF)
+__AARCH64_INSN_FUNCS(clrex, 0xFFFFF0FF, 0xD503305F)
+__AARCH64_INSN_FUNCS(ssbb, 0xFFFFFFFF, 0xD503309F)
+__AARCH64_INSN_FUNCS(pssbb, 0xFFFFFFFF, 0xD503349F)
#undef __AARCH64_INSN_FUNCS
@@ -379,8 +397,47 @@ static inline bool aarch64_insn_is_adr_adrp(u32 insn)
return aarch64_insn_is_adr(insn) || aarch64_insn_is_adrp(insn);
}
-int aarch64_insn_read(void *addr, u32 *insnp);
-int aarch64_insn_write(void *addr, u32 insn);
+static inline bool aarch64_insn_is_dsb(u32 insn)
+{
+ return aarch64_insn_is_dsb_base(insn) || aarch64_insn_is_dsb_nxs(insn);
+}
+
+static inline bool aarch64_insn_is_barrier(u32 insn)
+{
+ return aarch64_insn_is_dmb(insn) || aarch64_insn_is_dsb(insn) ||
+ aarch64_insn_is_isb(insn) || aarch64_insn_is_sb(insn) ||
+ aarch64_insn_is_clrex(insn) || aarch64_insn_is_ssbb(insn) ||
+ aarch64_insn_is_pssbb(insn);
+}
+
+static inline bool aarch64_insn_is_store_single(u32 insn)
+{
+ return aarch64_insn_is_store_imm(insn) ||
+ aarch64_insn_is_store_pre(insn) ||
+ aarch64_insn_is_store_post(insn);
+}
+
+static inline bool aarch64_insn_is_store_pair(u32 insn)
+{
+ return aarch64_insn_is_stp(insn) ||
+ aarch64_insn_is_stp_pre(insn) ||
+ aarch64_insn_is_stp_post(insn);
+}
+
+static inline bool aarch64_insn_is_load_single(u32 insn)
+{
+ return aarch64_insn_is_load_imm(insn) ||
+ aarch64_insn_is_load_pre(insn) ||
+ aarch64_insn_is_load_post(insn);
+}
+
+static inline bool aarch64_insn_is_load_pair(u32 insn)
+{
+ return aarch64_insn_is_ldp(insn) ||
+ aarch64_insn_is_ldp_pre(insn) ||
+ aarch64_insn_is_ldp_post(insn);
+}
+
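A sketch of how these predicates might be used to classify a fetched instruction word, e.g. in an emulation path; the classify() wrapper is illustrative:

static void classify(u32 insn)
{
	if (aarch64_insn_is_store_single(insn) ||
	    aarch64_insn_is_store_pair(insn))
		pr_debug("single or paired store\n");
	else if (aarch64_insn_is_load_single(insn) ||
		 aarch64_insn_is_load_pair(insn))
		pr_debug("single or paired load\n");
	else if (aarch64_insn_is_barrier(insn))
		pr_debug("barrier\n");
}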
enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
bool aarch64_insn_uses_literal(u32 insn);
bool aarch64_insn_is_branch(u32 insn);
@@ -487,9 +544,6 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset);
-int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
-int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
-
s32 aarch64_insn_adrp_get_offset(u32 insn);
u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset);
@@ -506,6 +560,7 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn);
typedef bool (pstate_check_t)(unsigned long);
extern pstate_check_t * const aarch32_opcode_cond_checks[16];
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_INSN_H */
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 5ea8656a2030..7fd836bea7eb 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -169,16 +169,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
-
-/*
- * PCI configuration space mapping function.
- *
- * The PCI specification disallows posted write configuration transactions.
- * Add an arch specific pci_remap_cfgspace() definition that is implemented
- * through nGnRnE device memory attribute as recommended by the ARM v8
- * Architecture reference manual Issue A.k B2.8.2 "Device memory".
- */
-#define pci_remap_cfgspace(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
+#define ioremap_np(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
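A hedged usage sketch for the renamed mapping helper: drivers that need non-posted write semantics (such as PCI config space) now map through the generic ioremap_np(); res and CTRL_OFFSET are illustrative:

	void __iomem *regs = ioremap_np(res->start, resource_size(res));

	if (!regs)
		return -ENOMEM;
	writel(1, regs + CTRL_OFFSET);	/* CTRL_OFFSET is hypothetical */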
/*
* io{read,write}{16,32,64}be() macros
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index b2b0c6405eb0..fac08e18bcd5 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -8,6 +8,10 @@
struct pt_regs;
+int set_handle_irq(void (*handle_irq)(struct pt_regs *));
+#define set_handle_irq set_handle_irq
+int set_handle_fiq(void (*handle_fiq)(struct pt_regs *));
+
static inline int nr_legacy_irqs(void)
{
return 0;
diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
index a1020285ea75..81bbfa3a035b 100644
--- a/arch/arm64/include/asm/irq_work.h
+++ b/arch/arm64/include/asm/irq_work.h
@@ -2,6 +2,8 @@
#ifndef __ASM_IRQ_WORK_H
#define __ASM_IRQ_WORK_H
+extern void arch_irq_work_raise(void);
+
static inline bool arch_irq_work_has_interrupt(void)
{
return true;
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index ff328e5bbb75..b57b9b1e4344 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -12,15 +12,13 @@
/*
* Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
- * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
+ * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'daif'
* order:
 * Masking debug exceptions causes all other exceptions to be masked too.
- * Masking SError masks irq, but not debug exceptions. Masking irqs has no
- * side effects for other flags. Keeping to this order makes it easier for
- * entry.S to know which exceptions should be unmasked.
- *
- * FIQ is never expected, but we mask it when we disable debug exceptions, and
- * unmask it at all other times.
+ * Masking SError masks IRQ/FIQ, but not debug exceptions. IRQ and FIQ are
+ * always masked and unmasked together, and have no side effects for other
+ * flags. Keeping to this order makes it easier for entry.S to know which
+ * exceptions should be unmasked.
*/
/*
@@ -35,7 +33,7 @@ static inline void arch_local_irq_enable(void)
}
asm volatile(ALTERNATIVE(
- "msr daifclr, #2 // arch_local_irq_enable",
+ "msr daifclr, #3 // arch_local_irq_enable",
__msr_s(SYS_ICC_PMR_EL1, "%0"),
ARM64_HAS_IRQ_PRIO_MASKING)
:
@@ -54,7 +52,7 @@ static inline void arch_local_irq_disable(void)
}
asm volatile(ALTERNATIVE(
- "msr daifset, #2 // arch_local_irq_disable",
+ "msr daifset, #3 // arch_local_irq_disable",
__msr_s(SYS_ICC_PMR_EL1, "%0"),
ARM64_HAS_IRQ_PRIO_MASKING)
:
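For reference, the daifset/daifclr immediate is a bitmask with D=8, A=4, I=2, F=1, so the switch from #2 to #3 masks and unmasks IRQ and FIQ as a pair. A minimal sketch of the same encoding:

static inline void irq_fiq_mask(void)
{
	asm volatile("msr daifset, #3" ::: "memory");
}

static inline void irq_fiq_unmask(void)
{
	asm volatile("msr daifclr, #3" ::: "memory");
}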
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 587c504a4c8b..3512184cfec1 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -18,9 +18,9 @@
* 64K (section size = 512M).
*/
#ifdef CONFIG_ARM64_4K_PAGES
-#define ARM64_SWAPPER_USES_SECTION_MAPS 1
+#define ARM64_KERNEL_USES_PMD_MAPS 1
#else
-#define ARM64_SWAPPER_USES_SECTION_MAPS 0
+#define ARM64_KERNEL_USES_PMD_MAPS 0
#endif
/*
@@ -33,7 +33,7 @@
* VA range, so pages required to map highest possible PA are reserved in all
* cases.
*/
-#if ARM64_SWAPPER_USES_SECTION_MAPS
+#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
#define IDMAP_PGTABLE_LEVELS (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1)
#else
@@ -90,9 +90,9 @@
#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
/* Initial memory map size */
-#if ARM64_SWAPPER_USES_SECTION_MAPS
-#define SWAPPER_BLOCK_SHIFT SECTION_SHIFT
-#define SWAPPER_BLOCK_SIZE SECTION_SIZE
+#if ARM64_KERNEL_USES_PMD_MAPS
+#define SWAPPER_BLOCK_SHIFT PMD_SHIFT
+#define SWAPPER_BLOCK_SIZE PMD_SIZE
#define SWAPPER_TABLE_SHIFT PUD_SHIFT
#else
#define SWAPPER_BLOCK_SHIFT PAGE_SHIFT
@@ -100,16 +100,13 @@
#define SWAPPER_TABLE_SHIFT PMD_SHIFT
#endif
-/* The size of the initial kernel direct mapping */
-#define SWAPPER_INIT_MAP_SIZE (_AC(1, UL) << SWAPPER_TABLE_SHIFT)
-
/*
* Initial memory map attributes.
*/
#define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#if ARM64_SWAPPER_USES_SECTION_MAPS
+#if ARM64_KERNEL_USES_PMD_MAPS
#define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
#else
#define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
@@ -125,7 +122,7 @@
#if defined(CONFIG_ARM64_4K_PAGES)
#define ARM64_MEMSTART_SHIFT PUD_SHIFT
#elif defined(CONFIG_ARM64_16K_PAGES)
-#define ARM64_MEMSTART_SHIFT (PMD_SHIFT + 5)
+#define ARM64_MEMSTART_SHIFT CONT_PMD_SHIFT
#else
#define ARM64_MEMSTART_SHIFT PMD_SHIFT
#endif
@@ -136,7 +133,7 @@
* has a direct correspondence, and needs to appear sufficiently aligned
* in the virtual address space.
*/
-#if defined(CONFIG_SPARSEMEM_VMEMMAP) && ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
+#if ARM64_MEMSTART_SHIFT < SECTION_SIZE_BITS
#define ARM64_MEMSTART_ALIGN (1UL << SECTION_SIZE_BITS)
#else
#define ARM64_MEMSTART_ALIGN (1UL << ARM64_MEMSTART_SHIFT)
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
index 9befcd87e9a8..00dbcc71aeb2 100644
--- a/arch/arm64/include/asm/kexec.h
+++ b/arch/arm64/include/asm/kexec.h
@@ -96,10 +96,6 @@ struct kimage_arch {
void *dtb;
phys_addr_t dtb_mem;
phys_addr_t kern_reloc;
- /* Core ELF header buffer */
- void *elf_headers;
- unsigned long elf_headers_mem;
- unsigned long elf_headers_sz;
};
#ifdef CONFIG_KEXEC_FILE
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 4e90c2debf70..d436831dd706 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -12,7 +12,8 @@
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
-#define HCR_ATA (UL(1) << 56)
+#define HCR_ATA_SHIFT 56
+#define HCR_ATA (UL(1) << HCR_ATA_SHIFT)
#define HCR_FWB (UL(1) << 46)
#define HCR_API (UL(1) << 41)
#define HCR_APK (UL(1) << 40)
@@ -278,6 +279,9 @@
#define CPTR_EL2_DEFAULT CPTR_EL2_RES1
/* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_E2TB_MASK (UL(0x3))
+#define MDCR_EL2_E2TB_SHIFT (UL(24))
+#define MDCR_EL2_TTRF (1 << 19)
#define MDCR_EL2_TPMS (1 << 14)
#define MDCR_EL2_E2PB_MASK (UL(0x3))
#define MDCR_EL2_E2PB_SHIFT (UL(12))
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 22d933e9b59e..9f0bf2109be7 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -8,6 +8,7 @@
#define __ARM_KVM_ASM_H__
#include <asm/hyp_image.h>
+#include <asm/insn.h>
#include <asm/virt.h>
#define ARM_EXIT_WITH_SERROR_BIT 31
@@ -47,16 +48,23 @@
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context 2
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa 3
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid 4
-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid 5
+#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context 5
#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff 6
#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs 7
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_ich_vtr_el2 8
+#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config 8
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr 9
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr 10
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs 11
#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 12
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs 13
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs 14
+#define __KVM_HOST_SMCCC_FUNC___pkvm_init 15
+#define __KVM_HOST_SMCCC_FUNC___pkvm_create_mappings 16
+#define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping 17
+#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
+#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
+#define __KVM_HOST_SMCCC_FUNC___pkvm_mark_hyp 20
+#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 21
#ifndef __ASSEMBLY__
@@ -154,6 +162,9 @@ struct kvm_nvhe_init_params {
unsigned long tpidr_el2;
unsigned long stack_hyp_va;
phys_addr_t pgd_pa;
+ unsigned long hcr_el2;
+ unsigned long vttbr;
+ unsigned long vtcr;
};
/* Translate a kernel address @ptr into its equivalent linear mapping */
@@ -183,16 +194,18 @@ DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
extern void __kvm_flush_vm_context(void);
+extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_timer_set_cntvoff(u64 cntvoff);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
-extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);
+
+extern u64 __vgic_v3_get_gic_config(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f612c090f2e4..fd418955e31e 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -84,6 +84,9 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
vcpu_el1_is_32bit(vcpu))
vcpu->arch.hcr_el2 |= HCR_TID2;
+
+ if (kvm_has_mte(vcpu->kvm))
+ vcpu->arch.hcr_el2 |= HCR_ATA;
}
static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
@@ -463,4 +466,9 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}
+static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
+{
+ return test_bit(feature, vcpu->arch.features);
+}
+
#endif /* __ARM64_KVM_EMULATE_H__ */
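A brief usage sketch for the new helper, gating on an existing vcpu feature bit:

	if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
		size = vcpu_sve_state_size(vcpu);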
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 3d10e6527f7d..41911585ae0c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -46,6 +46,7 @@
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
+#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
KVM_DIRTY_LOG_INITIALLY_SET)
@@ -94,7 +95,7 @@ struct kvm_s2_mmu {
/* The last vcpu id that ran on each physical CPU */
int __percpu *last_vcpu_ran;
- struct kvm *kvm;
+ struct kvm_arch *arch;
};
struct kvm_arch_memory_slot {
@@ -132,6 +133,9 @@ struct kvm_arch {
u8 pfr0_csv2;
u8 pfr0_csv3;
+
+ /* Memory Tagging Extension enabled for the guest */
+ bool mte_enabled;
};
struct kvm_vcpu_fault_info {
@@ -206,6 +210,12 @@ enum vcpu_sysreg {
CNTP_CVAL_EL0,
CNTP_CTL_EL0,
+ /* Memory Tagging Extension registers */
+ RGSR_EL1, /* Random Allocation Tag Seed Register */
+ GCR_EL1, /* Tag Control Register */
+ TFSR_EL1, /* Tag Fault Status Register (EL1) */
+ TFSRE0_EL1, /* Tag Fault Status Register (EL0) */
+
/* 32bit specific registers. Keep them at the end of the range */
DACR32_EL2, /* Domain Access Control Register */
IFSR32_EL2, /* Instruction Fault Status Register */
@@ -315,6 +325,8 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch regs;
/* Statistical profiling extension */
u64 pmscr_el1;
+ /* Self-hosted trace */
+ u64 trfcr_el1;
} host_debug_state;
/* VGIC state */
@@ -372,8 +384,10 @@ struct kvm_vcpu_arch {
};
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
-#define vcpu_sve_pffr(vcpu) ((void *)((char *)((vcpu)->arch.sve_state) + \
- sve_ffr_offset((vcpu)->arch.sve_max_vl)))
+#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
+ sve_ffr_offset((vcpu)->arch.sve_max_vl))
+
+#define vcpu_sve_max_vq(vcpu) sve_vq_from_vl((vcpu)->arch.sve_max_vl)
#define vcpu_sve_state_size(vcpu) ({ \
size_t __size_ret; \
@@ -382,7 +396,7 @@ struct kvm_vcpu_arch {
if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \
__size_ret = 0; \
} else { \
- __vcpu_vq = sve_vq_from_vl((vcpu)->arch.sve_max_vl); \
+ __vcpu_vq = vcpu_sve_max_vq(vcpu); \
__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq); \
} \
\
@@ -400,7 +414,13 @@ struct kvm_vcpu_arch {
#define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION (1 << 8) /* Exception pending */
#define KVM_ARM64_EXCEPT_MASK (7 << 9) /* Target EL/MODE */
+#define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */
+#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */
+#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
+ KVM_GUESTDBG_USE_SW_BP | \
+ KVM_GUESTDBG_USE_HW | \
+ KVM_GUESTDBG_SINGLESTEP)
/*
* When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
* take the following values:
@@ -546,16 +566,11 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
}
struct kvm_vm_stat {
- ulong remote_tlb_flush;
+ struct kvm_vm_stat_generic generic;
};
struct kvm_vcpu_stat {
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
+ struct kvm_vcpu_stat_generic generic;
u64 hvc_exit_stat;
u64 wfe_exit_stat;
u64 wfi_exit_stat;
@@ -582,15 +597,11 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events);
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end, unsigned flags);
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
+#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...) \
({ \
struct arm_smccc_res res; \
@@ -630,9 +641,13 @@ void kvm_arm_resume_guest(struct kvm *kvm);
\
ret; \
})
+#else /* __KVM_NVHE_HYPERVISOR__ */
+#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
+#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
+#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
+#endif /* __KVM_NVHE_HYPERVISOR__ */
void force_vm_exit(const cpumask_t *mask);
-void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
@@ -692,19 +707,6 @@ static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}
-static inline bool kvm_arch_requires_vhe(void)
-{
- /*
- * The Arm architecture specifies that implementation of SVE
- * requires VHE also to be implemented. The KVM code for arm64
- * relies on this when SVE is present:
- */
- if (system_supports_sve())
- return true;
-
- return false;
-}
-
void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
static inline void kvm_arch_hardware_unsetup(void) {}
@@ -713,6 +715,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
void kvm_arm_init_debug(void);
+void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
@@ -723,6 +726,9 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
+long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
+ struct kvm_arm_copy_mte_tags *copy_tags);
+
/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
@@ -734,6 +740,10 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
return (!has_vhe() && attr->exclude_host);
}
+/* Flags for host debug state */
+void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
+
#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
@@ -767,9 +777,17 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
#define kvm_arm_vcpu_sve_finalized(vcpu) \
((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+#define kvm_has_mte(kvm) (system_supports_mte() && (kvm)->arch.mte_enabled)
#define kvm_vcpu_has_pmu(vcpu) \
(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
int kvm_trng_call(struct kvm_vcpu *vcpu);
+#ifdef CONFIG_KVM
+extern phys_addr_t hyp_mem_base;
+extern phys_addr_t hyp_mem_size;
+void __init kvm_hyp_reserve(void);
+#else
+static inline void kvm_hyp_reserve(void) { }
+#endif
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index c0450828378b..9d60b3006efc 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -83,8 +83,15 @@ void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
void __debug_switch_to_guest(struct kvm_vcpu *vcpu);
void __debug_switch_to_host(struct kvm_vcpu *vcpu);
+#ifdef __KVM_NVHE_HYPERVISOR__
+void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
+#endif
+
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
+void __sve_save_state(void *sve_pffr, u32 *fpsr);
+void __sve_restore_state(void *sve_pffr, u32 *fpsr);
#ifndef __KVM_NVHE_HYPERVISOR__
void activate_traps_vhe_load(struct kvm_vcpu *vcpu);
@@ -95,9 +102,20 @@ u64 __guest_enter(struct kvm_vcpu *vcpu);
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);
-void __noreturn hyp_panic(void);
#ifdef __KVM_NVHE_HYPERVISOR__
-void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
+void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
+ u64 elr, u64 par);
#endif
+#ifdef __KVM_NVHE_HYPERVISOR__
+void __pkvm_init_switch_pgd(phys_addr_t phys, unsigned long size,
+ phys_addr_t pgd, void *sp, void *cont_fn);
+int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
+ unsigned long *per_cpu_base, u32 hyp_va_bits);
+void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
+#endif
+
+extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
+
#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 90873851f677..b52c5c4b9a3d 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -121,6 +121,8 @@ void kvm_update_va_mask(struct alt_instr *alt,
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);
+#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)
+
static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
@@ -166,21 +168,28 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
-int kvm_mmu_init(void);
+int kvm_mmu_init(u32 *hyp_va_bits);
+
+static inline void *__kvm_vector_slot2addr(void *base,
+ enum arm64_hyp_spectre_vector slot)
+{
+ int idx = slot - (slot != HYP_VECTOR_DIRECT);
+
+ return base + (idx * SZ_2K);
+}
struct kvm;
-#define kvm_flush_dcache_to_poc(a,l) __flush_dcache_area((a), (l))
+#define kvm_flush_dcache_to_poc(a,l) \
+ dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
-static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
+static inline void __clean_dcache_guest_page(void *va, size_t size)
{
- void *va = page_address(pfn_to_page(pfn));
-
/*
* With FWB, we ensure that the guest always accesses memory using
* cacheable attributes, and we don't have to clean to PoC when
@@ -193,18 +202,14 @@ static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
kvm_flush_dcache_to_poc(va, size);
}
-static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
- unsigned long size)
+static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
if (icache_is_aliasing()) {
/* any kind of VIPT cache */
- __flush_icache_all();
+ icache_inval_all_pou();
} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
- void *va = page_address(pfn_to_page(pfn));
-
- invalidate_icache_range((unsigned long)va,
- (unsigned long)va + size);
+ icache_inval_pou((unsigned long)va, (unsigned long)va + size);
}
}
@@ -262,9 +267,9 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
* Must be called from hyp code running at EL2 with an updated VTTBR
* and interrupts disabled.
*/
-static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
+static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long vtcr)
{
- write_sysreg(kern_hyp_va(mmu->kvm)->arch.vtcr, vtcr_el2);
+ write_sysreg(vtcr, vtcr_el2);
write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
/*
@@ -275,5 +280,14 @@ static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}
+static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
+{
+ __load_stage2(mmu, kern_hyp_va(mmu->arch)->vtcr);
+}
+
+static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
+{
+ return container_of(mmu->arch, struct kvm, arch);
+}
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/kvm_mte.h b/arch/arm64/include/asm/kvm_mte.h
new file mode 100644
index 000000000000..de002636eb1f
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_mte.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2021 ARM Ltd.
+ */
+#ifndef __ASM_KVM_MTE_H
+#define __ASM_KVM_MTE_H
+
+#ifdef __ASSEMBLY__
+
+#include <asm/sysreg.h>
+
+#ifdef CONFIG_ARM64_MTE
+
+.macro mte_switch_to_guest g_ctxt, h_ctxt, reg1
+alternative_if_not ARM64_MTE
+ b .L__skip_switch\@
+alternative_else_nop_endif
+ mrs \reg1, hcr_el2
+ tbz \reg1, #(HCR_ATA_SHIFT), .L__skip_switch\@
+
+ mrs_s \reg1, SYS_RGSR_EL1
+ str \reg1, [\h_ctxt, #CPU_RGSR_EL1]
+ mrs_s \reg1, SYS_GCR_EL1
+ str \reg1, [\h_ctxt, #CPU_GCR_EL1]
+
+ ldr \reg1, [\g_ctxt, #CPU_RGSR_EL1]
+ msr_s SYS_RGSR_EL1, \reg1
+ ldr \reg1, [\g_ctxt, #CPU_GCR_EL1]
+ msr_s SYS_GCR_EL1, \reg1
+
+.L__skip_switch\@:
+.endm
+
+.macro mte_switch_to_hyp g_ctxt, h_ctxt, reg1
+alternative_if_not ARM64_MTE
+ b .L__skip_switch\@
+alternative_else_nop_endif
+ mrs \reg1, hcr_el2
+ tbz \reg1, #(HCR_ATA_SHIFT), .L__skip_switch\@
+
+ mrs_s \reg1, SYS_RGSR_EL1
+ str \reg1, [\g_ctxt, #CPU_RGSR_EL1]
+ mrs_s \reg1, SYS_GCR_EL1
+ str \reg1, [\g_ctxt, #CPU_GCR_EL1]
+
+ ldr \reg1, [\h_ctxt, #CPU_RGSR_EL1]
+ msr_s SYS_RGSR_EL1, \reg1
+ ldr \reg1, [\h_ctxt, #CPU_GCR_EL1]
+ msr_s SYS_GCR_EL1, \reg1
+
+ isb
+
+.L__skip_switch\@:
+.endm
+
+#else /* !CONFIG_ARM64_MTE */
+
+.macro mte_switch_to_guest g_ctxt, h_ctxt, reg1
+.endm
+
+.macro mte_switch_to_hyp g_ctxt, h_ctxt, reg1
+.endm
+
+#endif /* CONFIG_ARM64_MTE */
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_KVM_MTE_H */
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 8886d43cfb11..f004c0115d89 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -11,22 +11,87 @@
#include <linux/kvm_host.h>
#include <linux/types.h>
+#define KVM_PGTABLE_MAX_LEVELS 4U
+
+static inline u64 kvm_get_parange(u64 mmfr0)
+{
+ u64 parange = cpuid_feature_extract_unsigned_field(mmfr0,
+ ID_AA64MMFR0_PARANGE_SHIFT);
+ if (parange > ID_AA64MMFR0_PARANGE_MAX)
+ parange = ID_AA64MMFR0_PARANGE_MAX;
+
+ return parange;
+}
+
typedef u64 kvm_pte_t;
/**
+ * struct kvm_pgtable_mm_ops - Memory management callbacks.
+ * @zalloc_page: Allocate a single zeroed memory page.
+ * The @arg parameter can be used by the walker
+ * to pass a memcache. The initial refcount of
+ * the page is 1.
+ * @zalloc_pages_exact: Allocate an exact number of zeroed memory pages.
+ * The @size parameter is in bytes, and is rounded
+ * up to the next page boundary. The resulting
+ * allocation is physically contiguous.
+ * @free_pages_exact: Free an exact number of memory pages previously
+ * allocated by zalloc_pages_exact.
+ * @get_page: Increment the refcount on a page.
+ * @put_page: Decrement the refcount on a page. When the
+ * refcount reaches 0 the page is automatically
+ * freed.
+ * @page_count: Return the refcount of a page.
+ * @phys_to_virt: Convert a physical address into a virtual
+ * address mapped in the current context.
+ * @virt_to_phys: Convert a virtual address mapped in the current
+ * context into a physical address.
+ * @dcache_clean_inval_poc: Clean and invalidate the data cache to the PoC
+ * for the specified memory address range.
+ * @icache_inval_pou: Invalidate the instruction cache to the PoU
+ * for the specified memory address range.
+ */
+struct kvm_pgtable_mm_ops {
+ void* (*zalloc_page)(void *arg);
+ void* (*zalloc_pages_exact)(size_t size);
+ void (*free_pages_exact)(void *addr, size_t size);
+ void (*get_page)(void *addr);
+ void (*put_page)(void *addr);
+ int (*page_count)(void *addr);
+ void* (*phys_to_virt)(phys_addr_t phys);
+ phys_addr_t (*virt_to_phys)(void *addr);
+ void (*dcache_clean_inval_poc)(void *addr, size_t size);
+ void (*icache_inval_pou)(void *addr, size_t size);
+};
+
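A hedged sketch (not the in-tree table) of a minimal host-side mm_ops backed by ordinary page allocation; unneeded callbacks are omitted and the helper names are hypothetical:

static void *host_zalloc_page(void *arg)
{
	return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
}

static void *host_phys_to_virt(phys_addr_t phys)
{
	return __va(phys);
}

static phys_addr_t host_virt_to_phys(void *addr)
{
	return __pa(addr);
}

static struct kvm_pgtable_mm_ops host_mm_ops = {
	.zalloc_page	= host_zalloc_page,
	.phys_to_virt	= host_phys_to_virt,
	.virt_to_phys	= host_virt_to_phys,
};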
+/**
+ * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
+ * @KVM_PGTABLE_S2_NOFWB: Don't enforce Normal-WB even if the CPUs have
+ * ARM64_HAS_STAGE2_FWB.
+ * @KVM_PGTABLE_S2_IDMAP: Only use identity mappings.
+ */
+enum kvm_pgtable_stage2_flags {
+ KVM_PGTABLE_S2_NOFWB = BIT(0),
+ KVM_PGTABLE_S2_IDMAP = BIT(1),
+};
+
+/**
* struct kvm_pgtable - KVM page-table.
* @ia_bits: Maximum input address size, in bits.
* @start_level: Level at which the page-table walk starts.
* @pgd: Pointer to the first top-level entry of the page-table.
+ * @mm_ops: Memory management callbacks.
* @mmu: Stage-2 KVM MMU struct. Unused for stage-1 page-tables.
*/
struct kvm_pgtable {
u32 ia_bits;
u32 start_level;
kvm_pte_t *pgd;
+ struct kvm_pgtable_mm_ops *mm_ops;
/* Stage-2 only */
struct kvm_s2_mmu *mmu;
+ enum kvm_pgtable_stage2_flags flags;
};
/**
@@ -50,6 +115,16 @@ enum kvm_pgtable_prot {
#define PAGE_HYP_DEVICE (PAGE_HYP | KVM_PGTABLE_PROT_DEVICE)
/**
+ * struct kvm_mem_range - Range of Intermediate Physical Addresses
+ * @start: Start of the range.
+ * @end: End of the range.
+ */
+struct kvm_mem_range {
+ u64 start;
+ u64 end;
+};
+
+/**
* enum kvm_pgtable_walk_flags - Flags to control a depth-first page-table walk.
* @KVM_PGTABLE_WALK_LEAF: Visit leaf entries, including invalid
* entries.
@@ -86,10 +161,12 @@ struct kvm_pgtable_walker {
* kvm_pgtable_hyp_init() - Initialise a hypervisor stage-1 page-table.
* @pgt: Uninitialised page-table structure to initialise.
* @va_bits: Maximum virtual address bits.
+ * @mm_ops: Memory management callbacks.
*
* Return: 0 on success, negative error code on failure.
*/
-int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits);
+int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
+ struct kvm_pgtable_mm_ops *mm_ops);
/**
* kvm_pgtable_hyp_destroy() - Destroy an unused hypervisor stage-1 page-table.
@@ -123,17 +200,41 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
enum kvm_pgtable_prot prot);
/**
- * kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
+ * kvm_get_vtcr() - Helper to construct VTCR_EL2
+ * @mmfr0: Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
+ * @mmfr1: Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
+ * @phys_shift: Value to set in VTCR_EL2.T0SZ.
+ *
+ * The VTCR value is common across all the physical CPUs on the system.
+ * We use system wide sanitised values to fill in different fields,
+ * except for Hardware Management of Access Flags. HA Flag is set
+ * unconditionally on all CPUs, as it is safe to run with or without
+ * the feature and the bit is RES0 on CPUs that don't support it.
+ *
+ * Return: VTCR_EL2 value
+ */
+u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
+
+/**
+ * kvm_pgtable_stage2_init_flags() - Initialise a guest stage-2 page-table.
* @pgt: Uninitialised page-table structure to initialise.
- * @kvm: KVM structure representing the guest virtual machine.
+ * @arch: Arch-specific KVM structure representing the guest virtual
+ * machine.
+ * @mm_ops: Memory management callbacks.
+ * @flags: Stage-2 configuration flags.
*
* Return: 0 on success, negative error code on failure.
*/
-int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm);
+int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch,
+ struct kvm_pgtable_mm_ops *mm_ops,
+ enum kvm_pgtable_stage2_flags flags);
+
+#define kvm_pgtable_stage2_init(pgt, arch, mm_ops) \
+ kvm_pgtable_stage2_init_flags(pgt, arch, mm_ops, 0)
/**
* kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
- * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
*
* The page-table is assumed to be unreachable by any hardware walkers prior
* to freeing and therefore no TLB invalidation is performed.
@@ -142,13 +243,13 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
/**
* kvm_pgtable_stage2_map() - Install a mapping in a guest stage-2 page-table.
- * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address at which to place the mapping.
* @size: Size of the mapping.
* @phys: Physical address of the memory to map.
* @prot: Permissions and attributes for the mapping.
- * @mc: Cache of pre-allocated GFP_PGTABLE_USER memory from which to
- * allocate page-table pages.
+ * @mc: Cache of pre-allocated and zeroed memory from which to allocate
+ * page-table pages.
*
* The offset of @addr within a page is ignored, @size is rounded-up to
* the next page boundary and @phys is rounded-down to the previous page
@@ -170,11 +271,31 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
*/
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
u64 phys, enum kvm_pgtable_prot prot,
- struct kvm_mmu_memory_cache *mc);
+ void *mc);
+
+/**
+ * kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to
+ * track ownership.
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
+ * @addr: Base intermediate physical address to annotate.
+ * @size: Size of the annotated range.
+ * @mc: Cache of pre-allocated and zeroed memory from which to allocate
+ * page-table pages.
+ * @owner_id: Unique identifier for the owner of the page.
+ *
+ * By default, all page-tables are owned by identifier 0. This function can be
+ * used to mark portions of the IPA space as owned by other entities. When a
+ * stage 2 is used with identity-mappings, these annotations allow to use the
+ * page-table data structure as a simple rmap.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
+ void *mc, u8 owner_id);
/**
* kvm_pgtable_stage2_unmap() - Remove a mapping from a guest stage-2 page-table.
- * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address from which to remove the mapping.
* @size: Size of the mapping.
*
@@ -194,7 +315,7 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
/**
* kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
* without TLB invalidation.
- * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address from which to write-protect,
* @size: Size of the range.
*
@@ -211,7 +332,7 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
/**
* kvm_pgtable_stage2_mkyoung() - Set the access flag in a page-table entry.
- * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address to identify the page-table entry.
*
* The offset of @addr within a page is ignored.
@@ -225,7 +346,7 @@ kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);
/**
* kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
- * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address to identify the page-table entry.
*
* The offset of @addr within a page is ignored.
@@ -244,7 +365,7 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
/**
* kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
* page-table entry.
- * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address to identify the page-table entry.
* @prot: Additional permissions to grant for the mapping.
*
@@ -263,7 +384,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
/**
* kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
* access flag set.
- * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address to identify the page-table entry.
*
* The offset of @addr within a page is ignored.
@@ -276,7 +397,7 @@ bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
* kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point
* of Coherency for guest stage-2 address
* range.
- * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init().
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
* @addr: Intermediate physical address from which to flush.
* @size: Size of the range.
*
@@ -311,4 +432,23 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
struct kvm_pgtable_walker *walker);
+/**
+ * kvm_pgtable_stage2_find_range() - Find a range of Intermediate Physical
+ * Addresses with compatible permission
+ * attributes.
+ * @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
+ * @addr: Address that must be covered by the range.
+ * @prot: Protection attributes that the range must be compatible with.
+ * @range: Range structure used to limit the search space at call time and
+ * that will hold the result.
+ *
+ * The offset of @addr within a page is ignored. An IPA is compatible with @prot
+ * iff its corresponding stage-2 page-table entry has default ownership and, if
+ * valid, is mapped with protection attributes identical to @prot.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr,
+ enum kvm_pgtable_prot prot,
+ struct kvm_mem_range *range);
#endif /* __ARM64_KVM_PGTABLE_H__ */
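
The kerneldoc for kvm_pgtable_stage2_find_range() above is easiest to read with a caller in mind. Below is a minimal sketch under stated assumptions — struct kvm_mem_range exposing {start, end} IPAs, a table initialised with kvm_pgtable_stage2_init*(), and an identity phys == ipa mapping; it is illustrative only, not the series' actual caller:

#include <asm/kvm_pgtable.h>

/*
 * Illustrative only: find the widest range around @addr whose entries
 * are compatible with @prot, then map it in one call. The whole-IPA
 * search window and the identity phys address are assumptions.
 */
static int sketch_map_around(struct kvm_pgtable *pgt, u64 addr,
			     enum kvm_pgtable_prot prot, void *mc)
{
	struct kvm_mem_range range = {
		.start	= 0,
		.end	= BIT(pgt->ia_bits),	/* limit: whole IPA space */
	};
	int ret;

	ret = kvm_pgtable_stage2_find_range(pgt, addr, prot, &range);
	if (ret)
		return ret;

	/* [range.start, range.end) now has compatible attributes. */
	return kvm_pgtable_stage2_map(pgt, range.start,
				      range.end - range.start,
				      range.start, prot, mc);
}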
diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
index ba89a9af820a..9906541a6861 100644
--- a/arch/arm64/include/asm/linkage.h
+++ b/arch/arm64/include/asm/linkage.h
@@ -56,8 +56,16 @@
SYM_FUNC_START_ALIAS(__pi_##x); \
SYM_FUNC_START_WEAK(x)
+#define SYM_FUNC_START_WEAK_ALIAS_PI(x) \
+ SYM_FUNC_START_ALIAS(__pi_##x); \
+ SYM_START(x, SYM_L_WEAK, SYM_A_ALIGN)
+
#define SYM_FUNC_END_PI(x) \
SYM_FUNC_END(x); \
SYM_FUNC_END_ALIAS(__pi_##x)
+#define SYM_FUNC_END_ALIAS_PI(x) \
+ SYM_FUNC_END_ALIAS(x); \
+ SYM_FUNC_END_ALIAS(__pi_##x)
+
#endif
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index c759faf7a1ff..824a3655dd93 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -135,10 +135,8 @@
#define MT_NORMAL 0
#define MT_NORMAL_TAGGED 1
#define MT_NORMAL_NC 2
-#define MT_NORMAL_WT 3
-#define MT_DEVICE_nGnRnE 4
-#define MT_DEVICE_nGnRE 5
-#define MT_DEVICE_GRE 6
+#define MT_DEVICE_nGnRnE 3
+#define MT_DEVICE_nGnRE 4
/*
* Memory types for Stage-2 translation
@@ -243,13 +241,15 @@ static inline const void *__tag_set(const void *addr, u8 tag)
}
#ifdef CONFIG_KASAN_HW_TAGS
-#define arch_enable_tagging() mte_enable_kernel()
+#define arch_enable_tagging_sync() mte_enable_kernel_sync()
+#define arch_enable_tagging_async() mte_enable_kernel_async()
#define arch_set_tagging_report_once(state) mte_set_report_once(state)
+#define arch_force_async_tag_fault() mte_check_tfsr_exit()
#define arch_init_tags(max_tag) mte_init_tags(max_tag)
#define arch_get_random_tag() mte_get_random_tag()
#define arch_get_mem_tag(addr) mte_get_mem_tag(addr)
-#define arch_set_mem_tag_range(addr, size, tag) \
- mte_set_mem_tag_range((addr), (size), (tag))
+#define arch_set_mem_tag_range(addr, size, tag, init) \
+ mte_set_mem_tag_range((addr), (size), (tag), (init))
#endif /* CONFIG_KASAN_HW_TAGS */
/*
@@ -327,7 +327,12 @@ static inline void *phys_to_virt(phys_addr_t x)
*/
#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
-#if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL)
+#if defined(CONFIG_DEBUG_VIRTUAL)
+#define page_to_virt(x) ({ \
+ __typeof__(x) __page = x; \
+ void *__addr = __va(page_to_phys(__page)); \
+ (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\
+})
#define virt_to_page(x) pfn_to_page(virt_to_pfn(x))
#else
#define page_to_virt(x) ({ \
@@ -342,11 +347,11 @@ static inline void *phys_to_virt(phys_addr_t x)
u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page)); \
(struct page *)__addr; \
})
-#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */
+#endif /* CONFIG_DEBUG_VIRTUAL */
#define virt_addr_valid(addr) ({ \
__typeof__(addr) __addr = __tag_reset(addr); \
- __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
+ __is_lm_address(__addr) && pfn_is_map_memory(virt_to_pfn(__addr)); \
})
void dump_mem_limit(void);
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 70ce8c1d2b07..eeb210997149 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -63,23 +63,6 @@ static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;
-static inline bool __cpu_uses_extended_idmap(void)
-{
- if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52))
- return false;
-
- return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS));
-}
-
-/*
- * True if the extended ID map requires an extra level of translation table
- * to be configured.
- */
-static inline bool __cpu_uses_extended_idmap_level(void)
-{
- return ARM64_HW_PGTABLE_LEVELS(64 - idmap_t0sz) > CONFIG_PGTABLE_LEVELS;
-}
-
/*
* Ensure TCR.T0SZ is set to the provided value.
*/
@@ -136,7 +119,7 @@ static inline void cpu_install_idmap(void)
* Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
* avoiding the possibility of conflicting TLB entries being allocated.
*/
-static inline void cpu_replace_ttbr1(pgd_t *pgdp)
+static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
{
typedef void (ttbr_replace_func)(phys_addr_t);
extern ttbr_replace_func idmap_cpu_replace_ttbr1;
@@ -157,7 +140,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
ttbr1 |= TTBR_CNP_BIT;
}
- replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
+ replace_phys = (void *)__pa_symbol(function_nocfi(idmap_cpu_replace_ttbr1));
cpu_install_idmap();
replace_phys(ttbr1);
@@ -194,9 +177,9 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
return;
if (mm == &init_mm)
- ttbr = __pa_symbol(reserved_pg_dir);
+ ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
else
- ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+ ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
diff --git a/arch/arm64/include/asm/module.lds.h b/arch/arm64/include/asm/module.lds.h
index 810045628c66..a11ccadd47d2 100644
--- a/arch/arm64/include/asm/module.lds.h
+++ b/arch/arm64/include/asm/module.lds.h
@@ -1,7 +1,20 @@
-#ifdef CONFIG_ARM64_MODULE_PLTS
SECTIONS {
+#ifdef CONFIG_ARM64_MODULE_PLTS
.plt 0 (NOLOAD) : { BYTE(0) }
.init.plt 0 (NOLOAD) : { BYTE(0) }
.text.ftrace_trampoline 0 (NOLOAD) : { BYTE(0) }
-}
#endif
+
+#ifdef CONFIG_KASAN_SW_TAGS
+ /*
+ * Outlined checks go into comdat-deduplicated sections named .text.hot.
+ * Because they are in comdats they are not combined by the linker and
+ * we otherwise end up with multiple sections with the same .text.hot
+ * name in the .ko file. The kernel module loader warns if it sees
+	 * multiple sections with the same name, so we use this SECTIONS
+	 * directive to force them into a single section and silence the
+ * warning.
+ */
+ .text.hot : { *(.text.hot) }
+#endif
+}
diff --git a/arch/arm64/include/asm/mte-def.h b/arch/arm64/include/asm/mte-def.h
index cf241b0f0a42..626d359b396e 100644
--- a/arch/arm64/include/asm/mte-def.h
+++ b/arch/arm64/include/asm/mte-def.h
@@ -7,6 +7,7 @@
#define MTE_GRANULE_SIZE UL(16)
#define MTE_GRANULE_MASK (~(MTE_GRANULE_SIZE - 1))
+#define MTE_GRANULES_PER_PAGE (PAGE_SIZE / MTE_GRANULE_SIZE)
#define MTE_TAG_SHIFT 56
#define MTE_TAG_SIZE 4
#define MTE_TAG_MASK GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index 7ab500e2ad17..d952352bd008 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -48,36 +48,88 @@ static inline u8 mte_get_random_tag(void)
return mte_get_ptr_tag(addr);
}
+static inline u64 __stg_post(u64 p)
+{
+ asm volatile(__MTE_PREAMBLE "stg %0, [%0], #16"
+ : "+r"(p)
+ :
+ : "memory");
+ return p;
+}
+
+static inline u64 __stzg_post(u64 p)
+{
+ asm volatile(__MTE_PREAMBLE "stzg %0, [%0], #16"
+ : "+r"(p)
+ :
+ : "memory");
+ return p;
+}
+
+static inline void __dc_gva(u64 p)
+{
+ asm volatile(__MTE_PREAMBLE "dc gva, %0" : : "r"(p) : "memory");
+}
+
+static inline void __dc_gzva(u64 p)
+{
+ asm volatile(__MTE_PREAMBLE "dc gzva, %0" : : "r"(p) : "memory");
+}
+
/*
* Assign allocation tags for a region of memory based on the pointer tag.
* Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
- * size must be non-zero and MTE_GRANULE_SIZE aligned.
+ * size must be MTE_GRANULE_SIZE aligned.
*/
-static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
+ bool init)
{
- u64 curr, end;
+ u64 curr, mask, dczid_bs, end1, end2, end3;
- if (!size)
- return;
+ /* Read DC G(Z)VA block size from the system register. */
+ dczid_bs = 4ul << (read_cpuid(DCZID_EL0) & 0xf);
curr = (u64)__tag_set(addr, tag);
- end = curr + size;
-
- do {
- /*
- * 'asm volatile' is required to prevent the compiler to move
- * the statement outside of the loop.
- */
- asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
- :
- : "r" (curr)
- : "memory");
-
- curr += MTE_GRANULE_SIZE;
- } while (curr != end);
+ mask = dczid_bs - 1;
+ /* STG/STZG up to the end of the first block. */
+ end1 = curr | mask;
+ end3 = curr + size;
+ /* DC GVA / GZVA in [end1, end2) */
+ end2 = end3 & ~mask;
+
+ /*
+ * The following code uses STG on the first DC GVA block even if the
+	 * start address is aligned: it appears to be faster than an alignment
+	 * check plus a conditional branch. Also, if the range size is at least
+	 * 2 DC GVA blocks, the first two loops can use a post-condition test
+	 * to save one branch each.
+ */
+#define SET_MEMTAG_RANGE(stg_post, dc_gva) \
+ do { \
+ if (size >= 2 * dczid_bs) { \
+ do { \
+ curr = stg_post(curr); \
+ } while (curr < end1); \
+ \
+ do { \
+ dc_gva(curr); \
+ curr += dczid_bs; \
+ } while (curr < end2); \
+ } \
+ \
+ while (curr < end3) \
+ curr = stg_post(curr); \
+ } while (0)
+
+ if (init)
+ SET_MEMTAG_RANGE(__stzg_post, __dc_gzva);
+ else
+ SET_MEMTAG_RANGE(__stg_post, __dc_gva);
+#undef SET_MEMTAG_RANGE
}
-void mte_enable_kernel(void);
+void mte_enable_kernel_sync(void);
+void mte_enable_kernel_async(void);
void mte_init_tags(u64 max_tag);
void mte_set_report_once(bool state);
@@ -100,11 +152,16 @@ static inline u8 mte_get_random_tag(void)
return 0xFF;
}
-static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+static inline void mte_set_mem_tag_range(void *addr, size_t size,
+ u8 tag, bool init)
+{
+}
+
+static inline void mte_enable_kernel_sync(void)
{
}
-static inline void mte_enable_kernel(void)
+static inline void mte_enable_kernel_async(void)
{
}
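
To make the end1/end2/end3 arithmetic in mte_set_mem_tag_range() above concrete, here is a worked, standalone rendering of SET_MEMTAG_RANGE with made-up numbers: plain integers stand in for tagged pointers and printf for the STG and DC GVA instructions, assuming DCZID_EL0.BS == 4 (a 64-byte block size):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up inputs: 16-byte aligned, size >= 2 blocks. */
	uint64_t curr = 0x1010, size = 0x120;
	uint64_t bs   = 4ull << 4;	/* 4 << DCZID_EL0.BS, i.e. 64 bytes */
	uint64_t mask = bs - 1;
	uint64_t end1 = curr | mask;	/* 0x103f: end of the first block */
	uint64_t end3 = curr + size;	/* 0x1130: end of the range */
	uint64_t end2 = end3 & ~mask;	/* 0x1100: last whole-block boundary */

	if (size >= 2 * bs) {
		do {	/* STG granule by granule: 0x1010, 0x1020, 0x1030 */
			printf("stg    %#llx\n", (unsigned long long)curr);
			curr += 16;
		} while (curr < end1);	/* exits with curr == 0x1040 */

		do {	/* one DC GVA per block: 0x1040, 0x1080, 0x10c0 */
			printf("dc gva %#llx\n", (unsigned long long)curr);
			curr += bs;
		} while (curr < end2);	/* exits with curr == 0x1100 */
	}

	while (curr < end3) {	/* tail granules: 0x1100, 0x1110, 0x1120 */
		printf("stg    %#llx\n", (unsigned long long)curr);
		curr += 16;
	}
	return 0;
}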
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 9b557a457f24..58c7f80f5596 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -37,35 +37,41 @@ void mte_free_tag_storage(char *storage);
/* track which pages have valid allocation tags */
#define PG_mte_tagged PG_arch_2
-void mte_sync_tags(pte_t *ptep, pte_t pte);
+void mte_zero_clear_page_tags(void *addr);
+void mte_sync_tags(pte_t old_pte, pte_t pte);
void mte_copy_page_tags(void *kto, const void *kfrom);
-void flush_mte_state(void);
+void mte_thread_init_user(void);
void mte_thread_switch(struct task_struct *next);
+void mte_suspend_enter(void);
void mte_suspend_exit(void);
long set_mte_ctrl(struct task_struct *task, unsigned long arg);
long get_mte_ctrl(struct task_struct *task);
int mte_ptrace_copy_tags(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
-void mte_assign_mem_tag_range(void *addr, size_t size);
-
#else /* CONFIG_ARM64_MTE */
/* unused if !CONFIG_ARM64_MTE, silence the compiler */
#define PG_mte_tagged 0
-static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
+static inline void mte_zero_clear_page_tags(void *addr)
+{
+}
+static inline void mte_sync_tags(pte_t old_pte, pte_t pte)
{
}
static inline void mte_copy_page_tags(void *kto, const void *kfrom)
{
}
-static inline void flush_mte_state(void)
+static inline void mte_thread_init_user(void)
{
}
static inline void mte_thread_switch(struct task_struct *next)
{
}
+static inline void mte_suspend_enter(void)
+{
+}
static inline void mte_suspend_exit(void)
{
}
@@ -84,11 +90,51 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
return -EIO;
}
-static inline void mte_assign_mem_tag_range(void *addr, size_t size)
+#endif /* CONFIG_ARM64_MTE */
+
+#ifdef CONFIG_KASAN_HW_TAGS
+/* Whether the MTE asynchronous mode is enabled. */
+DECLARE_STATIC_KEY_FALSE(mte_async_mode);
+
+static inline bool system_uses_mte_async_mode(void)
{
+ return static_branch_unlikely(&mte_async_mode);
}
-#endif /* CONFIG_ARM64_MTE */
+void mte_check_tfsr_el1(void);
+
+static inline void mte_check_tfsr_entry(void)
+{
+ mte_check_tfsr_el1();
+}
+
+static inline void mte_check_tfsr_exit(void)
+{
+ /*
+	 * Asynchronous tag check faults are synchronised into TFSR_EL1
+	 * automatically on kernel entry, but an explicit dsb() is required
+	 * on kernel exit.
+ */
+ dsb(nsh);
+ isb();
+
+ mte_check_tfsr_el1();
+}
+#else
+static inline bool system_uses_mte_async_mode(void)
+{
+ return false;
+}
+static inline void mte_check_tfsr_el1(void)
+{
+}
+static inline void mte_check_tfsr_entry(void)
+{
+}
+static inline void mte_check_tfsr_exit(void)
+{
+}
+#endif /* CONFIG_KASAN_HW_TAGS */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_MTE_H */
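
mte_check_tfsr_entry()/mte_check_tfsr_exit() above only add the ordering; the fault check itself lives out of line in mte.c. As a hedged sketch of what that helper plausibly does — SYS_TFSR_EL1_TF1 is the EL1 tag-check-fault flag and kasan_report_async() is KASAN's hook for faults with no faulting address; this is not the exact upstream body:

#include <linux/kasan.h>
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

/* Sketch: consume a pending asynchronous tag check fault. */
void sketch_check_tfsr_el1(void)
{
	u64 tfsr_el1;

	if (!system_supports_mte())
		return;

	tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
	if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
		write_sysreg_s(0, SYS_TFSR_EL1);	/* clear the flag */
		kasan_report_async();			/* report, no address */
	}
}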
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 012cffc574e8..993a27ea6f54 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -13,6 +13,7 @@
#ifndef __ASSEMBLY__
#include <linux/personality.h> /* for READ_IMPLIES_EXEC */
+#include <linux/types.h> /* for gfp_t */
#include <asm/pgtable-types.h>
struct page;
@@ -28,16 +29,19 @@ void copy_user_highpage(struct page *to, struct page *from,
void copy_highpage(struct page *to, struct page *from);
#define __HAVE_ARCH_COPY_HIGHPAGE
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ unsigned long vaddr);
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+
+void tag_clear_highpage(struct page *to);
+#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
typedef struct page *pgtable_t;
-extern int pfn_valid(unsigned long);
+int pfn_is_map_memory(unsigned long pfn);
#include <asm/memory.h>
diff --git a/arch/arm64/include/asm/paravirt.h b/arch/arm64/include/asm/paravirt.h
index cf3a0fd7c1a7..9aa193e0e8f2 100644
--- a/arch/arm64/include/asm/paravirt.h
+++ b/arch/arm64/include/asm/paravirt.h
@@ -3,23 +3,19 @@
#define _ASM_ARM64_PARAVIRT_H
#ifdef CONFIG_PARAVIRT
+#include <linux/static_call_types.h>
+
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
-struct pv_time_ops {
- unsigned long long (*steal_clock)(int cpu);
-};
-
-struct paravirt_patch_template {
- struct pv_time_ops time;
-};
+u64 dummy_steal_clock(int cpu);
-extern struct paravirt_patch_template pv_ops;
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
static inline u64 paravirt_steal_clock(int cpu)
{
- return pv_ops.time.steal_clock(cpu);
+ return static_call(pv_steal_clock)(cpu);
}
int __init pv_time_init(void);
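
The conversion above replaces the pv_ops pointer chase with a static call, so paravirt_steal_clock() compiles down to a patchable direct branch. A backend retargets it once during boot; a minimal sketch, where para_steal_clock() is a hypothetical provider with the dummy_steal_clock() signature:

#include <linux/init.h>
#include <linux/static_call.h>
#include <asm/paravirt.h>

/* Hypothetical provider with the dummy_steal_clock() signature. */
static u64 para_steal_clock(int cpu)
{
	return 0;	/* e.g. read stolen time from a shared page */
}

static int __init sketch_pv_time_init(void)
{
	/* Patch the call site; later calls branch directly to the provider. */
	static_call_update(pv_steal_clock, para_steal_clock);
	return 0;
}
early_initcall(sketch_pv_time_init);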
diff --git a/arch/arm64/include/asm/patching.h b/arch/arm64/include/asm/patching.h
new file mode 100644
index 000000000000..6bf5adc56295
--- /dev/null
+++ b/arch/arm64/include/asm/patching.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_PATCHING_H
+#define __ASM_PATCHING_H
+
+#include <linux/types.h>
+
+int aarch64_insn_read(void *addr, u32 *insnp);
+int aarch64_insn_write(void *addr, u32 insn);
+
+int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
+int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
+#endif /* __ASM_PATCHING_H */
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 60731f602d3e..4ef6f19331f9 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -239,6 +239,11 @@
/* PMMIR_EL1.SLOTS mask */
#define ARMV8_PMU_SLOTS_MASK 0xff
+#define ARMV8_PMU_BUS_SLOTS_SHIFT 8
+#define ARMV8_PMU_BUS_SLOTS_MASK 0xff
+#define ARMV8_PMU_BUS_WIDTH_SHIFT 16
+#define ARMV8_PMU_BUS_WIDTH_MASK 0xf
+
#ifdef CONFIG_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index 3c6a7f5988b1..8433a2058eb1 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -27,7 +27,10 @@ static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
{
- __pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
+ pudval_t pudval = PUD_TYPE_TABLE;
+
+ pudval |= (mm == &init_mm) ? PUD_TABLE_UXN : PUD_TABLE_PXN;
+ __pud_populate(pudp, __pa(pmdp), pudval);
}
#else
static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
@@ -45,7 +48,10 @@ static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4dp, pud_t *pudp)
{
- __p4d_populate(p4dp, __pa(pudp), PUD_TYPE_TABLE);
+ p4dval_t p4dval = P4D_TYPE_TABLE;
+
+ p4dval |= (mm == &init_mm) ? P4D_TABLE_UXN : P4D_TABLE_PXN;
+ __p4d_populate(p4dp, __pa(pudp), p4dval);
}
#else
static inline void __p4d_populate(p4d_t *p4dp, phys_addr_t pudp, p4dval_t prot)
@@ -70,17 +76,15 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
- /*
- * The pmd must be loaded with the physical address of the PTE table
- */
- __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE);
+ VM_BUG_ON(mm != &init_mm);
+ __pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
}
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
- __pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE);
+ VM_BUG_ON(mm == &init_mm);
+ __pmd_populate(pmdp, page_to_phys(ptep), PMD_TYPE_TABLE | PMD_TABLE_PXN);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 42442a0ae2ab..40085e53f573 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -72,13 +72,6 @@
#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
/*
- * Section address mask and size definitions.
- */
-#define SECTION_SHIFT PMD_SHIFT
-#define SECTION_SIZE (_AC(1, UL) << SECTION_SHIFT)
-#define SECTION_MASK (~(SECTION_SIZE-1))
-
-/*
* Contiguous page definitions.
*/
#define CONT_PTE_SHIFT (CONFIG_ARM64_CONT_PTE_SHIFT + PAGE_SHIFT)
@@ -94,6 +87,17 @@
/*
* Hardware page table definitions.
*
+ * Level 0 descriptor (P4D).
+ */
+#define P4D_TYPE_TABLE (_AT(p4dval_t, 3) << 0)
+#define P4D_TABLE_BIT (_AT(p4dval_t, 1) << 1)
+#define P4D_TYPE_MASK (_AT(p4dval_t, 3) << 0)
+#define P4D_TYPE_SECT (_AT(p4dval_t, 1) << 0)
+#define P4D_SECT_RDONLY (_AT(p4dval_t, 1) << 7) /* AP[2] */
+#define P4D_TABLE_PXN (_AT(p4dval_t, 1) << 59)
+#define P4D_TABLE_UXN (_AT(p4dval_t, 1) << 60)
+
+/*
* Level 1 descriptor (PUD).
*/
#define PUD_TYPE_TABLE (_AT(pudval_t, 3) << 0)
@@ -101,6 +105,8 @@
#define PUD_TYPE_MASK (_AT(pudval_t, 3) << 0)
#define PUD_TYPE_SECT (_AT(pudval_t, 1) << 0)
#define PUD_SECT_RDONLY (_AT(pudval_t, 1) << 7) /* AP[2] */
+#define PUD_TABLE_PXN (_AT(pudval_t, 1) << 59)
+#define PUD_TABLE_UXN (_AT(pudval_t, 1) << 60)
/*
* Level 2 descriptor (PMD).
@@ -122,6 +128,8 @@
#define PMD_SECT_CONT (_AT(pmdval_t, 1) << 52)
#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
#define PMD_SECT_UXN (_AT(pmdval_t, 1) << 54)
+#define PMD_TABLE_PXN (_AT(pmdval_t, 1) << 59)
+#define PMD_TABLE_UXN (_AT(pmdval_t, 1) << 60)
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
diff --git a/arch/arm64/include/asm/pgtable-prot.h b/arch/arm64/include/asm/pgtable-prot.h
index 046be789fbb4..7032f04c8ac6 100644
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -55,7 +55,6 @@ extern bool arm64_use_ng_mappings;
#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
#define PROT_NORMAL_TAGGED (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED))
@@ -66,16 +65,15 @@ extern bool arm64_use_ng_mappings;
#define _PAGE_DEFAULT (_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
#define PAGE_KERNEL __pgprot(PROT_NORMAL)
-#define PAGE_KERNEL_TAGGED __pgprot(PROT_NORMAL_TAGGED)
#define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
#define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(PROT_NORMAL & ~PTE_PXN)
#define PAGE_KERNEL_EXEC_CONT __pgprot((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)
-#define PAGE_S2_MEMATTR(attr) \
+#define PAGE_S2_MEMATTR(attr, has_fwb) \
({ \
u64 __val; \
- if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) \
+ if (has_fwb) \
__val = PTE_S2_MEMATTR(MT_S2_FWB_ ## attr); \
else \
__val = PTE_S2_MEMATTR(MT_S2_ ## attr); \
@@ -88,12 +86,13 @@ extern bool arm64_use_ng_mappings;
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
+#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_READONLY
#define __P011 PAGE_READONLY
-#define __P100 PAGE_READONLY_EXEC
+#define __P100 PAGE_EXECONLY
#define __P101 PAGE_READONLY_EXEC
#define __P110 PAGE_READONLY_EXEC
#define __P111 PAGE_READONLY_EXEC
@@ -102,7 +101,7 @@ extern bool arm64_use_ng_mappings;
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
+#define __S100 PAGE_EXECONLY
#define __S101 PAGE_READONLY_EXEC
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
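
Since __P100/__S100 now select PAGE_EXECONLY, a private PROT_EXEC mapping becomes genuinely execute-only once EPAN is present (without EPAN, arch_filter_pgprot() in pgtable.h below falls back to PAGE_READONLY_EXEC). A user-space sketch of the visible behaviour — illustrative, and the final read only faults on EPAN-capable hardware:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	/* Stage the code with PROT_WRITE, then drop to execute-only. */
	unsigned char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 0xc0; p[1] = 0x03; p[2] = 0x5f; p[3] = 0xd6;	/* "ret" */
	__builtin___clear_cache((char *)p, (char *)p + 4);
	if (mprotect(p, page, PROT_EXEC))
		return 1;
	((void (*)(void))p)();		/* instruction fetch: allowed */
	printf("%d\n", p[0]);		/* data read: SIGSEGV with EPAN */
	return 0;
}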
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index e17b96d0e4b5..508c7ffad515 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -26,8 +26,6 @@
#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
-#define FIRST_USER_ADDRESS 0UL
-
#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
@@ -113,11 +111,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
+/*
+ * Execute-only user mappings do not have the PTE_USER bit set. All valid
+ * kernel mappings have the PTE_UXN bit set.
+ */
#define pte_valid_not_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
-#define pte_valid_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
-
+ ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
/*
* Could the pte be present in the TLB? We must check mm_tlb_flush_pending
* so that we don't erroneously return false for pages that have been
@@ -130,12 +129,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
/*
- * p??_access_permitted() is true for valid user mappings (subject to the
- * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
- * set.
+ * p??_access_permitted() is true for valid user mappings (PTE_USER
+ * bit set, subject to the write permission check). It must return
+ * false for execute-only mappings, e.g. PROT_EXEC with EPAN, where
+ * neither PTE_USER nor PTE_UXN is set. PROT_NONE mappings do not
+ * have the PTE_VALID bit set.
*/
#define pte_access_permitted(pte, write) \
- (pte_valid_user(pte) && (!(write) || pte_write(pte)))
+ (((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
@@ -311,9 +312,25 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
__sync_icache_dcache(pte);
- if (system_supports_mte() &&
- pte_present(pte) && pte_tagged(pte) && !pte_special(pte))
- mte_sync_tags(ptep, pte);
+ /*
+	 * If the PTE would provide user space access to the tags associated
+	 * with it, then ensure that the MTE tags are synchronised. Although
+	 * pte_access_permitted() returns false for exec-only mappings, they
+ * don't expose tags (instruction fetches don't check tags).
+ */
+ if (system_supports_mte() && pte_access_permitted(pte, false) &&
+ !pte_special(pte)) {
+ pte_t old_pte = READ_ONCE(*ptep);
+ /*
+ * We only need to synchronise if the new PTE has tags enabled
+ * or if swapping in (in which case another mapping may have
+ * set tags in the past even if this PTE isn't tagged).
+		 * (!pte_none() && !pte_present()) is an open-coded version of
+		 * is_swap_pte().
+ */
+ if (pte_tagged(pte) || (!pte_none(old_pte) && !pte_present(old_pte)))
+ mte_sync_tags(old_pte, pte);
+ }
__check_racy_pte_update(mm, ptep, pte);
@@ -486,6 +503,9 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
+#define pgprot_tagged(prot) \
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_TAGGED))
+#define pgprot_mhp pgprot_tagged
/*
* DMA allocations for non-coherent devices use what the Arm architecture calls
* "Normal non-cacheable" memory, which permits speculation, unaligned accesses
@@ -505,13 +525,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT))
-
#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_TABLE)
#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
PMD_TYPE_SECT)
#define pmd_leaf(pmd) pmd_sect(pmd)
+#define pmd_bad(pmd) (!pmd_table(pmd))
#define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
#define pte_leaf_size(pte) (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
@@ -598,7 +617,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
+#define pud_bad(pud) (!pud_table(pud))
#define pud_present(pud) pte_present(pud_pte(pud))
#define pud_leaf(pud) pud_sect(pud)
#define pud_valid(pud) pte_valid(pud_pte(pud))
@@ -992,6 +1011,18 @@ static inline bool arch_wants_old_prefaulted_pte(void)
}
#define arch_wants_old_prefaulted_pte arch_wants_old_prefaulted_pte
+static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
+{
+ if (cpus_have_const_cap(ARM64_HAS_EPAN))
+ return prot;
+
+ if (pgprot_val(prot) != pgprot_val(PAGE_EXECONLY))
+ return prot;
+
+ return PAGE_READONLY_EXEC;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PGTABLE_H */
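
The condition that set_pte_at() above now applies before calling mte_sync_tags() is dense; spelled out as a standalone predicate it reads as follows (a restatement for readability, not a helper the patch adds):

#include <asm/cpufeature.h>
#include <asm/pgtable.h>

/*
 * Restatement of the check set_pte_at() performs: user space must be
 * able to observe the tags, and either the new PTE is tagged or we are
 * swapping in (an old mapping may have set tags even if this PTE is
 * untagged).
 */
static bool sketch_mte_sync_needed(pte_t old_pte, pte_t pte)
{
	if (!system_supports_mte())
		return false;
	if (!pte_access_permitted(pte, false) || pte_special(pte))
		return false;
	/* (!pte_none() && !pte_present()) is open-coded is_swap_pte(). */
	return pte_tagged(pte) ||
	       (!pte_none(old_pte) && !pte_present(old_pte));
}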
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index b112a11e9302..28a78b67d9b4 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -3,6 +3,7 @@
#define __ASM_POINTER_AUTH_H
#include <linux/bitops.h>
+#include <linux/prctl.h>
#include <linux/random.h>
#include <asm/cpufeature.h>
@@ -30,23 +31,6 @@ struct ptrauth_keys_user {
struct ptrauth_key apga;
};
-struct ptrauth_keys_kernel {
- struct ptrauth_key apia;
-};
-
-static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
-{
- if (system_supports_address_auth()) {
- get_random_bytes(&keys->apia, sizeof(keys->apia));
- get_random_bytes(&keys->apib, sizeof(keys->apib));
- get_random_bytes(&keys->apda, sizeof(keys->apda));
- get_random_bytes(&keys->apdb, sizeof(keys->apdb));
- }
-
- if (system_supports_generic_auth())
- get_random_bytes(&keys->apga, sizeof(keys->apga));
-}
-
#define __ptrauth_key_install_nosync(k, v) \
do { \
struct ptrauth_key __pki_v = (v); \
@@ -54,6 +38,12 @@ do { \
write_sysreg_s(__pki_v.hi, SYS_ ## k ## KEYHI_EL1); \
} while (0)
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+
+struct ptrauth_keys_kernel {
+ struct ptrauth_key apia;
+};
+
static __always_inline void ptrauth_keys_init_kernel(struct ptrauth_keys_kernel *keys)
{
if (system_supports_address_auth())
@@ -69,8 +59,41 @@ static __always_inline void ptrauth_keys_switch_kernel(struct ptrauth_keys_kerne
isb();
}
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+static inline void ptrauth_keys_install_user(struct ptrauth_keys_user *keys)
+{
+ if (system_supports_address_auth()) {
+ __ptrauth_key_install_nosync(APIB, keys->apib);
+ __ptrauth_key_install_nosync(APDA, keys->apda);
+ __ptrauth_key_install_nosync(APDB, keys->apdb);
+ }
+
+ if (system_supports_generic_auth())
+ __ptrauth_key_install_nosync(APGA, keys->apga);
+}
+
+static inline void ptrauth_keys_init_user(struct ptrauth_keys_user *keys)
+{
+ if (system_supports_address_auth()) {
+ get_random_bytes(&keys->apia, sizeof(keys->apia));
+ get_random_bytes(&keys->apib, sizeof(keys->apib));
+ get_random_bytes(&keys->apda, sizeof(keys->apda));
+ get_random_bytes(&keys->apdb, sizeof(keys->apdb));
+ }
+
+ if (system_supports_generic_auth())
+ get_random_bytes(&keys->apga, sizeof(keys->apga));
+
+ ptrauth_keys_install_user(keys);
+}
+
extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg);
+extern int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
+ unsigned long enabled);
+extern int ptrauth_get_enabled_keys(struct task_struct *tsk);
+
static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr)
{
return ptrauth_clear_pac(ptr);
@@ -85,20 +108,45 @@ static __always_inline void ptrauth_enable(void)
isb();
}
-#define ptrauth_thread_init_user(tsk) \
- ptrauth_keys_init_user(&(tsk)->thread.keys_user)
-#define ptrauth_thread_init_kernel(tsk) \
- ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
-#define ptrauth_thread_switch_kernel(tsk) \
- ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
+#define ptrauth_suspend_exit() \
+ ptrauth_keys_install_user(&current->thread.keys_user)
+
+#define ptrauth_thread_init_user() \
+ do { \
+ ptrauth_keys_init_user(&current->thread.keys_user); \
+ \
+ /* enable all keys */ \
+ if (system_supports_address_auth()) \
+ set_task_sctlr_el1(current->thread.sctlr_user | \
+ SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
+ SCTLR_ELx_ENDA | SCTLR_ELx_ENDB); \
+ } while (0)
+
+#define ptrauth_thread_switch_user(tsk) \
+ ptrauth_keys_install_user(&(tsk)->thread.keys_user)
#else /* CONFIG_ARM64_PTR_AUTH */
#define ptrauth_enable()
#define ptrauth_prctl_reset_keys(tsk, arg) (-EINVAL)
+#define ptrauth_set_enabled_keys(tsk, keys, enabled) (-EINVAL)
+#define ptrauth_get_enabled_keys(tsk) (-EINVAL)
#define ptrauth_strip_insn_pac(lr) (lr)
-#define ptrauth_thread_init_user(tsk)
+#define ptrauth_suspend_exit()
+#define ptrauth_thread_init_user()
+#define ptrauth_thread_switch_user(tsk)
+#endif /* CONFIG_ARM64_PTR_AUTH */
+
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+#define ptrauth_thread_init_kernel(tsk) \
+ ptrauth_keys_init_kernel(&(tsk)->thread.keys_kernel)
+#define ptrauth_thread_switch_kernel(tsk) \
+ ptrauth_keys_switch_kernel(&(tsk)->thread.keys_kernel)
+#else
#define ptrauth_thread_init_kernel(tsk)
#define ptrauth_thread_switch_kernel(tsk)
-#endif /* CONFIG_ARM64_PTR_AUTH */
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+#define PR_PAC_ENABLED_KEYS_MASK \
+ (PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
#endif /* __ASM_POINTER_AUTH_H */
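
PR_PAC_ENABLED_KEYS_MASK backs the new ptrauth_set_enabled_keys()/ptrauth_get_enabled_keys() hooks, which the rest of the series wires up to the PR_PAC_SET_ENABLED_KEYS/PR_PAC_GET_ENABLED_KEYS prctls. A user-space sketch, assuming kernel headers new enough to carry the constants:

#include <linux/prctl.h>
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	/* Disable the IB key for this thread, leave the others enabled. */
	if (prctl(PR_PAC_SET_ENABLED_KEYS, PR_PAC_APIBKEY, 0, 0, 0))
		perror("PR_PAC_SET_ENABLED_KEYS");

	/* Bitmask of currently enabled keys (negative on error). */
	printf("enabled keys: %#x\n",
	       prctl(PR_PAC_GET_ENABLED_KEYS, 0, 0, 0, 0));
	return 0;
}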
diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h
index 80e946b2abee..e83f0982b99c 100644
--- a/arch/arm64/include/asm/preempt.h
+++ b/arch/arm64/include/asm/preempt.h
@@ -23,7 +23,7 @@ static inline void preempt_count_set(u64 pc)
} while (0)
#define init_idle_preempt_count(p, cpu) do { \
- task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
+ task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)
static inline void set_preempt_need_resched(void)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index ca2cd75d3286..b6517fd03d7b 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -148,14 +148,20 @@ struct thread_struct {
struct debug_info debug; /* debugging */
#ifdef CONFIG_ARM64_PTR_AUTH
struct ptrauth_keys_user keys_user;
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
struct ptrauth_keys_kernel keys_kernel;
#endif
+#endif
#ifdef CONFIG_ARM64_MTE
- u64 sctlr_tcf0;
u64 gcr_user_excl;
#endif
+ u64 sctlr_user;
};
+#define SCTLR_USER_MASK \
+ (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | SCTLR_ELx_ENDA | SCTLR_ELx_ENDB | \
+ SCTLR_EL1_TCF0_MASK)
+
static inline void arch_thread_struct_whitelist(unsigned long *offset,
unsigned long *size)
{
@@ -247,6 +253,8 @@ extern void release_thread(struct task_struct *);
unsigned long get_wchan(struct task_struct *p);
+void set_task_sctlr_el1(u64 sctlr);
+
/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
struct task_struct *next);
@@ -301,6 +309,11 @@ extern void __init minsigstksz_setup(void);
/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg)
+/* PR_PAC_{SET,GET}_ENABLED_KEYS prctl */
+#define PAC_SET_ENABLED_KEYS(tsk, keys, enabled) \
+ ptrauth_set_enabled_keys(tsk, keys, enabled)
+#define PAC_GET_ENABLED_KEYS(tsk) ptrauth_get_enabled_keys(tsk)
+
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg);
@@ -316,13 +329,13 @@ long get_tagged_addr_ctrl(struct task_struct *task);
* of header definitions for the use of task_stack_page.
*/
-#define current_top_of_stack() \
-({ \
- struct stack_info _info; \
- BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info)); \
- _info.high; \
+#define current_top_of_stack() \
+({ \
+ struct stack_info _info; \
+ BUG_ON(!on_accessible_stack(current, current_stack_pointer, 1, &_info)); \
+ _info.high; \
})
-#define on_thread_stack() (on_task_stack(current, current_stack_pointer, NULL))
+#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL))
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/ptdump.h b/arch/arm64/include/asm/ptdump.h
index 38187f74e089..b1dd7ecff7ef 100644
--- a/arch/arm64/include/asm/ptdump.h
+++ b/arch/arm64/include/asm/ptdump.h
@@ -23,7 +23,7 @@ struct ptdump_info {
void ptdump_walk(struct seq_file *s, struct ptdump_info *info);
#ifdef CONFIG_PTDUMP_DEBUGFS
-void ptdump_debugfs_register(struct ptdump_info *info, const char *name);
+void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name);
#else
static inline void ptdump_debugfs_register(struct ptdump_info *info,
const char *name) { }
diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index eaa2cd92e4c1..8297bccf0784 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -9,18 +9,18 @@
#ifdef CONFIG_SHADOW_CALL_STACK
scs_sp .req x18
- .macro scs_load tsk, tmp
+ .macro scs_load tsk
ldr scs_sp, [\tsk, #TSK_TI_SCS_SP]
.endm
- .macro scs_save tsk, tmp
+ .macro scs_save tsk
str scs_sp, [\tsk, #TSK_TI_SCS_SP]
.endm
#else
- .macro scs_load tsk, tmp
+ .macro scs_load tsk
.endm
- .macro scs_save tsk, tmp
+ .macro scs_save tsk
.endm
#endif /* CONFIG_SHADOW_CALL_STACK */
diff --git a/arch/arm64/include/asm/sdei.h b/arch/arm64/include/asm/sdei.h
index 63e0b92a5fbb..7bea1d705dd6 100644
--- a/arch/arm64/include/asm/sdei.h
+++ b/arch/arm64/include/asm/sdei.h
@@ -37,13 +37,17 @@ struct sdei_registered_event;
asmlinkage unsigned long __sdei_handler(struct pt_regs *regs,
struct sdei_registered_event *arg);
+unsigned long do_sdei_event(struct pt_regs *regs,
+ struct sdei_registered_event *arg);
+
unsigned long sdei_arch_get_entry_point(int conduit);
#define sdei_arch_get_entry_point(x) sdei_arch_get_entry_point(x)
struct stack_info;
-bool _on_sdei_stack(unsigned long sp, struct stack_info *info);
-static inline bool on_sdei_stack(unsigned long sp,
+bool _on_sdei_stack(unsigned long sp, unsigned long size,
+ struct stack_info *info);
+static inline bool on_sdei_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
if (!IS_ENABLED(CONFIG_VMAP_STACK))
@@ -51,7 +55,7 @@ static inline bool on_sdei_stack(unsigned long sp,
if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
return false;
if (in_nmi())
- return _on_sdei_stack(sp, info);
+ return _on_sdei_stack(sp, size, info);
return false;
}
diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 2f36b16a5b5d..e4ad9db53af1 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -13,6 +13,7 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
extern char __hyp_text_start[], __hyp_text_end[];
extern char __hyp_rodata_start[], __hyp_rodata_end[];
extern char __hyp_reloc_begin[], __hyp_reloc_end[];
+extern char __hyp_bss_start[], __hyp_bss_end[];
extern char __idmap_text_start[], __idmap_text_end[];
extern char __initdata_begin[], __initdata_end[];
extern char __inittext_begin[], __inittext_end[];
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index bcb01ca15325..fc55f5a57a06 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -73,12 +73,10 @@ asmlinkage void secondary_start_kernel(void);
/*
* Initial data for bringing up a secondary CPU.
- * @stack - sp for the secondary CPU
* @status - Result passed back from the secondary CPU to
* indicate failure.
*/
struct secondary_data {
- void *stack;
struct task_struct *task;
long status;
};
@@ -145,6 +143,7 @@ bool cpus_are_stuck_in_kernel(void);
extern void crash_smp_send_stop(void);
extern bool smp_crash_stop_failed(void);
+extern void panic_smp_self_stop(void);
#endif /* ifndef __ASSEMBLY__ */
diff --git a/arch/arm64/include/asm/sparsemem.h b/arch/arm64/include/asm/sparsemem.h
index eb4a75d720ed..4b73463423c3 100644
--- a/arch/arm64/include/asm/sparsemem.h
+++ b/arch/arm64/include/asm/sparsemem.h
@@ -5,7 +5,6 @@
#ifndef __ASM_SPARSEMEM_H
#define __ASM_SPARSEMEM_H
-#ifdef CONFIG_SPARSEMEM
#define MAX_PHYSMEM_BITS CONFIG_ARM64_PA_BITS
/*
@@ -27,6 +26,4 @@
#define SECTION_SIZE_BITS 27
#endif /* CONFIG_ARM64_64K_PAGES */
-#endif /* CONFIG_SPARSEMEM*/
-
#endif
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index eb29b1fe8255..1801399204d7 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -69,14 +69,14 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
-static inline bool on_stack(unsigned long sp, unsigned long low,
- unsigned long high, enum stack_type type,
- struct stack_info *info)
+static inline bool on_stack(unsigned long sp, unsigned long size,
+ unsigned long low, unsigned long high,
+ enum stack_type type, struct stack_info *info)
{
if (!low)
return false;
- if (sp < low || sp >= high)
+ if (sp < low || sp + size < sp || sp + size > high)
return false;
if (info) {
@@ -87,38 +87,38 @@ static inline bool on_stack(unsigned long sp, unsigned long low,
return true;
}
-static inline bool on_irq_stack(unsigned long sp,
+static inline bool on_irq_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
unsigned long high = low + IRQ_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_IRQ, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
}
static inline bool on_task_stack(const struct task_struct *tsk,
- unsigned long sp,
+ unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)task_stack_page(tsk);
unsigned long high = low + THREAD_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_TASK, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
}
#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
-static inline bool on_overflow_stack(unsigned long sp,
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
unsigned long high = low + OVERFLOW_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_OVERFLOW, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}
#else
-static inline bool on_overflow_stack(unsigned long sp,
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
struct stack_info *info) { return false; }
#endif
@@ -128,47 +128,27 @@ static inline bool on_overflow_stack(unsigned long sp,
* context.
*/
static inline bool on_accessible_stack(const struct task_struct *tsk,
- unsigned long sp,
+ unsigned long sp, unsigned long size,
struct stack_info *info)
{
if (info)
info->type = STACK_TYPE_UNKNOWN;
- if (on_task_stack(tsk, sp, info))
+ if (on_task_stack(tsk, sp, size, info))
return true;
if (tsk != current || preemptible())
return false;
- if (on_irq_stack(sp, info))
+ if (on_irq_stack(sp, size, info))
return true;
- if (on_overflow_stack(sp, info))
+ if (on_overflow_stack(sp, size, info))
return true;
- if (on_sdei_stack(sp, info))
+ if (on_sdei_stack(sp, size, info))
return true;
return false;
}
-static inline void start_backtrace(struct stackframe *frame,
- unsigned long fp, unsigned long pc)
-{
- frame->fp = fp;
- frame->pc = pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame->graph = 0;
-#endif
-
- /*
- * Prime the first unwind.
- *
- * In unwind_frame() we'll check that the FP points to a valid stack,
- * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
- * treated as a transition to whichever stack that happens to be. The
- * prev_fp value won't be used, but we set it to 0 such that it is
- * definitely not an accessible stack address.
- */
- bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
- frame->prev_fp = 0;
- frame->prev_type = STACK_TYPE_UNKNOWN;
-}
+void start_backtrace(struct stackframe *frame, unsigned long fp,
+ unsigned long pc);
#endif /* __ASM_STACKTRACE_H */
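
The new `sp + size < sp` clause in on_stack() above is an overflow guard: now that callers pass the size of the object being accessed, `sp + size` can wrap past the top of the address space and would otherwise compare below `high`. The same pattern in isolation:

#include <stdbool.h>
#include <stdint.h>

/* Does the object [sp, sp + size) lie within the stack [low, high)? */
static bool sketch_on_stack(uint64_t sp, uint64_t size,
			    uint64_t low, uint64_t high)
{
	/* sp + size < sp catches unsigned wraparound for huge sizes. */
	if (sp < low || sp + size < sp || sp + size > high)
		return false;
	return true;
}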
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index dfd4edbfe360..7b9c3acba684 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -283,6 +283,8 @@
#define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL
/* Filtering controls */
+#define SYS_PMSNEVFR_EL1 sys_reg(3, 0, 9, 9, 1)
+
#define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4)
#define SYS_PMSFCR_EL1_FE_SHIFT 0
#define SYS_PMSFCR_EL1_FT_SHIFT 1
@@ -333,6 +335,55 @@
/*** End of Statistical Profiling Extension ***/
+/*
+ * TRBE Registers
+ */
+#define SYS_TRBLIMITR_EL1 sys_reg(3, 0, 9, 11, 0)
+#define SYS_TRBPTR_EL1 sys_reg(3, 0, 9, 11, 1)
+#define SYS_TRBBASER_EL1 sys_reg(3, 0, 9, 11, 2)
+#define SYS_TRBSR_EL1 sys_reg(3, 0, 9, 11, 3)
+#define SYS_TRBMAR_EL1 sys_reg(3, 0, 9, 11, 4)
+#define SYS_TRBTRG_EL1 sys_reg(3, 0, 9, 11, 6)
+#define SYS_TRBIDR_EL1 sys_reg(3, 0, 9, 11, 7)
+
+#define TRBLIMITR_LIMIT_MASK GENMASK_ULL(51, 0)
+#define TRBLIMITR_LIMIT_SHIFT 12
+#define TRBLIMITR_NVM BIT(5)
+#define TRBLIMITR_TRIG_MODE_MASK GENMASK(1, 0)
+#define TRBLIMITR_TRIG_MODE_SHIFT 3
+#define TRBLIMITR_FILL_MODE_MASK GENMASK(1, 0)
+#define TRBLIMITR_FILL_MODE_SHIFT 1
+#define TRBLIMITR_ENABLE BIT(0)
+#define TRBPTR_PTR_MASK GENMASK_ULL(63, 0)
+#define TRBPTR_PTR_SHIFT 0
+#define TRBBASER_BASE_MASK GENMASK_ULL(51, 0)
+#define TRBBASER_BASE_SHIFT 12
+#define TRBSR_EC_MASK GENMASK(5, 0)
+#define TRBSR_EC_SHIFT 26
+#define TRBSR_IRQ BIT(22)
+#define TRBSR_TRG BIT(21)
+#define TRBSR_WRAP BIT(20)
+#define TRBSR_ABORT BIT(18)
+#define TRBSR_STOP BIT(17)
+#define TRBSR_MSS_MASK GENMASK(15, 0)
+#define TRBSR_MSS_SHIFT 0
+#define TRBSR_BSC_MASK GENMASK(5, 0)
+#define TRBSR_BSC_SHIFT 0
+#define TRBSR_FSC_MASK GENMASK(5, 0)
+#define TRBSR_FSC_SHIFT 0
+#define TRBMAR_SHARE_MASK GENMASK(1, 0)
+#define TRBMAR_SHARE_SHIFT 8
+#define TRBMAR_OUTER_MASK GENMASK(3, 0)
+#define TRBMAR_OUTER_SHIFT 4
+#define TRBMAR_INNER_MASK GENMASK(3, 0)
+#define TRBMAR_INNER_SHIFT 0
+#define TRBTRG_TRG_MASK GENMASK(31, 0)
+#define TRBTRG_TRG_SHIFT 0
+#define TRBIDR_FLAG BIT(5)
+#define TRBIDR_PROG BIT(4)
+#define TRBIDR_ALIGN_MASK GENMASK(3, 0)
+#define TRBIDR_ALIGN_SHIFT 0
+
#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
@@ -475,9 +526,15 @@
#define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7)
#define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0)
+#define SYS_HFGRTR_EL2 sys_reg(3, 4, 1, 1, 4)
+#define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5)
+#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6)
#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
+#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4)
+#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5)
+#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1)
#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
@@ -565,8 +622,10 @@
#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT)
#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_ENIA_SHIFT 31
+
#define SCTLR_ELx_ITFSB (BIT(37))
-#define SCTLR_ELx_ENIA (BIT(31))
+#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT))
#define SCTLR_ELx_ENIB (BIT(30))
#define SCTLR_ELx_ENDA (BIT(27))
#define SCTLR_ELx_EE (BIT(25))
@@ -579,9 +638,6 @@
#define SCTLR_ELx_A (BIT(1))
#define SCTLR_ELx_M (BIT(0))
-#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
- SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_IESB)
-
/* SCTLR_EL2 specific flags. */
#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \
(BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \
@@ -593,10 +649,16 @@
#define ENDIAN_SET_EL2 0
#endif
+#define INIT_SCTLR_EL2_MMU_ON \
+ (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_ELx_I | \
+ SCTLR_ELx_IESB | SCTLR_ELx_WXN | ENDIAN_SET_EL2 | \
+ SCTLR_ELx_ITFSB | SCTLR_EL2_RES1)
+
#define INIT_SCTLR_EL2_MMU_OFF \
(SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_EPAN (BIT(57))
#define SCTLR_EL1_ATA0 (BIT(42))
#define SCTLR_EL1_TCF0_SHIFT 38
@@ -637,14 +699,12 @@
SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \
- SCTLR_EL1_RES1)
+ SCTLR_EL1_EPAN | SCTLR_EL1_RES1)
/* MAIR_ELx memory attributes (used by Linux) */
#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
#define MAIR_ATTR_DEVICE_nGnRE UL(0x04)
-#define MAIR_ATTR_DEVICE_GRE UL(0x0c)
#define MAIR_ATTR_NORMAL_NC UL(0x44)
-#define MAIR_ATTR_NORMAL_WT UL(0xbb)
#define MAIR_ATTR_NORMAL_TAGGED UL(0xf0)
#define MAIR_ATTR_NORMAL UL(0xff)
#define MAIR_ATTR_MASK UL(0xff)
@@ -796,6 +856,11 @@
#define ID_AA64MMFR0_PARANGE_48 0x5
#define ID_AA64MMFR0_PARANGE_52 0x6
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7
+
#ifdef CONFIG_ARM64_PA_BITS_52
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
#else
@@ -835,6 +900,7 @@
#define ID_AA64MMFR2_CNP_SHIFT 0
/* id_aa64dfr0 */
+#define ID_AA64DFR0_TRBE_SHIFT 44
#define ID_AA64DFR0_TRACE_FILT_SHIFT 40
#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36
#define ID_AA64DFR0_PMSVER_SHIFT 32
@@ -961,14 +1027,17 @@
#define ID_PFR1_PROGMOD_SHIFT 0
#if defined(CONFIG_ARM64_4K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN4_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0x7
#elif defined(CONFIG_ARM64_16K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN16_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0xF
#elif defined(CONFIG_ARM64_64K_PAGES)
-#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN64_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX 0x7
#endif
#define MVFR2_FPMISC_SHIFT 4
@@ -1024,6 +1093,66 @@
#define TRFCR_ELx_ExTRE BIT(1)
#define TRFCR_ELx_E0TRE BIT(0)
+
+/* GIC Hypervisor interface registers */
+/* ICH_MISR_EL2 bit definitions */
+#define ICH_MISR_EOI (1 << 0)
+#define ICH_MISR_U (1 << 1)
+
+/* ICH_LR*_EL2 bit definitions */
+#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
+
+#define ICH_LR_EOI (1ULL << 41)
+#define ICH_LR_GROUP (1ULL << 60)
+#define ICH_LR_HW (1ULL << 61)
+#define ICH_LR_STATE (3ULL << 62)
+#define ICH_LR_PENDING_BIT (1ULL << 62)
+#define ICH_LR_ACTIVE_BIT (1ULL << 63)
+#define ICH_LR_PHYS_ID_SHIFT 32
+#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
+#define ICH_LR_PRIORITY_SHIFT 48
+#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT)
+
+/* ICH_HCR_EL2 bit definitions */
+#define ICH_HCR_EN (1 << 0)
+#define ICH_HCR_UIE (1 << 1)
+#define ICH_HCR_NPIE (1 << 3)
+#define ICH_HCR_TC (1 << 10)
+#define ICH_HCR_TALL0 (1 << 11)
+#define ICH_HCR_TALL1 (1 << 12)
+#define ICH_HCR_EOIcount_SHIFT 27
+#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT)
+
+/* ICH_VMCR_EL2 bit definitions */
+#define ICH_VMCR_ACK_CTL_SHIFT 2
+#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
+#define ICH_VMCR_FIQ_EN_SHIFT 3
+#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT)
+#define ICH_VMCR_CBPR_SHIFT 4
+#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT)
+#define ICH_VMCR_EOIM_SHIFT 9
+#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT)
+#define ICH_VMCR_BPR1_SHIFT 18
+#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT)
+#define ICH_VMCR_BPR0_SHIFT 21
+#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT)
+#define ICH_VMCR_PMR_SHIFT 24
+#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
+#define ICH_VMCR_ENG0_SHIFT 0
+#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT)
+#define ICH_VMCR_ENG1_SHIFT 1
+#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT)
+
+/* ICH_VTR_EL2 bit definitions */
+#define ICH_VTR_PRI_BITS_SHIFT 29
+#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT)
+#define ICH_VTR_ID_BITS_SHIFT 23
+#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT)
+#define ICH_VTR_SEIS_SHIFT 22
+#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT)
+#define ICH_VTR_A3V_SHIFT 21
+#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
+
#ifdef __ASSEMBLY__
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
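
A note on the TRBE definitions added to sysreg.h above: apart from TRBLIMITR_LIMIT_MASK and the other GENMASK_ULL() cases, the *_MASK values are relative to bit 0 and are meant to be applied after shifting. A sketch of the intended extraction pattern, with a made-up register snapshot:

#include <stdint.h>
#include <stdio.h>

#define TRBSR_EC_SHIFT	26
#define TRBSR_EC_MASK	0x3full	/* GENMASK(5, 0): applied after the shift */

int main(void)
{
	uint64_t trbsr = 0x6c400000;	/* made-up TRBSR_EL1 snapshot */
	uint64_t ec = (trbsr >> TRBSR_EC_SHIFT) & TRBSR_EC_MASK;

	printf("TRBE event class: %#llx\n", (unsigned long long)ec);
	return 0;
}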
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 9f4e3b266f21..6623c99f0984 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -55,6 +55,8 @@ void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst,
+ struct task_struct *src);
#endif
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 61c97d3b58c7..c995d1f4594f 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -28,6 +28,10 @@ static void tlb_flush(struct mmu_gather *tlb);
*/
static inline int tlb_get_level(struct mmu_gather *tlb)
{
+ /* The TTL field is only valid for the leaf entry. */
+ if (tlb->freed_tables)
+ return 0;
+
if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
tlb->cleared_puds ||
tlb->cleared_p4ds))
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 3b8dca4eb08d..ec2db3419c41 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -17,17 +17,9 @@ int pcibus_to_node(struct pci_bus *bus);
#include <linux/arch_topology.h>
void update_freq_counters_refs(void);
-void topology_scale_freq_tick(void);
-
-#ifdef CONFIG_ARM64_AMU_EXTN
-/*
- * Replace task scheduler's default counter-based
- * frequency-invariance scale factor setting.
- */
-#define arch_scale_freq_tick topology_scale_freq_tick
-#endif /* CONFIG_ARM64_AMU_EXTN */
/* Replace task scheduler's default frequency-invariant accounting */
+#define arch_scale_freq_tick topology_scale_freq_tick
#define arch_set_freq_scale topology_set_freq_scale
#define arch_scale_freq_capacity topology_get_freq_scale
#define arch_scale_freq_invariant topology_scale_freq_invariant
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 0deb88467111..b5f08621fa29 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -20,6 +20,7 @@
#include <asm/cpufeature.h>
#include <asm/mmu.h>
+#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>
@@ -188,6 +189,23 @@ static inline void __uaccess_enable_tco(void)
ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}
+/*
+ * These functions disable tag checking only in MTE asynchronous mode:
+ * in synchronous mode the exception is raised at the faulting access
+ * itself, and the nofault accessors or load_unaligned_zeropad() handle it.
+ */
+static inline void __uaccess_disable_tco_async(void)
+{
+ if (system_uses_mte_async_mode())
+ __uaccess_disable_tco();
+}
+
+static inline void __uaccess_enable_tco_async(void)
+{
+ if (system_uses_mte_async_mode())
+ __uaccess_enable_tco();
+}
+
static inline void uaccess_disable_privileged(void)
{
__uaccess_disable_tco();
@@ -307,8 +325,10 @@ do { \
do { \
int __gkn_err = 0; \
\
+ __uaccess_enable_tco_async(); \
__raw_get_mem("ldr", *((type *)(dst)), \
(__force type *)(src), __gkn_err); \
+ __uaccess_disable_tco_async(); \
if (unlikely(__gkn_err)) \
goto err_label; \
} while (0)
@@ -380,8 +400,10 @@ do { \
do { \
int __pkn_err = 0; \
\
+ __uaccess_enable_tco_async(); \
__raw_put_mem("str", *((type *)(src)), \
(__force type *)(dst), __pkn_err); \
+ __uaccess_disable_tco_async(); \
if (unlikely(__pkn_err)) \
goto err_label; \
} while(0)
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 949788f5ba40..727bfc3be99b 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -38,7 +38,7 @@
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-#define __NR_compat_syscalls 443
+#define __NR_compat_syscalls 447
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 3d874f624056..99ffcafc736c 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -893,6 +893,14 @@ __SYSCALL(__NR_process_madvise, sys_process_madvise)
__SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
#define __NR_mount_setattr 442
__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
+#define __NR_quotactl_fd 443
+__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd)
+#define __NR_landlock_create_ruleset 444
+__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
+#define __NR_landlock_add_rule 445
+__SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule)
+#define __NR_landlock_restrict_self 446
+__SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
/*
* Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
index 7508b0ac1d21..ecb6fd4c3c64 100644
--- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
@@ -155,7 +155,8 @@ static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
}
#ifdef CONFIG_TIME_NS
-static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
const struct vdso_data *ret;
diff --git a/arch/arm64/include/asm/vdso/gettimeofday.h b/arch/arm64/include/asm/vdso/gettimeofday.h
index 631ab1281633..4f7a629df81f 100644
--- a/arch/arm64/include/asm/vdso/gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/gettimeofday.h
@@ -83,11 +83,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
*/
isb();
asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
- /*
- * This isb() is required to prevent that the seq lock is
- * speculated.#
- */
- isb();
+ arch_counter_enforce_ordering(res);
return res;
}
@@ -100,7 +96,7 @@ const struct vdso_data *__arch_get_vdso_data(void)
#ifdef CONFIG_TIME_NS
static __always_inline
-const struct vdso_data *__arch_get_timens_vdso_data(void)
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
return _timens_data;
}
diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h
index 2ca708ab9b20..7a22aeea9bb5 100644
--- a/arch/arm64/include/asm/vmalloc.h
+++ b/arch/arm64/include/asm/vmalloc.h
@@ -1,4 +1,28 @@
#ifndef _ASM_ARM64_VMALLOC_H
#define _ASM_ARM64_VMALLOC_H
+#include <asm/page.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#define arch_vmap_pud_supported arch_vmap_pud_supported
+static inline bool arch_vmap_pud_supported(pgprot_t prot)
+{
+ /*
+ * Only 4k granule supports level 1 block mappings.
+ * SW table walks can't handle removal of intermediate entries.
+ */
+ return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
+ !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
+}
+
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
+static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+{
+ /* See arch_vmap_pud_supported() */
+ return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
+}
+
+#endif
+
#endif /* _ASM_ARM64_VMALLOC_H */
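
These hooks are consulted by the generic vmalloc code when it decides at which page-table level to map a huge allocation. A stand-alone sketch of that decision, with the hook results folded into two booleans (the real logic lives in mm/vmalloc.c; names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define SZ_2M   (2UL << 20)
#define SZ_1G   (1UL << 30)

static bool pud_ok = false;  /* assumption: !4K pages or PTDUMP enabled */
static bool pmd_ok = true;

static unsigned long pick_map_size(unsigned long size)
{
	/* Prefer the largest block mapping the arch hooks permit. */
	if (pud_ok && size >= SZ_1G)
		return SZ_1G;
	if (pmd_ok && size >= SZ_2M)
		return SZ_2M;
	return 4096;
}

int main(void)
{
	printf("64M alloc mapped with %lu-byte blocks\n",
	       pick_map_size(64UL << 20));
	return 0;
}
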
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
index 3333950b5909..2dcb104c645b 100644
--- a/arch/arm64/include/asm/word-at-a-time.h
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -53,7 +53,9 @@ static inline unsigned long find_zero(unsigned long mask)
*/
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
- unsigned long ret, offset;
+ unsigned long ret, tmp;
+
+ __uaccess_enable_tco_async();
/* Load word from unaligned pointer addr */
asm(
@@ -61,9 +63,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
"2:\n"
" .pushsection .fixup,\"ax\"\n"
" .align 2\n"
- "3: and %1, %2, #0x7\n"
- " bic %2, %2, #0x7\n"
- " ldr %0, [%2]\n"
+ "3: bic %1, %2, #0x7\n"
+ " ldr %0, [%1]\n"
+ " and %1, %2, #0x7\n"
" lsl %1, %1, #0x3\n"
#ifndef __AARCH64EB__
" lsr %0, %0, %1\n"
@@ -73,9 +75,11 @@ static inline unsigned long load_unaligned_zeropad(const void *addr)
" b 2b\n"
" .popsection\n"
_ASM_EXTABLE(1b, 3b)
- : "=&r" (ret), "=&r" (offset)
+ : "=&r" (ret), "=&r" (tmp)
: "r" (addr), "Q" (*(unsigned long *)addr));
+ __uaccess_disable_tco_async();
+
return ret;
}
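
The reordered fixup loads from the aligned address first and only then derives the byte offset, so a single scratch register suffices. A C model of the little-endian fixup arithmetic, assuming an 8-byte-aligned buffer so the aligned load stays in bounds:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint64_t load_unaligned_zeropad_model(const unsigned char *p)
{
	uintptr_t addr = (uintptr_t)p;
	uintptr_t aligned = addr & ~7UL;      /* bic %1, %2, #0x7 */
	unsigned int shift = (addr & 7) * 8;  /* and ...; lsl #0x3 */
	uint64_t word;

	memcpy(&word, (const void *)aligned, 8);  /* ldr %0, [%1] */
	return word >> shift;                     /* lsr %0, %0, %1 */
}

int main(void)
{
	_Alignas(8) unsigned char buf[16] = "abcdefgh";

	printf("%#llx\n",
	       (unsigned long long)load_unaligned_zeropad_model(buf + 3));
	return 0;
}
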
diff --git a/arch/arm64/include/asm/xen/swiotlb-xen.h b/arch/arm64/include/asm/xen/swiotlb-xen.h
new file mode 100644
index 000000000000..455ade5d5320
--- /dev/null
+++ b/arch/arm64/include/asm/xen/swiotlb-xen.h
@@ -0,0 +1 @@
+#include <xen/arm/swiotlb-xen.h>
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 24223adae150..b3edde68bc3e 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -184,6 +184,17 @@ struct kvm_vcpu_events {
__u32 reserved[12];
};
+struct kvm_arm_copy_mte_tags {
+ __u64 guest_ipa;
+ __u64 length;
+ void __user *addr;
+ __u64 flags;
+ __u64 reserved[2];
+};
+
+#define KVM_ARM_TAGS_TO_GUEST 0
+#define KVM_ARM_TAGS_FROM_GUEST 1
+
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index ed65576ce710..cce308586fcc 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -9,15 +9,27 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_insn.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_return_address.o = $(CC_FLAGS_FTRACE)
+# Remove stack protector to avoid triggering unneeded stack canary
+# checks due to randomize_kstack_offset.
+CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong
+CFLAGS_syscall.o += -fno-stack-protector
+
+# It's not safe to invoke KCOV when portions of the kernel environment aren't
+# available or are out-of-sync with HW state. Since `noinstr` doesn't always
+# inhibit KCOV instrumentation, disable it for the entire compilation unit.
+KCOV_INSTRUMENT_entry.o := n
+KCOV_INSTRUMENT_idle.o := n
+
# Object file lists.
obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
entry-common.o entry-fpsimd.o process.o ptrace.o \
setup.o signal.o sys.o stacktrace.o time.o traps.o \
- io.o vdso.o hyp-stub.o psci.o cpu_ops.o insn.o \
+ io.o vdso.o hyp-stub.o psci.o cpu_ops.o \
return_address.o cpuinfo.o cpu_errata.o \
cpufeature.o alternative.o cacheinfo.o \
smp.o smp_spin_table.o topology.o smccc-call.o \
- syscall.o proton-pack.o idreg-override.o
+ syscall.o proton-pack.o idreg-override.o idle.o \
+ patching.o
targets += efi-entry.o
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index cada0b816c8a..f3851724fe35 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -239,6 +239,18 @@ done:
}
}
+static pgprot_t __acpi_get_writethrough_mem_attribute(void)
+{
+ /*
+ * Although UEFI specifies the use of Normal Write-through for
+ * EFI_MEMORY_WT, it is seldom used in practice and not implemented
+ * by most (all?) CPUs. Rather than allocate a MAIR just for this
+ * purpose, emit a warning and use Normal Non-cacheable instead.
+ */
+ pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n");
+ return __pgprot(PROT_NORMAL_NC);
+}
+
pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
{
/*
@@ -246,7 +258,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
* types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
* mapped to a corresponding MAIR attribute encoding.
* The EFI memory attribute advises all possible capabilities
- * of a memory region. We use the most efficient capability.
+ * of a memory region.
*/
u64 attr;
@@ -254,10 +266,10 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
attr = efi_mem_attributes(addr);
if (attr & EFI_MEMORY_WB)
return PAGE_KERNEL;
- if (attr & EFI_MEMORY_WT)
- return __pgprot(PROT_NORMAL_WT);
if (attr & EFI_MEMORY_WC)
return __pgprot(PROT_NORMAL_NC);
+ if (attr & EFI_MEMORY_WT)
+ return __acpi_get_writethrough_mem_attribute();
return __pgprot(PROT_DEVICE_nGnRnE);
}
@@ -340,10 +352,10 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
default:
if (region->attribute & EFI_MEMORY_WB)
prot = PAGE_KERNEL;
- else if (region->attribute & EFI_MEMORY_WT)
- prot = __pgprot(PROT_NORMAL_WT);
else if (region->attribute & EFI_MEMORY_WC)
prot = __pgprot(PROT_NORMAL_NC);
+ else if (region->attribute & EFI_MEMORY_WT)
+ prot = __acpi_get_writethrough_mem_attribute();
}
}
return __ioremap(phys, size, prot);
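
After this change the precedence is WB, then WC, then WT, with WT demoted to Normal Non-cacheable plus a one-time warning. A stand-alone sketch of that precedence (attribute bit values per the UEFI spec; the returned strings are illustrative):

#include <stdio.h>

#define EFI_MEMORY_WB 0x8ULL
#define EFI_MEMORY_WT 0x4ULL
#define EFI_MEMORY_WC 0x2ULL

static const char *efi_attr_to_prot(unsigned long long attr)
{
	/* EFI attributes advertise capabilities; the most cacheable
	 * type the kernel supports wins. */
	if (attr & EFI_MEMORY_WB)
		return "PAGE_KERNEL";
	if (attr & EFI_MEMORY_WC)
		return "PROT_NORMAL_NC";
	if (attr & EFI_MEMORY_WT)
		return "PROT_NORMAL_NC (WT fallback, warned once)";
	return "PROT_DEVICE_nGnRnE";
}

int main(void)
{
	printf("%s\n", efi_attr_to_prot(EFI_MEMORY_WT | EFI_MEMORY_WC));
	return 0;
}
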
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
index e7c941d8340d..bfeeb5319abf 100644
--- a/arch/arm64/kernel/acpi_parking_protocol.c
+++ b/arch/arm64/kernel/acpi_parking_protocol.c
@@ -99,7 +99,8 @@ static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
* that read this address need to convert this address to the
* Boot-Loader's endianness before jumping.
*/
- writeq_relaxed(__pa_symbol(secondary_entry), &mailbox->entry_point);
+ writeq_relaxed(__pa_symbol(function_nocfi(secondary_entry)),
+ &mailbox->entry_point);
writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
arch_send_wakeup_ipi_mask(cpumask_of(cpu));
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 1184c44ea2c7..3fb79b76e9d9 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -133,11 +133,10 @@ static void clean_dcache_range_nopatch(u64 start, u64 end)
} while (cur += d_size, cur < end);
}
-static void __apply_alternatives(void *alt_region, bool is_module,
+static void __nocfi __apply_alternatives(struct alt_region *region, bool is_module,
unsigned long *feature_mask)
{
struct alt_instr *alt;
- struct alt_region *region = alt_region;
__le32 *origptr, *updptr;
alternative_cb_t alt_cb;
@@ -182,7 +181,7 @@ static void __apply_alternatives(void *alt_region, bool is_module,
*/
if (!is_module) {
dsb(ish);
- __flush_icache_all();
+ icache_inval_all_pou();
isb();
/* Ignore ARM64_CB bit from feature mask */
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index a36e2fc330d4..c85670692afa 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -27,6 +27,7 @@
int main(void)
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+ DEFINE(TSK_CPU, offsetof(struct task_struct, cpu));
BLANK();
DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
@@ -43,8 +44,11 @@ int main(void)
#endif
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
+ DEFINE(THREAD_SCTLR_USER, offsetof(struct task_struct, thread.sctlr_user));
#ifdef CONFIG_ARM64_PTR_AUTH
DEFINE(THREAD_KEYS_USER, offsetof(struct task_struct, thread.keys_user));
+#endif
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
DEFINE(THREAD_KEYS_KERNEL, offsetof(struct task_struct, thread.keys_kernel));
#endif
#ifdef CONFIG_ARM64_MTE
@@ -95,8 +99,9 @@ int main(void)
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
BLANK();
DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
+ DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
+ DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
BLANK();
- DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
BLANK();
DEFINE(FTR_OVR_VAL_OFFSET, offsetof(struct arm64_ftr_override, val));
@@ -108,6 +113,8 @@ int main(void)
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs));
+ DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1]));
+ DEFINE(CPU_GCR_EL1, offsetof(struct kvm_cpu_context, sys_regs[GCR_EL1]));
DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));
DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1]));
DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));
@@ -120,6 +127,9 @@ int main(void)
DEFINE(NVHE_INIT_TPIDR_EL2, offsetof(struct kvm_nvhe_init_params, tpidr_el2));
DEFINE(NVHE_INIT_STACK_HYP_VA, offsetof(struct kvm_nvhe_init_params, stack_hyp_va));
DEFINE(NVHE_INIT_PGD_PA, offsetof(struct kvm_nvhe_init_params, pgd_pa));
+ DEFINE(NVHE_INIT_HCR_EL2, offsetof(struct kvm_nvhe_init_params, hcr_el2));
+ DEFINE(NVHE_INIT_VTTBR, offsetof(struct kvm_nvhe_init_params, vttbr));
+ DEFINE(NVHE_INIT_VTCR, offsetof(struct kvm_nvhe_init_params, vtcr));
#endif
#ifdef CONFIG_CPU_PM
DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
@@ -132,6 +142,15 @@ int main(void)
DEFINE(ARM_SMCCC_RES_X2_OFFS, offsetof(struct arm_smccc_res, a2));
DEFINE(ARM_SMCCC_QUIRK_ID_OFFS, offsetof(struct arm_smccc_quirk, id));
DEFINE(ARM_SMCCC_QUIRK_STATE_OFFS, offsetof(struct arm_smccc_quirk, state));
+ DEFINE(ARM_SMCCC_1_2_REGS_X0_OFFS, offsetof(struct arm_smccc_1_2_regs, a0));
+ DEFINE(ARM_SMCCC_1_2_REGS_X2_OFFS, offsetof(struct arm_smccc_1_2_regs, a2));
+ DEFINE(ARM_SMCCC_1_2_REGS_X4_OFFS, offsetof(struct arm_smccc_1_2_regs, a4));
+ DEFINE(ARM_SMCCC_1_2_REGS_X6_OFFS, offsetof(struct arm_smccc_1_2_regs, a6));
+ DEFINE(ARM_SMCCC_1_2_REGS_X8_OFFS, offsetof(struct arm_smccc_1_2_regs, a8));
+ DEFINE(ARM_SMCCC_1_2_REGS_X10_OFFS, offsetof(struct arm_smccc_1_2_regs, a10));
+ DEFINE(ARM_SMCCC_1_2_REGS_X12_OFFS, offsetof(struct arm_smccc_1_2_regs, a12));
+ DEFINE(ARM_SMCCC_1_2_REGS_X14_OFFS, offsetof(struct arm_smccc_1_2_regs, a14));
+ DEFINE(ARM_SMCCC_1_2_REGS_X16_OFFS, offsetof(struct arm_smccc_1_2_regs, a16));
BLANK();
DEFINE(HIBERN_PBE_ORIG, offsetof(struct pbe, orig_address));
DEFINE(HIBERN_PBE_ADDR, offsetof(struct pbe, address));
@@ -147,11 +166,9 @@ int main(void)
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
DEFINE(PTRAUTH_USER_KEY_APIA, offsetof(struct ptrauth_keys_user, apia));
- DEFINE(PTRAUTH_USER_KEY_APIB, offsetof(struct ptrauth_keys_user, apib));
- DEFINE(PTRAUTH_USER_KEY_APDA, offsetof(struct ptrauth_keys_user, apda));
- DEFINE(PTRAUTH_USER_KEY_APDB, offsetof(struct ptrauth_keys_user, apdb));
- DEFINE(PTRAUTH_USER_KEY_APGA, offsetof(struct ptrauth_keys_user, apga));
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
DEFINE(PTRAUTH_KERNEL_KEY_APIA, offsetof(struct ptrauth_keys_kernel, apia));
+#endif
BLANK();
#endif
return 0;
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
index 37721eb6f9a1..d47ff63a5b66 100644
--- a/arch/arm64/kernel/cpu-reset.S
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -30,10 +30,7 @@
* flat identity mapping.
*/
SYM_CODE_START(__cpu_soft_restart)
- /* Clear sctlr_el1 flags. */
- mrs x12, sctlr_el1
- mov_q x13, SCTLR_ELx_FLAGS
- bic x12, x12, x13
+ mov_q x12, INIT_SCTLR_EL1_MMU_OFF
pre_disable_mmu_workaround
/*
* either disable EL1&0 translation regime or disable EL2&0 translation
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
index ed50e9587ad8..9a7b1262ef17 100644
--- a/arch/arm64/kernel/cpu-reset.h
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -13,16 +13,16 @@
void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
unsigned long arg0, unsigned long arg1, unsigned long arg2);
-static inline void __noreturn cpu_soft_restart(unsigned long entry,
- unsigned long arg0,
- unsigned long arg1,
- unsigned long arg2)
+static inline void __noreturn __nocfi cpu_soft_restart(unsigned long entry,
+ unsigned long arg0,
+ unsigned long arg1,
+ unsigned long arg2)
{
typeof(__cpu_soft_restart) *restart;
unsigned long el2_switch = !is_kernel_in_hyp_mode() &&
is_hyp_mode_available();
- restart = (void *)__pa_symbol(__cpu_soft_restart);
+ restart = (void *)__pa_symbol(function_nocfi(__cpu_soft_restart));
cpu_install_idmap();
restart(el2_switch, entry, arg0, arg1, arg2);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 506a1cd37973..e2c20c036442 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -526,6 +526,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
1, 0),
},
#endif
+#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
+ {
+ /* NVIDIA Carmel */
+ .desc = "NVIDIA Carmel CNP erratum",
+ .capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
+ ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+ },
+#endif
{
}
};
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 066030717a4c..125d5c9471ac 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -68,6 +68,7 @@
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
+#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/kasan.h>
@@ -75,6 +76,7 @@
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
+#include <asm/insn.h>
#include <asm/kvm_host.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
@@ -107,6 +109,24 @@ bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);
/*
+ * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
+ * support it?
+ */
+static bool __read_mostly allow_mismatched_32bit_el0;
+
+/*
+ * Static branch enabled only if allow_mismatched_32bit_el0 is set and we have
+ * seen at least one CPU capable of 32-bit EL0.
+ */
+DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
+
+/*
+ * Mask of CPUs supporting 32-bit EL0.
+ * Only valid if arm64_mismatched_32bit_el0 is enabled.
+ */
+static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;
+
+/*
* Flag to indicate if we have computed the system wide
* capabilities based on the boot time active CPUs. This
* will be used to determine if a new booting CPU should
@@ -383,7 +403,6 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
* of support.
*/
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
ARM64_FTR_END,
};
@@ -400,6 +419,11 @@ static const struct arm64_ftr_bits ftr_dczid[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_gmid[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, SYS_GMID_EL1_BS_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_isar0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DIVIDE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_DEBUG_SHIFT, 4, 0),
@@ -617,6 +641,9 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 1, CRm = 2 */
ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
+ /* Op1 = 1, CRn = 0, CRm = 0 */
+ ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
+
/* Op1 = 3, CRn = 0, CRm = 0 */
{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
@@ -695,14 +722,14 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
ret = ftrp->safe_val;
break;
case FTR_LOWER_SAFE:
- ret = new < cur ? new : cur;
+ ret = min(new, cur);
break;
case FTR_HIGHER_OR_ZERO_SAFE:
if (!cur || !new)
break;
fallthrough;
case FTR_HIGHER_SAFE:
- ret = new > cur ? new : cur;
+ ret = max(new, cur);
break;
default:
BUG();
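
The min()/max() substitution is behaviour-preserving. A worked stand-alone example of the safe-value rules, not using the kernel's types:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

enum ftr_type { FTR_LOWER_SAFE, FTR_HIGHER_SAFE };

static long long safe_value(enum ftr_type type, long long new, long long cur)
{
	return type == FTR_LOWER_SAFE ? min(new, cur) : max(new, cur);
}

int main(void)
{
	/* One CPU reports a LOWER_SAFE field value of 4, another 1: the
	 * sanitised system-wide value degrades to 1 so software never
	 * assumes more capability than every CPU provides. */
	printf("lower-safe(4, 1) = %lld\n",
	       safe_value(FTR_LOWER_SAFE, 4, 1));
	return 0;
}
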
@@ -767,7 +794,7 @@ static void __init sort_ftr_regs(void)
* Any bits that are not covered by an arm64_ftr_bits entry are considered
* RES0 for the system-wide value, and must strictly match.
*/
-static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
+static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
u64 val = 0;
u64 strict_mask = ~0x0ULL;
@@ -809,6 +836,12 @@ static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
reg->name,
ftrp->shift + ftrp->width - 1,
ftrp->shift, str, tmp);
+ } else if ((ftr_mask & reg->override->val) == ftr_mask) {
+ reg->override->val &= ~ftr_mask;
+ pr_warn("%s[%d:%d]: impossible override, ignored\n",
+ reg->name,
+ ftrp->shift + ftrp->width - 1,
+ ftrp->shift);
}
val = arm64_ftr_set_value(ftrp, val, ftr_new);
@@ -857,6 +890,31 @@ static void __init init_cpu_hwcaps_indirect_list(void)
static void __init setup_boot_cpu_capabilities(void);
+static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
+{
+ init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
+ init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
+ init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
+ init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
+ init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
+ init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
+ init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
+ init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+ init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
+ init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
+ init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
+ init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
+ init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
+ init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
+ init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
+ init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
+ init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
+ init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
+ init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
+ init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
+ init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+}
+
void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
/* Before we start using the tables, make sure it is sorted */
@@ -876,35 +934,17 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
- if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
- init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
- init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
- init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
- init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
- init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
- init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
- init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
- init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
- init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
- init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
- init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
- init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
- init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
- init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
- init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
- init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
- init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
- init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
- init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
- init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
- init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
- }
+ if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+ init_32bit_cpu_features(&info->aarch32);
if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
sve_init_vq_map();
}
+ if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
+ init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
+
/*
* Initialize the indirect array of CPU hwcaps capabilities pointers
* before we handle the boot CPU below.
@@ -969,21 +1009,29 @@ static void relax_cpu_ftr_reg(u32 sys_id, int field)
WARN_ON(!ftrp->width);
}
-static int update_32bit_cpu_features(int cpu, struct cpuinfo_arm64 *info,
- struct cpuinfo_arm64 *boot)
+static void lazy_init_32bit_cpu_features(struct cpuinfo_arm64 *info,
+ struct cpuinfo_arm64 *boot)
+{
+ static bool boot_cpu_32bit_regs_overridden = false;
+
+ if (!allow_mismatched_32bit_el0 || boot_cpu_32bit_regs_overridden)
+ return;
+
+ if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0))
+ return;
+
+ boot->aarch32 = info->aarch32;
+ init_32bit_cpu_features(&boot->aarch32);
+ boot_cpu_32bit_regs_overridden = true;
+}
+
+static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info,
+ struct cpuinfo_32bit *boot)
{
int taint = 0;
u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
/*
- * If we don't have AArch32 at all then skip the checks entirely
- * as the register values may be UNKNOWN and we're not going to be
- * using them for anything.
- */
- if (!id_aa64pfr0_32bit_el0(pfr0))
- return taint;
-
- /*
* If we don't have AArch32 at EL1, then relax the strictness of
* EL1-dependent register fields to avoid spurious sanity check fails.
*/
@@ -1129,10 +1177,29 @@ void update_cpu_features(int cpu,
}
/*
+ * The kernel uses the LDGM/STGM instructions and the number of tags
+ * they read/write depends on the GMID_EL1.BS field. Check that the
+ * value is the same on all CPUs.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_MTE) &&
+ id_aa64pfr1_mte(info->reg_id_aa64pfr1)) {
+ taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu,
+ info->reg_gmid, boot->reg_gmid);
+ }
+
+ /*
+ * If we don't have AArch32 at all then skip the checks entirely
+ * as the register values may be UNKNOWN and we're not going to be
+ * using them for anything.
+ *
* This relies on a sanitised view of the AArch64 ID registers
* (e.g. SYS_ID_AA64PFR0_EL1), so we call it last.
*/
- taint |= update_32bit_cpu_features(cpu, info, boot);
+ if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+ lazy_init_32bit_cpu_features(info, boot);
+ taint |= update_32bit_cpu_features(cpu, &info->aarch32,
+ &boot->aarch32);
+ }
/*
* Mismatched CPU features are a recipe for disaster. Don't even
@@ -1242,6 +1309,28 @@ has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
return feature_matches(val, entry);
}
+const struct cpumask *system_32bit_el0_cpumask(void)
+{
+ if (!system_supports_32bit_el0())
+ return cpu_none_mask;
+
+ if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+ return cpu_32bit_el0_mask;
+
+ return cpu_possible_mask;
+}
+
+static bool has_32bit_el0(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ if (!has_cpuid_feature(entry, scope))
+ return allow_mismatched_32bit_el0;
+
+ if (scope == SCOPE_SYSTEM)
+ pr_info("detected: 32-bit EL0 Support\n");
+
+ return true;
+}
+
static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
{
bool has_sre;
@@ -1321,7 +1410,10 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
* may share TLB entries with a CPU stuck in the crashed
* kernel.
*/
- if (is_kdump_kernel())
+ if (is_kdump_kernel())
+ return false;
+
+ if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
return false;
return has_cpuid_feature(entry, scope);
@@ -1443,7 +1535,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
}
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-static void
+static void __nocfi
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
typedef void (kpti_remap_fn)(int, int, phys_addr_t);
@@ -1460,7 +1552,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
if (arm64_use_ng_mappings)
return;
- remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+ remap_fn = (void *)__pa_symbol(function_nocfi(idmap_kpti_install_ng_mappings));
cpu_install_idmap();
remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
@@ -1617,7 +1709,6 @@ int get_cpu_with_amu_feat(void)
}
#endif
-#ifdef CONFIG_ARM64_VHE
static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
{
return is_kernel_in_hyp_mode();
@@ -1636,7 +1727,6 @@ static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
}
-#endif
static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
{
@@ -1821,6 +1911,18 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_pan,
},
#endif /* CONFIG_ARM64_PAN */
+#ifdef CONFIG_ARM64_EPAN
+ {
+ .desc = "Enhanced Privileged Access Never",
+ .capability = ARM64_HAS_EPAN,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64MMFR1_EL1,
+ .field_pos = ID_AA64MMFR1_PAN_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = 3,
+ },
+#endif /* CONFIG_ARM64_EPAN */
#ifdef CONFIG_ARM64_LSE_ATOMICS
{
.desc = "LSE atomic instructions",
@@ -1839,7 +1941,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
.matches = has_no_hw_prefetch,
},
-#ifdef CONFIG_ARM64_VHE
{
.desc = "Virtualization Host Extensions",
.capability = ARM64_HAS_VIRT_HOST_EXTN,
@@ -1847,12 +1948,10 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.matches = runs_at_el2,
.cpu_enable = cpu_copy_el2regs,
},
-#endif /* CONFIG_ARM64_VHE */
{
- .desc = "32-bit EL0 Support",
- .capability = ARM64_HAS_32BIT_EL0,
+ .capability = ARM64_HAS_32BIT_EL0_DO_NOT_USE,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
- .matches = has_cpuid_feature,
+ .matches = has_32bit_el0,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_EL0_SHIFT,
@@ -2361,7 +2460,7 @@ static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
{},
};
-static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
+static void cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
switch (cap->hwcap_type) {
case CAP_HWCAP:
@@ -2406,7 +2505,7 @@ static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
return rc;
}
-static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
+static void setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
/* We support emulation of accesses to CPU ID feature registers */
cpu_set_named_feature(CPUID);
@@ -2581,7 +2680,7 @@ static void check_early_cpu_features(void)
}
static void
-verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
+__verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
{
for (; caps->matches; caps++)
@@ -2592,6 +2691,14 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
}
}
+static void verify_local_elf_hwcaps(void)
+{
+ __verify_local_elf_hwcaps(arm64_elf_hwcaps);
+
+ if (id_aa64pfr0_32bit_el0(read_cpuid(ID_AA64PFR0_EL1)))
+ __verify_local_elf_hwcaps(compat_elf_hwcaps);
+}
+
static void verify_sve_features(void)
{
u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
@@ -2656,11 +2763,7 @@ static void verify_local_cpu_capabilities(void)
* on all secondary CPUs.
*/
verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU);
-
- verify_local_elf_hwcaps(arm64_elf_hwcaps);
-
- if (system_supports_32bit_el0())
- verify_local_elf_hwcaps(compat_elf_hwcaps);
+ verify_local_elf_hwcaps();
if (system_supports_sve())
verify_sve_features();
@@ -2795,6 +2898,34 @@ void __init setup_cpu_features(void)
ARCH_DMA_MINALIGN);
}
+static int enable_mismatched_32bit_el0(unsigned int cpu)
+{
+ struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
+ bool cpu_32bit = id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0);
+
+ if (cpu_32bit) {
+ cpumask_set_cpu(cpu, cpu_32bit_el0_mask);
+ static_branch_enable_cpuslocked(&arm64_mismatched_32bit_el0);
+ setup_elf_hwcaps(compat_elf_hwcaps);
+ }
+
+ return 0;
+}
+
+static int __init init_32bit_el0_mask(void)
+{
+ if (!allow_mismatched_32bit_el0)
+ return 0;
+
+ if (!zalloc_cpumask_var(&cpu_32bit_el0_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "arm64/mismatched_32bit_el0:online",
+ enable_mismatched_32bit_el0, NULL);
+}
+subsys_initcall_sync(init_32bit_el0_mask);
+
static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
{
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
@@ -2888,8 +3019,8 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn)
}
static struct undef_hook mrs_hook = {
- .instr_mask = 0xfff00000,
- .instr_val = 0xd5300000,
+ .instr_mask = 0xffff0000,
+ .instr_val = 0xd5380000,
.pstate_mask = PSR_AA32_MODE_MASK,
.pstate_val = PSR_MODE_EL0t,
.fn = emulate_mrs,
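
The tightened mask requires op0 == 3 and op1 == 0, so the hook now claims only MRS reads of the ID-register space rather than every MRS. A quick stand-alone self-check, with instruction encodings taken from the Arm ARM:

#include <stdint.h>
#include <stdio.h>

#define MRS_HOOK_MASK 0xffff0000u
#define MRS_HOOK_VAL  0xd5380000u

static int hook_matches(uint32_t insn)
{
	return (insn & MRS_HOOK_MASK) == MRS_HOOK_VAL;
}

int main(void)
{
	uint32_t mrs_midr  = 0xd5380002; /* mrs x2, midr_el1 (op0=3, op1=0) */
	uint32_t msr_ttbr0 = 0xd5182000; /* msr ttbr0_el1, x0 */

	printf("mrs midr_el1: %d, msr ttbr0_el1: %d\n",
	       hook_matches(mrs_midr), hook_matches(msr_ttbr0));
	return 0;
}
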
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index b512b5503f6e..03991eeff643 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -29,7 +29,7 @@ int arm_cpuidle_init(unsigned int cpu)
/**
* arm_cpuidle_suspend() - function to enter a low-power idle state
- * @arg: argument to pass to CPU suspend operations
+ * @index: argument to pass to CPU suspend operations
*
* Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
* operations back-end error code otherwise.
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 77605aec25fe..87731fea5e41 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -246,7 +246,7 @@ static struct kobj_type cpuregs_kobj_type = {
struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj); \
\
if (info->reg_midr) \
- return sprintf(buf, "0x%016x\n", info->reg_##_field); \
+ return sprintf(buf, "0x%016llx\n", info->reg_##_field); \
else \
return 0; \
} \
@@ -344,6 +344,32 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}
+static void __cpuinfo_store_cpu_32bit(struct cpuinfo_32bit *info)
+{
+ info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
+ info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
+ info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+ info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+ info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+ info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+ info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+ info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+ info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
+ info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+ info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+ info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+ info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+ info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
+ info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
+ info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+ info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+ info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
+
+ info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+ info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+ info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+}
+
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
info->reg_cntfrq = arch_timer_get_cntfrq();
@@ -353,7 +379,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
* with the CLIDR_EL1 fields to avoid triggering false warnings
* when there is a mismatch across the CPUs. Keep track of the
* effective value of the CTR_EL0 in our internal records for
- * acurate sanity check and feature enablement.
+ * accurate sanity check and feature enablement.
*/
info->reg_ctr = read_cpuid_effective_cachetype();
info->reg_dczid = read_cpuid(DCZID_EL0);
@@ -371,31 +397,11 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);
- /* Update the 32bit ID registers only if AArch32 is implemented */
- if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
- info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
- info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
- info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
- info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
- info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
- info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
- info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
- info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
- info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
- info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
- info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
- info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
- info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
- info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
- info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
- info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
- info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
- info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
-
- info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
- info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
- info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
- }
+ if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
+ info->reg_gmid = read_cpuid(GMID_EL1);
+
+ if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+ __cpuinfo_store_cpu_32bit(&info->aarch32);
if (IS_ENABLED(CONFIG_ARM64_SVE) &&
id_aa64pfr0_sve(info->reg_id_aa64pfr0))
diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
index e6e284265f19..58303a9ec32c 100644
--- a/arch/arm64/kernel/crash_dump.c
+++ b/arch/arm64/kernel/crash_dump.c
@@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+ *ppos += count;
+
return count;
}
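
Without the added *ppos update, a sequential reader of the ELF core header would see the same bytes on every call. A stand-alone model of the caller's read loop:

#include <stdio.h>
#include <string.h>

static const char elfcorehdr[] = "\177ELF...fake header bytes...";

static long read_chunk(char *buf, size_t count, unsigned long long *ppos)
{
	size_t avail = sizeof(elfcorehdr) - (size_t)*ppos;

	if (count > avail)
		count = avail;
	memcpy(buf, elfcorehdr + *ppos, count);
	*ppos += count;            /* the line this patch adds */
	return (long)count;
}

int main(void)
{
	char buf[8];
	unsigned long long pos = 0;
	long n;

	while ((n = read_chunk(buf, sizeof(buf), &pos)) > 0)
		printf("read %ld bytes at offset %llu\n", n, pos - n);
	return 0;
}
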
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index 0073b24b5d25..61a87fa1c305 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -28,7 +28,8 @@ SYM_CODE_START(efi_enter_kernel)
* stale icache entries from before relocation.
*/
ldr w1, =kernel_size
- bl __clean_dcache_area_poc
+ add x1, x0, x1
+ bl dcache_clean_poc
ic ialluis
/*
@@ -36,8 +37,8 @@ SYM_CODE_START(efi_enter_kernel)
* so that we can safely disable the MMU and caches.
*/
adr x0, 0f
- ldr w1, 3f
- bl __clean_dcache_area_poc
+ adr x1, 3f
+ bl dcache_clean_poc
0:
/* Turn off Dcache and MMU */
mrs x0, CurrentEL
@@ -64,5 +65,5 @@ SYM_CODE_START(efi_enter_kernel)
mov x2, xzr
mov x3, xzr
br x19
+3:
SYM_CODE_END(efi_enter_kernel)
-3: .long . - 0b
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 9d3588450473..12ce14a98b7c 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -6,7 +6,11 @@
*/
#include <linux/context_tracking.h>
+#include <linux/linkage.h>
+#include <linux/lockdep.h>
#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
#include <linux/thread_info.h>
#include <asm/cpufeature.h>
@@ -15,7 +19,11 @@
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
+#include <asm/processor.h>
+#include <asm/sdei.h>
+#include <asm/stacktrace.h>
#include <asm/sysreg.h>
+#include <asm/system_misc.h>
/*
* This is intended to match the logic in irqentry_enter(), handling the kernel
@@ -37,6 +45,8 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
lockdep_hardirqs_off(CALLER_ADDR0);
rcu_irq_enter_check_tick();
trace_hardirqs_off_finish();
+
+ mte_check_tfsr_entry();
}
/*
@@ -47,6 +57,8 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
lockdep_assert_irqs_disabled();
+ mte_check_tfsr_exit();
+
if (interrupts_enabled(regs)) {
if (regs->exit_rcu) {
trace_hardirqs_on_prepare();
@@ -63,7 +75,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
}
}
-void noinstr arm64_enter_nmi(struct pt_regs *regs)
+static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
@@ -76,7 +88,7 @@ void noinstr arm64_enter_nmi(struct pt_regs *regs)
ftrace_nmi_enter();
}
-void noinstr arm64_exit_nmi(struct pt_regs *regs)
+static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
bool restore = regs->lockdep_hardirqs;
@@ -93,7 +105,7 @@ void noinstr arm64_exit_nmi(struct pt_regs *regs)
__nmi_exit();
}
-asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
+static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
arm64_enter_nmi(regs);
@@ -101,7 +113,7 @@ asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
enter_from_kernel_mode(regs);
}
-asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
+static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
arm64_exit_nmi(regs);
@@ -109,6 +121,65 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
exit_to_kernel_mode(regs);
}
+static void __sched arm64_preempt_schedule_irq(void)
+{
+ lockdep_assert_irqs_disabled();
+
+ /*
+ * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
+ * priority masking is used the GIC irqchip driver will clear DAIF.IF
+ * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
+ * DAIF we must be handling an NMI, so skip preemption.
+ */
+ if (system_uses_irq_prio_masking() && read_sysreg(daif))
+ return;
+
+ /*
+ * Preempting a task from an IRQ means we leave copies of PSTATE
+ * on the stack. cpufeature's enable calls may modify PSTATE, but
+ * resuming one of these preempted tasks would undo those changes.
+ *
+ * Only allow a task to be preempted once cpufeatures have been
+ * enabled.
+ */
+ if (system_capabilities_finalized())
+ preempt_schedule_irq();
+}
+
+static void do_interrupt_handler(struct pt_regs *regs,
+ void (*handler)(struct pt_regs *))
+{
+ if (on_thread_stack())
+ call_on_irq_stack(regs, handler);
+ else
+ handler(regs);
+}
+
+extern void (*handle_arch_irq)(struct pt_regs *);
+extern void (*handle_arch_fiq)(struct pt_regs *);
+
+static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
+ unsigned int esr)
+{
+ arm64_enter_nmi(regs);
+
+ console_verbose();
+
+ pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n",
+ vector, smp_processor_id(), esr,
+ esr_get_class_string(esr));
+
+ __show_regs(regs);
+ panic("Unhandled exception");
+}
+
+#define UNHANDLED(el, regsize, vector) \
+asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs) \
+{ \
+ const char *desc = #regsize "-bit " #el " " #vector; \
+ __panic_unhandled(regs, desc, read_sysreg(esr_el1)); \
+}
+
#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -158,6 +229,11 @@ static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+UNHANDLED(el1t, 64, sync)
+UNHANDLED(el1t, 64, irq)
+UNHANDLED(el1t, 64, fiq)
+UNHANDLED(el1t, 64, error)
+
static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
@@ -189,15 +265,6 @@ static void noinstr el1_undef(struct pt_regs *regs)
exit_to_kernel_mode(regs);
}
-static void noinstr el1_inv(struct pt_regs *regs, unsigned long esr)
-{
- enter_from_kernel_mode(regs);
- local_daif_inherit(regs);
- bad_mode(regs, 0, esr);
- local_daif_mask();
- exit_to_kernel_mode(regs);
-}
-
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
@@ -226,14 +293,6 @@ static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
unsigned long far = read_sysreg(far_el1);
- /*
- * The CPU masked interrupts, and we are leaving them masked during
- * do_debug_exception(). Update PMR as if we had called
- * local_daif_mask().
- */
- if (system_uses_irq_prio_masking())
- gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
-
arm64_enter_el1_dbg(regs);
if (!cortex_a76_erratum_1463225_debug_handler(regs))
do_debug_exception(far, esr, regs);
@@ -249,7 +308,7 @@ static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
exit_to_kernel_mode(regs);
}
-asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
+asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -279,10 +338,50 @@ asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
el1_fpac(regs, esr);
break;
default:
- el1_inv(regs, esr);
+ __panic_unhandled(regs, "64-bit el1h sync", esr);
}
}
+static void noinstr el1_interrupt(struct pt_regs *regs,
+ void (*handler)(struct pt_regs *))
+{
+ write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+ enter_el1_irq_or_nmi(regs);
+ do_interrupt_handler(regs, handler);
+
+ /*
+ * Note: thread_info::preempt_count includes both thread_info::count
+ * and thread_info::need_resched, and is not equivalent to
+ * preempt_count().
+ */
+ if (IS_ENABLED(CONFIG_PREEMPTION) &&
+ READ_ONCE(current_thread_info()->preempt_count) == 0)
+ arm64_preempt_schedule_irq();
+
+ exit_el1_irq_or_nmi(regs);
+}
+
+asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
+{
+ el1_interrupt(regs, handle_arch_irq);
+}
+
+asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
+{
+ el1_interrupt(regs, handle_arch_fiq);
+}
+
+asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
+{
+ unsigned long esr = read_sysreg(esr_el1);
+
+ local_daif_restore(DAIF_ERRCTX);
+ arm64_enter_nmi(regs);
+ do_serror(regs, esr);
+ arm64_exit_nmi(regs);
+}
+
asmlinkage void noinstr enter_from_user_mode(void)
{
lockdep_hardirqs_off(CALLER_ADDR0);
@@ -293,6 +392,8 @@ asmlinkage void noinstr enter_from_user_mode(void)
asmlinkage void noinstr exit_to_user_mode(void)
{
+ mte_check_tfsr_exit();
+
trace_hardirqs_on_prepare();
lockdep_hardirqs_on_prepare(CALLER_ADDR0);
user_enter_irqoff();
@@ -398,19 +499,13 @@ static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
/* Only watchpoints write FAR_EL1, otherwise its UNKNOWN */
unsigned long far = read_sysreg(far_el1);
- if (system_uses_irq_prio_masking())
- gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
-
enter_from_user_mode();
do_debug_exception(far, esr, regs);
- local_daif_restore(DAIF_PROCCTX_NOIRQ);
+ local_daif_restore(DAIF_PROCCTX);
}
static void noinstr el0_svc(struct pt_regs *regs)
{
- if (system_uses_irq_prio_masking())
- gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
-
enter_from_user_mode();
cortex_a76_erratum_1463225_svc_handler();
do_el0_svc(regs);
@@ -423,7 +518,7 @@ static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
do_ptrauth_fault(regs, esr);
}
-asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -476,6 +571,56 @@ asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
}
}
+static void noinstr el0_interrupt(struct pt_regs *regs,
+ void (*handler)(struct pt_regs *))
+{
+ enter_from_user_mode();
+
+ write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+ if (regs->pc & BIT(55))
+ arm64_apply_bp_hardening();
+
+ do_interrupt_handler(regs, handler);
+}
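+
+/*
+ * With 64-bit virtual addresses, bit 55 of an address selects the
+ * translation table, so an EL0 exception whose PC has bit 55 set
+ * indicates an attempted branch into the kernel half and triggers
+ * branch-predictor hardening. A stand-alone illustration of the test
+ * (plain C, illustrative only):
+ *
+ *	#define BIT(n) (1ULL << (n))
+ *
+ *	static int pc_is_ttbr1(uint64_t pc)
+ *	{
+ *		return !!(pc & BIT(55));
+ *	}
+ *
+ *	pc_is_ttbr1(0x0000aaaadeadbeef) == 0;	// user-half PC
+ *	pc_is_ttbr1(0xffff800010000000) == 1;	// kernel-half PC
+ */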
+
+static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
+{
+ el0_interrupt(regs, handle_arch_irq);
+}
+
+asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
+{
+ __el0_irq_handler_common(regs);
+}
+
+static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
+{
+ el0_interrupt(regs, handle_arch_fiq);
+}
+
+asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
+{
+ __el0_fiq_handler_common(regs);
+}
+
+static void __el0_error_handler_common(struct pt_regs *regs)
+{
+ unsigned long esr = read_sysreg(esr_el1);
+
+ enter_from_user_mode();
+ local_daif_restore(DAIF_ERRCTX);
+ arm64_enter_nmi(regs);
+ do_serror(regs, esr);
+ arm64_exit_nmi(regs);
+ local_daif_restore(DAIF_PROCCTX);
+}
+
+asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
+{
+ __el0_error_handler_common(regs);
+}
+
#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
@@ -486,15 +631,12 @@ static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
static void noinstr el0_svc_compat(struct pt_regs *regs)
{
- if (system_uses_irq_prio_masking())
- gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
-
enter_from_user_mode();
cortex_a76_erratum_1463225_svc_handler();
do_el0_svc_compat(regs);
}
-asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
+asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
unsigned long esr = read_sysreg(esr_el1);
@@ -537,4 +679,71 @@ asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
el0_inv(regs, esr);
}
}
+
+asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
+{
+ __el0_irq_handler_common(regs);
+}
+
+asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
+{
+ __el0_fiq_handler_common(regs);
+}
+
+asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
+{
+ __el0_error_handler_common(regs);
+}
+#else /* CONFIG_COMPAT */
+UNHANDLED(el0t, 32, sync)
+UNHANDLED(el0t, 32, irq)
+UNHANDLED(el0t, 32, fiq)
+UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */
+
+#ifdef CONFIG_VMAP_STACK
+asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
+{
+ unsigned int esr = read_sysreg(esr_el1);
+ unsigned long far = read_sysreg(far_el1);
+
+ arm64_enter_nmi(regs);
+ panic_bad_stack(regs, esr, far);
+}
+#endif /* CONFIG_VMAP_STACK */
+
+#ifdef CONFIG_ARM_SDE_INTERFACE
+asmlinkage noinstr unsigned long
+__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
+{
+ unsigned long ret;
+
+ /*
+ * We didn't take an exception to get here, so the HW hasn't
+ * set/cleared bits in PSTATE that we may rely on.
+ *
+ * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
+ * whether PSTATE bits are inherited unchanged or generated from
+ * scratch, and the TF-A implementation always clears PAN and always
+ * clears UAO. There are no other known implementations.
+ *
+ * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
+ * PSTATE is modified upon architectural exceptions, and so PAN is
+ * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
+ * cleared.
+ *
+ * We must explicitly reset PAN to the expected state, including
+ * clearing it when the host isn't using it, in case a VM had it set.
+ */
+ if (system_uses_hw_pan())
+ set_pstate_pan(1);
+ else if (cpu_has_pan())
+ set_pstate_pan(0);
+
+ arm64_enter_nmi(regs);
+ ret = do_sdei_event(regs, arg);
+ arm64_exit_nmi(regs);
+
+ return ret;
+}
+#endif /* CONFIG_ARM_SDE_INTERFACE */
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
index 2ca395c25448..0a7a64753878 100644
--- a/arch/arm64/kernel/entry-fpsimd.S
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -48,6 +48,11 @@ SYM_FUNC_START(sve_get_vl)
ret
SYM_FUNC_END(sve_get_vl)
+SYM_FUNC_START(sve_set_vq)
+ sve_load_vq x0, x1, x2
+ ret
+SYM_FUNC_END(sve_set_vq)
+
/*
* Load SVE state from FPSIMD state.
*
@@ -58,16 +63,24 @@ SYM_FUNC_END(sve_get_vl)
* and the rest zeroed. All the other SVE registers will be zeroed.
*/
SYM_FUNC_START(sve_load_from_fpsimd_state)
- sve_load_vq x1, x2, x3
- fpsimd_restore x0, 8
- _for n, 0, 15, _sve_pfalse \n
- _sve_wrffr 0
- ret
+ sve_load_vq x1, x2, x3
+ fpsimd_restore x0, 8
+ sve_flush_p_ffr
+ ret
SYM_FUNC_END(sve_load_from_fpsimd_state)
-/* Zero all SVE registers but the first 128-bits of each vector */
+/*
+ * Zero all SVE registers but the first 128-bits of each vector
+ *
+ * VQ must already be configured by caller, any further updates of VQ
+ * will need to ensure that the register state remains valid.
+ *
+ * x0 = VQ - 1
+ */
SYM_FUNC_START(sve_flush_live)
- sve_flush
+ cbz x0, 1f // A VQ-1 of 0 is 128 bits so no extra Z state
+ sve_flush_z
+1: sve_flush_p_ffr
ret
SYM_FUNC_END(sve_flush_live)
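
sve_flush_live() now takes VQ - 1 in x0 and skips the Z-register flush when the vector length is 128 bits, since there is no Z state beyond the low 128 bits in that case. A stand-alone look at the arithmetic behind the early-out:

#include <stdio.h>

int main(void)
{
	/* VL = VQ * 128 bits, so a "VQ - 1" of zero means the vectors
	 * are exactly 128 bits wide and sve_flush_z can be skipped. */
	for (unsigned int vq_minus_1 = 0; vq_minus_1 < 3; vq_minus_1++) {
		unsigned int vl_bits = (vq_minus_1 + 1) * 128;

		printf("VQ-1=%u -> VL=%u bits, extra Z state: %s\n",
		       vq_minus_1, vl_bits, vq_minus_1 ? "yes" : "no");
	}
	return 0;
}
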
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a31a0a713c85..863d44f73028 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -33,12 +33,6 @@
* Context tracking and irqflag tracing need to instrument transitions between
* user and kernel mode.
*/
- .macro user_exit_irqoff
-#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
- bl enter_from_user_mode
-#endif
- .endm
-
.macro user_enter_irqoff
#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
bl exit_to_user_mode
@@ -51,16 +45,7 @@
.endr
.endm
-/*
- * Bad Abort numbers
- *-----------------
- */
-#define BAD_SYNC 0
-#define BAD_IRQ 1
-#define BAD_FIQ 2
-#define BAD_ERROR 3
-
- .macro kernel_ventry, el, label, regsize = 64
+ .macro kernel_ventry, el:req, ht:req, regsize:req, label:req
.align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
.if \el == 0
@@ -87,7 +72,7 @@ alternative_else_nop_endif
tbnz x0, #THREAD_SHIFT, 0f
sub x0, sp, x0 // x0'' = sp' - x0' = (sp + x0) - sp = x0
sub sp, sp, x0 // sp'' = sp' - x0 = (sp + x0) - x0 = sp
- b el\()\el\()_\label
+ b el\el\ht\()_\regsize\()_\label
0:
/*
@@ -119,7 +104,7 @@ alternative_else_nop_endif
sub sp, sp, x0
mrs x0, tpidrro_el0
#endif
- b el\()\el\()_\label
+ b el\el\ht\()_\regsize\()_\label
.endm
.macro tramp_alias, dst, sym
@@ -148,16 +133,18 @@ alternative_cb_end
.endm
/* Check for MTE asynchronous tag check faults */
- .macro check_mte_async_tcf, flgs, tmp
+ .macro check_mte_async_tcf, tmp, ti_flags
#ifdef CONFIG_ARM64_MTE
+ .arch_extension lse
alternative_if_not ARM64_MTE
b 1f
alternative_else_nop_endif
mrs_s \tmp, SYS_TFSRE0_EL1
tbz \tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
- orr \flgs, \flgs, #_TIF_MTE_ASYNC_FAULT
- str \flgs, [tsk, #TSK_TI_FLAGS]
+ mov \tmp, #_TIF_MTE_ASYNC_FAULT
+ add \ti_flags, tsk, #TSK_TI_FLAGS
+ stset \tmp, [\ti_flags]
msr_s SYS_TFSRE0_EL1, xzr
1:
#endif
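
The rewritten macro sets the TI flag with an LSE STSET, an atomic OR to memory, instead of a plain read-modify-write through a register. In C terms this is an atomic fetch-or on the thread's flag word (stand-alone model; the flag's bit position below is an assumption, not the kernel's value):

#include <stdatomic.h>
#include <stdio.h>

#define TIF_MTE_ASYNC_FAULT (1UL << 5)   /* assumption: example bit */

static _Atomic unsigned long ti_flags;

static void note_async_tag_fault(void)
{
	/* stset \tmp, [\ti_flags] is an atomic OR, like fetch_or */
	atomic_fetch_or_explicit(&ti_flags, TIF_MTE_ASYNC_FAULT,
				 memory_order_relaxed);
}

int main(void)
{
	note_async_tag_fault();
	printf("flags = %#lx\n", (unsigned long)atomic_load(&ti_flags));
	return 0;
}
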
@@ -244,14 +231,36 @@ alternative_else_nop_endif
disable_step_tsk x19, x20
/* Check for asynchronous tag check faults in user space */
- check_mte_async_tcf x19, x22
+ check_mte_async_tcf x22, x23
apply_ssbd 1, x22, x23
- ptrauth_keys_install_kernel tsk, x20, x22, x23
+#ifdef CONFIG_ARM64_PTR_AUTH
+alternative_if ARM64_HAS_ADDRESS_AUTH
+ /*
+ * Enable IA for in-kernel PAC if the task had it disabled. Although
+ * this could be implemented with an unconditional MRS which would avoid
+ * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
+ *
+ * Install the kernel IA key only if IA was enabled in the task. If IA
+ * was disabled on kernel exit then we would have left the kernel IA
+ * installed so there is no need to install it again.
+ */
+ ldr x0, [tsk, THREAD_SCTLR_USER]
+ tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
+ __ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
+ b 2f
+1:
+ mrs x0, sctlr_el1
+ orr x0, x0, SCTLR_ELx_ENIA
+ msr sctlr_el1, x0
+2:
+ isb
+alternative_else_nop_endif
+#endif
mte_set_kernel_gcr x22, x23
- scs_load tsk, x20
+ scs_load tsk
.else
add x21, sp, #PT_REGS_SIZE
get_current_task tsk
@@ -261,16 +270,16 @@ alternative_else_nop_endif
stp lr, x21, [sp, #S_LR]
/*
- * For exceptions from EL0, terminate the callchain here.
+ * For exceptions from EL0, create a final frame record.
* For exceptions from EL1, create a synthetic frame record so the
* interrupted code shows up in the backtrace.
*/
.if \el == 0
- mov x29, xzr
+ stp xzr, xzr, [sp, #S_STACKFRAME]
.else
stp x29, x22, [sp, #S_STACKFRAME]
- add x29, sp, #S_STACKFRAME
.endif
+ add x29, sp, #S_STACKFRAME
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
@@ -290,6 +299,8 @@ alternative_else_nop_endif
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
mrs_s x20, SYS_ICC_PMR_EL1
str x20, [sp, #S_PMR_SAVE]
+ mov x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
+ msr_s SYS_ICC_PMR_EL1, x20
alternative_else_nop_endif
/* Re-enable tag checking (TCO set on exception entry) */
@@ -349,10 +360,28 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
- scs_save tsk, x0
+ scs_save tsk
- /* No kernel C function calls after this as user keys are set. */
- ptrauth_keys_install_user tsk, x0, x1, x2
+#ifdef CONFIG_ARM64_PTR_AUTH
+alternative_if ARM64_HAS_ADDRESS_AUTH
+ /*
+ * IA was enabled for in-kernel PAC. Disable it now if needed, or
+ * alternatively install the user's IA. All other per-task keys and
+ * SCTLR bits were updated on task switch.
+ *
+ * No kernel C function calls after this.
+ */
+ ldr x0, [tsk, THREAD_SCTLR_USER]
+ tbz x0, SCTLR_ELx_ENIA_SHIFT, 1f
+ __ptrauth_keys_install_user tsk, x0, x1, x2
+ b 2f
+1:
+ mrs x0, sctlr_el1
+ bic x0, x0, SCTLR_ELx_ENIA
+ msr sctlr_el1, x0
+2:
+alternative_else_nop_endif
+#endif
mte_set_user_gcr tsk, x0, x1
@@ -442,77 +471,12 @@ SYM_CODE_START_LOCAL(__swpan_exit_el0)
SYM_CODE_END(__swpan_exit_el0)
#endif
- .macro irq_stack_entry
- mov x19, sp // preserve the original sp
-#ifdef CONFIG_SHADOW_CALL_STACK
- mov x24, scs_sp // preserve the original shadow stack
-#endif
-
- /*
- * Compare sp with the base of the task stack.
- * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
- * and should switch to the irq stack.
- */
- ldr x25, [tsk, TSK_STACK]
- eor x25, x25, x19
- and x25, x25, #~(THREAD_SIZE - 1)
- cbnz x25, 9998f
-
- ldr_this_cpu x25, irq_stack_ptr, x26
- mov x26, #IRQ_STACK_SIZE
- add x26, x25, x26
-
- /* switch to the irq stack */
- mov sp, x26
-
-#ifdef CONFIG_SHADOW_CALL_STACK
- /* also switch to the irq shadow stack */
- ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
-#endif
-
-9998:
- .endm
-
- /*
- * The callee-saved regs (x19-x29) should be preserved between
- * irq_stack_entry and irq_stack_exit, but note that kernel_entry
- * uses x20-x23 to store data for later use.
- */
- .macro irq_stack_exit
- mov sp, x19
-#ifdef CONFIG_SHADOW_CALL_STACK
- mov scs_sp, x24
-#endif
- .endm
-
/* GPRs used by entry code */
tsk .req x28 // current thread_info
/*
* Interrupt handling.
*/
- .macro irq_handler
- ldr_l x1, handle_arch_irq
- mov x0, sp
- irq_stack_entry
- blr x1
- irq_stack_exit
- .endm
-
-#ifdef CONFIG_ARM64_PSEUDO_NMI
- /*
- * Set res to 0 if irqs were unmasked in interrupted context.
- * Otherwise set res to non-0 value.
- */
- .macro test_irqs_unmasked res:req, pmr:req
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
- sub \res, \pmr, #GIC_PRIO_IRQON
-alternative_else
- mov \res, xzr
-alternative_endif
- .endm
-#endif
-
.macro gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
@@ -522,15 +486,6 @@ alternative_endif
#endif
.endm
- .macro gic_prio_irq_setup, pmr:req, tmp:req
-#ifdef CONFIG_ARM64_PSEUDO_NMI
- alternative_if ARM64_HAS_IRQ_PRIO_MASKING
- orr \tmp, \pmr, #GIC_PRIO_PSR_I_SET
- msr_s SYS_ICC_PMR_EL1, \tmp
- alternative_else_nop_endif
-#endif
- .endm
-
.text
/*
@@ -540,32 +495,25 @@ alternative_endif
.align 11
SYM_CODE_START(vectors)
- kernel_ventry 1, sync_invalid // Synchronous EL1t
- kernel_ventry 1, irq_invalid // IRQ EL1t
- kernel_ventry 1, fiq_invalid // FIQ EL1t
- kernel_ventry 1, error_invalid // Error EL1t
-
- kernel_ventry 1, sync // Synchronous EL1h
- kernel_ventry 1, irq // IRQ EL1h
- kernel_ventry 1, fiq_invalid // FIQ EL1h
- kernel_ventry 1, error // Error EL1h
-
- kernel_ventry 0, sync // Synchronous 64-bit EL0
- kernel_ventry 0, irq // IRQ 64-bit EL0
- kernel_ventry 0, fiq_invalid // FIQ 64-bit EL0
- kernel_ventry 0, error // Error 64-bit EL0
-
-#ifdef CONFIG_COMPAT
- kernel_ventry 0, sync_compat, 32 // Synchronous 32-bit EL0
- kernel_ventry 0, irq_compat, 32 // IRQ 32-bit EL0
- kernel_ventry 0, fiq_invalid_compat, 32 // FIQ 32-bit EL0
- kernel_ventry 0, error_compat, 32 // Error 32-bit EL0
-#else
- kernel_ventry 0, sync_invalid, 32 // Synchronous 32-bit EL0
- kernel_ventry 0, irq_invalid, 32 // IRQ 32-bit EL0
- kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
- kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
-#endif
+ kernel_ventry 1, t, 64, sync // Synchronous EL1t
+ kernel_ventry 1, t, 64, irq // IRQ EL1t
+ kernel_ventry 1, t, 64, fiq // FIQ EL1t
+ kernel_ventry 1, t, 64, error // Error EL1t
+
+ kernel_ventry 1, h, 64, sync // Synchronous EL1h
+ kernel_ventry 1, h, 64, irq // IRQ EL1h
+ kernel_ventry 1, h, 64, fiq // FIQ EL1h
+ kernel_ventry 1, h, 64, error // Error EL1h
+
+ kernel_ventry 0, t, 64, sync // Synchronous 64-bit EL0
+ kernel_ventry 0, t, 64, irq // IRQ 64-bit EL0
+ kernel_ventry 0, t, 64, fiq // FIQ 64-bit EL0
+ kernel_ventry 0, t, 64, error // Error 64-bit EL0
+
+ kernel_ventry 0, t, 32, sync // Synchronous 32-bit EL0
+ kernel_ventry 0, t, 32, irq // IRQ 32-bit EL0
+ kernel_ventry 0, t, 32, fiq // FIQ 32-bit EL0
+ kernel_ventry 0, t, 32, error // Error 32-bit EL0
SYM_CODE_END(vectors)
#ifdef CONFIG_VMAP_STACK
@@ -596,170 +544,46 @@ __bad_stack:
ASM_BUG()
#endif /* CONFIG_VMAP_STACK */
-/*
- * Invalid mode handlers
- */
- .macro inv_entry, el, reason, regsize = 64
+
+ .macro entry_handler el:req, ht:req, regsize:req, label:req
+SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
kernel_entry \el, \regsize
mov x0, sp
- mov x1, #\reason
- mrs x2, esr_el1
- bl bad_mode
- ASM_BUG()
+ bl el\el\ht\()_\regsize\()_\label\()_handler
+ .if \el == 0
+ b ret_to_user
+ .else
+ b ret_to_kernel
+ .endif
+SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
.endm
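
For orientation, each stub generated by this macro branches to a like-named C handler. A minimal sketch of the prototypes the el<el><ht>_<regsize>_<label> template implies (illustrative only; the real declarations live in the C entry code and may differ):

        /* Sketch: handler prototypes implied by the macro's name template. */
        asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs);
        asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs);
        asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs);
        asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs);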
-SYM_CODE_START_LOCAL(el0_sync_invalid)
- inv_entry 0, BAD_SYNC
-SYM_CODE_END(el0_sync_invalid)
-
-SYM_CODE_START_LOCAL(el0_irq_invalid)
- inv_entry 0, BAD_IRQ
-SYM_CODE_END(el0_irq_invalid)
-
-SYM_CODE_START_LOCAL(el0_fiq_invalid)
- inv_entry 0, BAD_FIQ
-SYM_CODE_END(el0_fiq_invalid)
-
-SYM_CODE_START_LOCAL(el0_error_invalid)
- inv_entry 0, BAD_ERROR
-SYM_CODE_END(el0_error_invalid)
-
-#ifdef CONFIG_COMPAT
-SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
- inv_entry 0, BAD_FIQ, 32
-SYM_CODE_END(el0_fiq_invalid_compat)
-#endif
-
-SYM_CODE_START_LOCAL(el1_sync_invalid)
- inv_entry 1, BAD_SYNC
-SYM_CODE_END(el1_sync_invalid)
-
-SYM_CODE_START_LOCAL(el1_irq_invalid)
- inv_entry 1, BAD_IRQ
-SYM_CODE_END(el1_irq_invalid)
-
-SYM_CODE_START_LOCAL(el1_fiq_invalid)
- inv_entry 1, BAD_FIQ
-SYM_CODE_END(el1_fiq_invalid)
-
-SYM_CODE_START_LOCAL(el1_error_invalid)
- inv_entry 1, BAD_ERROR
-SYM_CODE_END(el1_error_invalid)
-
/*
- * EL1 mode handlers.
+ * Early exception handlers
*/
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
- kernel_entry 1
- mov x0, sp
- bl el1_sync_handler
+ entry_handler 1, t, 64, sync
+ entry_handler 1, t, 64, irq
+ entry_handler 1, t, 64, fiq
+ entry_handler 1, t, 64, error
+
+ entry_handler 1, h, 64, sync
+ entry_handler 1, h, 64, irq
+ entry_handler 1, h, 64, fiq
+ entry_handler 1, h, 64, error
+
+ entry_handler 0, t, 64, sync
+ entry_handler 0, t, 64, irq
+ entry_handler 0, t, 64, fiq
+ entry_handler 0, t, 64, error
+
+ entry_handler 0, t, 32, sync
+ entry_handler 0, t, 32, irq
+ entry_handler 0, t, 32, fiq
+ entry_handler 0, t, 32, error
+
+SYM_CODE_START_LOCAL(ret_to_kernel)
kernel_exit 1
-SYM_CODE_END(el1_sync)
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
- kernel_entry 1
- gic_prio_irq_setup pmr=x20, tmp=x1
- enable_da_f
-
- mov x0, sp
- bl enter_el1_irq_or_nmi
-
- irq_handler
-
-#ifdef CONFIG_PREEMPTION
- ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count
-alternative_if ARM64_HAS_IRQ_PRIO_MASKING
- /*
- * DA_F were cleared at start of handling. If anything is set in DAIF,
- * we come back from an NMI, so skip preemption
- */
- mrs x0, daif
- orr x24, x24, x0
-alternative_else_nop_endif
- cbnz x24, 1f // preempt count != 0 || NMI return path
- bl arm64_preempt_schedule_irq // irq en/disable is done inside
-1:
-#endif
-
- mov x0, sp
- bl exit_el1_irq_or_nmi
-
- kernel_exit 1
-SYM_CODE_END(el1_irq)
-
-/*
- * EL0 mode handlers.
- */
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
- kernel_entry 0
- mov x0, sp
- bl el0_sync_handler
- b ret_to_user
-SYM_CODE_END(el0_sync)
-
-#ifdef CONFIG_COMPAT
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
- kernel_entry 0, 32
- mov x0, sp
- bl el0_sync_compat_handler
- b ret_to_user
-SYM_CODE_END(el0_sync_compat)
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
- kernel_entry 0, 32
- b el0_irq_naked
-SYM_CODE_END(el0_irq_compat)
-
-SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
- kernel_entry 0, 32
- b el0_error_naked
-SYM_CODE_END(el0_error_compat)
-#endif
-
- .align 6
-SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
- kernel_entry 0
-el0_irq_naked:
- gic_prio_irq_setup pmr=x20, tmp=x0
- user_exit_irqoff
- enable_da_f
-
- tbz x22, #55, 1f
- bl do_el0_irq_bp_hardening
-1:
- irq_handler
-
- b ret_to_user
-SYM_CODE_END(el0_irq)
-
-SYM_CODE_START_LOCAL(el1_error)
- kernel_entry 1
- mrs x1, esr_el1
- gic_prio_kentry_setup tmp=x2
- enable_dbg
- mov x0, sp
- bl do_serror
- kernel_exit 1
-SYM_CODE_END(el1_error)
-
-SYM_CODE_START_LOCAL(el0_error)
- kernel_entry 0
-el0_error_naked:
- mrs x25, esr_el1
- gic_prio_kentry_setup tmp=x2
- user_exit_irqoff
- enable_dbg
- mov x0, sp
- mov x1, x25
- bl do_serror
- enable_da_f
- b ret_to_user
-SYM_CODE_END(el0_error)
+SYM_CODE_END(ret_to_kernel)
/*
* "slow" syscall return path.
@@ -942,8 +766,8 @@ SYM_FUNC_START(cpu_switch_to)
mov sp, x9
msr sp_el0, x1
ptrauth_keys_install_kernel x1, x8, x9, x10
- scs_save x0, x8
- scs_load x1, x8
+ scs_save x0
+ scs_load x1
ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
@@ -961,6 +785,42 @@ SYM_CODE_START(ret_from_fork)
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)
+/*
+ * void call_on_irq_stack(struct pt_regs *regs,
+ * void (*func)(struct pt_regs *));
+ *
+ * Calls func(regs) using this CPU's irq stack and shadow irq stack.
+ */
+SYM_FUNC_START(call_on_irq_stack)
+#ifdef CONFIG_SHADOW_CALL_STACK
+ stp scs_sp, xzr, [sp, #-16]!
+ ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
+#endif
+ /* Create a frame record to save our LR and SP (implicit in FP) */
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+
+ ldr_this_cpu x16, irq_stack_ptr, x17
+ mov x15, #IRQ_STACK_SIZE
+ add x16, x16, x15
+
+ /* Move to the new stack and call the function there */
+ mov sp, x16
+ blr x1
+
+ /*
+ * Restore the SP from the FP, and restore the FP and LR from the frame
+ * record.
+ */
+ mov sp, x29
+ ldp x29, x30, [sp], #16
+#ifdef CONFIG_SHADOW_CALL_STACK
+ ldp scs_sp, xzr, [sp], #16
+#endif
+ ret
+SYM_FUNC_END(call_on_irq_stack)
+NOKPROBE(call_on_irq_stack)
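
For context, a hedged sketch of how the C entry code might use this helper, switching stacks only when the exception was taken while still on the task stack (on_thread_stack() is assumed; this is not the verbatim call site):

        /* Sketch: dispatch an IRQ/FIQ handler on the per-CPU IRQ stack. */
        static void do_interrupt_handler(struct pt_regs *regs,
                                         void (*handler)(struct pt_regs *))
        {
                if (on_thread_stack())
                        call_on_irq_stack(regs, handler); /* move off the task stack */
                else
                        handler(regs); /* already on the IRQ or overflow stack */
        }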
+
#ifdef CONFIG_ARM_SDE_INTERFACE
#include <asm/sdei.h>
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 062b21f30f94..e57b23f95284 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -180,7 +180,7 @@ static void __get_cpu_fpsimd_context(void)
*/
static void get_cpu_fpsimd_context(void)
{
- preempt_disable();
+ local_bh_disable();
__get_cpu_fpsimd_context();
}
@@ -201,7 +201,7 @@ static void __put_cpu_fpsimd_context(void)
static void put_cpu_fpsimd_context(void)
{
__put_cpu_fpsimd_context();
- preempt_enable();
+ local_bh_enable();
}
static bool have_cpu_fpsimd_context(void)
@@ -285,7 +285,7 @@ static void task_fpsimd_load(void)
WARN_ON(!system_supports_fpsimd());
WARN_ON(!have_cpu_fpsimd_context());
- if (system_supports_sve() && test_thread_flag(TIF_SVE))
+ if (IS_ENABLED(CONFIG_ARM64_SVE) && test_thread_flag(TIF_SVE))
sve_load_state(sve_pffr(&current->thread),
&current->thread.uw.fpsimd_state.fpsr,
sve_vq_from_vl(current->thread.sve_vl) - 1);
@@ -307,7 +307,8 @@ static void fpsimd_save(void)
WARN_ON(!have_cpu_fpsimd_context());
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
- if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
+ if (IS_ENABLED(CONFIG_ARM64_SVE) &&
+ test_thread_flag(TIF_SVE)) {
if (WARN_ON(sve_get_vl() != last->sve_vl)) {
/*
* Can't save the user regs, so current would
@@ -926,9 +927,8 @@ void fpsimd_release_task(struct task_struct *dead_task)
* Trapped SVE access
*
* Storage is allocated for the full SVE state, the current FPSIMD
- * register contents are migrated across, and TIF_SVE is set so that
- * the SVE access trap will be disabled the next time this task
- * reaches ret_to_user.
+ * register contents are migrated across, and the access trap is
+ * disabled.
*
* TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
* would have disabled the SVE access trap for userspace during
@@ -946,15 +946,26 @@ void do_sve_acc(unsigned int esr, struct pt_regs *regs)
get_cpu_fpsimd_context();
- fpsimd_save();
-
- /* Force ret_to_user to reload the registers: */
- fpsimd_flush_task_state(current);
-
- fpsimd_to_sve(current);
if (test_and_set_thread_flag(TIF_SVE))
WARN_ON(1); /* SVE access shouldn't have trapped */
+ /*
+ * Convert the FPSIMD state to SVE, zeroing all the state that
+ * is not shared with FPSIMD. If (as is likely) the current
+ * state is live in the registers then do this there and
+ * update our metadata for the current task, including
+ * disabling the trap; otherwise, update our in-memory copy.
+ */
+ if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
+ unsigned long vq_minus_one =
+ sve_vq_from_vl(current->thread.sve_vl) - 1;
+ sve_set_vq(vq_minus_one);
+ sve_flush_live(vq_minus_one);
+ fpsimd_bind_task_to_cpu();
+ } else {
+ fpsimd_to_sve(current);
+ }
+
put_cpu_fpsimd_context();
}
@@ -1092,7 +1103,7 @@ void fpsimd_preserve_current_state(void)
void fpsimd_signal_preserve_current_state(void)
{
fpsimd_preserve_current_state();
- if (system_supports_sve() && test_thread_flag(TIF_SVE))
+ if (test_thread_flag(TIF_SVE))
sve_to_fpsimd(current);
}
@@ -1181,7 +1192,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
get_cpu_fpsimd_context();
current->thread.uw.fpsimd_state = *state;
- if (system_supports_sve() && test_thread_flag(TIF_SVE))
+ if (test_thread_flag(TIF_SVE))
fpsimd_to_sve(current);
task_fpsimd_load();
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 86a5cf9bc19a..7f467bd9db7a 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -15,6 +15,7 @@
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
+#include <asm/patching.h>
#ifdef CONFIG_DYNAMIC_FTRACE
/*
@@ -55,7 +56,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
unsigned long pc;
u32 new;
- pc = (unsigned long)&ftrace_call;
+ pc = (unsigned long)function_nocfi(ftrace_call);
new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func,
AARCH64_INSN_BRANCH_LINK);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 66b0e0b66e31..c5c994a73a64 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -16,6 +16,7 @@
#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
+#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
@@ -117,8 +118,8 @@ SYM_CODE_START_LOCAL(preserve_boot_args)
dmb sy // needed before dc ivac with
// MMU off
- mov x1, #0x20 // 4 x 8 bytes
- b __inval_dcache_area // tail call
+ add x1, x0, #0x20 // 4 x 8 bytes
+ b dcache_inval_poc // tail call
SYM_CODE_END(preserve_boot_args)
/*
@@ -195,7 +196,7 @@ SYM_CODE_END(preserve_boot_args)
and \iend, \iend, \istart // iend = (vend >> shift) & (ptrs - 1)
mov \istart, \ptrs
mul \istart, \istart, \count
- add \iend, \iend, \istart // iend += (count - 1) * ptrs
+ add \iend, \iend, \istart // iend += count * ptrs
// our entries span multiple tables
lsr \istart, \vstart, \shift
@@ -268,8 +269,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
*/
adrp x0, init_pg_dir
adrp x1, init_pg_end
- sub x1, x1, x0
- bl __inval_dcache_area
+ bl dcache_inval_poc
/*
* Clear the init page tables.
@@ -319,7 +319,7 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
*/
adrp x5, __idmap_text_end
clz x5, x5
- cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough?
+ cmp x5, TCR_T0SZ(VA_BITS_MIN) // default T0SZ small enough?
b.ge 1f // .. then skip VA range extension
adr_l x6, idmap_t0sz
@@ -354,7 +354,6 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
#endif
1:
ldr_l x4, idmap_ptrs_per_pgd
- mov x5, x3 // __pa(__idmap_text_start)
adr_l x6, __idmap_text_end // __pa(__idmap_text_end)
map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
@@ -382,39 +381,57 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
adrp x0, idmap_pg_dir
adrp x1, idmap_pg_end
- sub x1, x1, x0
- bl __inval_dcache_area
+ bl dcache_inval_poc
adrp x0, init_pg_dir
adrp x1, init_pg_end
- sub x1, x1, x0
- bl __inval_dcache_area
+ bl dcache_inval_poc
ret x28
SYM_FUNC_END(__create_page_tables)
+ /*
+ * Initialize CPU registers with task-specific and cpu-specific context.
+ *
+ * Create a final frame record at task_pt_regs(current)->stackframe, so
+ * that the unwinder can identify the final frame record of any task by
+ * its location in the task stack. We reserve the entire pt_regs space
+ * for consistency with user tasks and kthreads.
+ */
+ .macro init_cpu_task tsk, tmp1, tmp2
+ msr sp_el0, \tsk
+
+ ldr \tmp1, [\tsk, #TSK_STACK]
+ add sp, \tmp1, #THREAD_SIZE
+ sub sp, sp, #PT_REGS_SIZE
+
+ stp xzr, xzr, [sp, #S_STACKFRAME]
+ add x29, sp, #S_STACKFRAME
+
+ scs_load \tsk
+
+ adr_l \tmp1, __per_cpu_offset
+ ldr w\tmp2, [\tsk, #TSK_CPU]
+ ldr \tmp1, [\tmp1, \tmp2, lsl #3]
+ set_this_cpu_offset \tmp1
+ .endm
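
Since the comment above leans on the unwinder, here is a hedged sketch of the termination test the final frame record enables (the helper name is illustrative; the real check lives in the stacktrace code):

        /* Sketch: a stack walk can stop exactly at the final frame record. */
        static bool unwind_is_final_frame(struct task_struct *tsk, unsigned long fp)
        {
                return fp == (unsigned long)task_pt_regs(tsk)->stackframe;
        }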
+
/*
* The following fragment of code is executed with the MMU enabled.
*
* x0 = __PHYS_OFFSET
*/
SYM_FUNC_START_LOCAL(__primary_switched)
- adrp x4, init_thread_union
- add sp, x4, #THREAD_SIZE
- adr_l x5, init_task
- msr sp_el0, x5 // Save thread_info
+ adr_l x4, init_task
+ init_cpu_task x4, x5, x6
adr_l x8, vectors // load VBAR_EL1 with virtual
msr vbar_el1, x8 // vector table address
isb
- stp xzr, x30, [sp, #-16]!
+ stp x29, x30, [sp, #-16]!
mov x29, sp
-#ifdef CONFIG_SHADOW_CALL_STACK
- adr_l scs_sp, init_shadow_call_stack // Set shadow call stack
-#endif
-
str_l x21, __fdt_pointer, x5 // Save FDT pointer
ldr_l x4, kimage_vaddr // Save the offset between
@@ -446,10 +463,9 @@ SYM_FUNC_START_LOCAL(__primary_switched)
0:
#endif
bl switch_to_vhe // Prefer VHE if possible
- add sp, sp, #16
- mov x29, #0
- mov x30, #0
- b start_kernel
+ ldp x29, x30, [sp], #16
+ bl start_kernel
+ ASM_BUG()
SYM_FUNC_END(__primary_switched)
.pushsection ".rodata", "a"
@@ -477,14 +493,13 @@ EXPORT_SYMBOL(kimage_vaddr)
* booted in EL1 or EL2 respectively.
*/
SYM_FUNC_START(init_kernel_el)
- mov_q x0, INIT_SCTLR_EL1_MMU_OFF
- msr sctlr_el1, x0
-
mrs x0, CurrentEL
cmp x0, #CurrentEL_EL2
b.eq init_el2
SYM_INNER_LABEL(init_el1, SYM_L_LOCAL)
+ mov_q x0, INIT_SCTLR_EL1_MMU_OFF
+ msr sctlr_el1, x0
isb
mov_q x0, INIT_PSTATE_EL1
msr spsr_el1, x0
@@ -504,9 +519,43 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
msr vbar_el2, x0
isb
+ /*
+ * Fruity CPUs seem to have HCR_EL2.E2H set to RES1,
+ * making it impossible to start in nVHE mode. Is that
+ * compliant with the architecture? Absolutely not!
+ */
+ mrs x0, hcr_el2
+ and x0, x0, #HCR_E2H
+ cbz x0, 1f
+
+ /* Switching to VHE requires a sane SCTLR_EL1 as a start */
+ mov_q x0, INIT_SCTLR_EL1_MMU_OFF
+ msr_s SYS_SCTLR_EL12, x0
+
+ /*
+ * Force an eret into a helper "function", and let it return
+ * to our original caller... This makes sure that we have
+ * initialised the basic PSTATE.
+ */
+ mov x0, #INIT_PSTATE_EL2
+ msr spsr_el1, x0
+ adr x0, __cpu_stick_to_vhe
+ msr elr_el1, x0
+ eret
+
+1:
+ mov_q x0, INIT_SCTLR_EL1_MMU_OFF
+ msr sctlr_el1, x0
+
msr elr_el2, lr
mov w0, #BOOT_CPU_MODE_EL2
eret
+
+__cpu_stick_to_vhe:
+ mov x0, #HVC_VHE_RESTART
+ hvc #0
+ mov x0, #BOOT_CPU_MODE_EL2
+ ret
SYM_FUNC_END(init_kernel_el)
/*
@@ -518,7 +567,7 @@ SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
cmp w0, #BOOT_CPU_MODE_EL2
b.ne 1f
add x1, x1, #4
-1: str w0, [x1] // This CPU has booted in EL1
+1: str w0, [x1] // Save CPU boot mode
dmb sy
dc ivac, x1 // Invalidate potentially stale cache line
ret
@@ -599,21 +648,17 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
isb
adr_l x0, secondary_data
- ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
- cbz x1, __secondary_too_slow
- mov sp, x1
ldr x2, [x0, #CPU_BOOT_TASK]
cbz x2, __secondary_too_slow
- msr sp_el0, x2
- scs_load x2, x3
- mov x29, #0
- mov x30, #0
+
+ init_cpu_task x2, x1, x3
#ifdef CONFIG_ARM64_PTR_AUTH
ptrauth_keys_init_cpu x2, x3, x4, x5
#endif
- b secondary_start_kernel
+ bl secondary_start_kernel
+ ASM_BUG()
SYM_FUNC_END(__secondary_switched)
SYM_FUNC_START_LOCAL(__secondary_too_slow)
@@ -655,8 +700,10 @@ SYM_FUNC_END(__secondary_too_slow)
SYM_FUNC_START(__enable_mmu)
mrs x2, ID_AA64MMFR0_EL1
ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
- cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
- b.ne __no_granule_support
+ cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MIN
+ b.lt __no_granule_support
+ cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED_MAX
+ b.gt __no_granule_support
update_early_cpu_boot_status 0, x2, x3
adrp x2, idmap_pg_dir
phys_to_ttbr x1, x1
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
index 8ccca660034e..81c0186a5e32 100644
--- a/arch/arm64/kernel/hibernate-asm.S
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -45,7 +45,7 @@
* Because this code has to be copied to a 'safe' page, it can't call out to
* other functions by PC-relative address. Also remember that it may be
* mid-way through over-writing other functions. For this reason it contains
- * code from flush_icache_range() and uses the copy_page() macro.
+ * code from caches_clean_inval_pou() and uses the copy_page() macro.
*
* This 'safe' page is mapped via ttbr0, and executed from there. This function
* switches to a copy of the linear map in ttbr1, performs the restore, then
@@ -87,11 +87,12 @@ SYM_CODE_START(swsusp_arch_suspend_exit)
copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
add x1, x10, #PAGE_SIZE
- /* Clean the copied page to PoU - based on flush_icache_range() */
+ /* Clean the copied page to PoU - based on caches_clean_inval_pou() */
raw_dcache_line_size x2, x3
sub x3, x2, #1
bic x4, x10, x3
-2: dc cvau, x4 /* clean D line / unified line */
+2: /* clean D line / unified line */
+alternative_insn "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
add x4, x4, x2
cmp x4, x1
b.lo 2b
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index b1cef371df2b..46a0b4d6e251 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -210,7 +210,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
return -ENOMEM;
memcpy(page, src_start, length);
- __flush_icache_range((unsigned long)page, (unsigned long)page + length);
+ caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length);
rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page);
if (rc)
return rc;
@@ -240,8 +240,6 @@ static int create_safe_exec_page(void *src_start, size_t length,
return 0;
}
-#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
-
#ifdef CONFIG_ARM64_MTE
static DEFINE_XARRAY(mte_pages);
@@ -383,13 +381,18 @@ int swsusp_arch_suspend(void)
ret = swsusp_save();
} else {
/* Clean kernel core startup/idle code to PoC */
- dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
- dcache_clean_range(__idmap_text_start, __idmap_text_end);
+ dcache_clean_inval_poc((unsigned long)__mmuoff_data_start,
+ (unsigned long)__mmuoff_data_end);
+ dcache_clean_inval_poc((unsigned long)__idmap_text_start,
+ (unsigned long)__idmap_text_end);
/* Clean kvm setup code to PoC? */
if (el2_reset_needed()) {
- dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
- dcache_clean_range(__hyp_text_start, __hyp_text_end);
+ dcache_clean_inval_poc(
+ (unsigned long)__hyp_idmap_text_start,
+ (unsigned long)__hyp_idmap_text_end);
+ dcache_clean_inval_poc((unsigned long)__hyp_text_start,
+ (unsigned long)__hyp_text_end);
}
swsusp_mte_restore_tags();
@@ -474,7 +477,8 @@ int swsusp_arch_resume(void)
* The hibernate exit text contains a set of el2 vectors, that will
* be executed at el2 with the mmu off in order to reload hyp-stub.
*/
- __flush_dcache_area(hibernate_exit, exit_size);
+ dcache_clean_inval_poc((unsigned long)hibernate_exit,
+ (unsigned long)hibernate_exit + exit_size);
/*
* KASLR will cause the el2 vectors to be in a different location in
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 5eccbd62fec8..43d212618834 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -27,12 +27,12 @@ SYM_CODE_START(__hyp_stub_vectors)
ventry el2_fiq_invalid // FIQ EL2t
ventry el2_error_invalid // Error EL2t
- ventry el2_sync_invalid // Synchronous EL2h
+ ventry elx_sync // Synchronous EL2h
ventry el2_irq_invalid // IRQ EL2h
ventry el2_fiq_invalid // FIQ EL2h
ventry el2_error_invalid // Error EL2h
- ventry el1_sync // Synchronous 64-bit EL1
+ ventry elx_sync // Synchronous 64-bit EL1
ventry el1_irq_invalid // IRQ 64-bit EL1
ventry el1_fiq_invalid // FIQ 64-bit EL1
ventry el1_error_invalid // Error 64-bit EL1
@@ -45,7 +45,7 @@ SYM_CODE_END(__hyp_stub_vectors)
.align 11
-SYM_CODE_START_LOCAL(el1_sync)
+SYM_CODE_START_LOCAL(elx_sync)
cmp x0, #HVC_SET_VECTORS
b.ne 1f
msr vbar_el2, x1
@@ -71,7 +71,7 @@ SYM_CODE_START_LOCAL(el1_sync)
9: mov x0, xzr
eret
-SYM_CODE_END(el1_sync)
+SYM_CODE_END(elx_sync)
// nVHE? No way! Give me the real thing!
SYM_CODE_START_LOCAL(mutate_to_vhe)
@@ -115,9 +115,10 @@ SYM_CODE_START_LOCAL(mutate_to_vhe)
mrs_s x0, SYS_VBAR_EL12
msr vbar_el1, x0
- // Use EL2 translations for SPE and disable access from EL1
+ // Use EL2 translations for SPE & TRBE and disable access from EL1
mrs x0, mdcr_el2
bic x0, x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
+ bic x0, x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
msr mdcr_el2, x0
// Transfer the MM state from EL1 to EL2
@@ -224,7 +225,6 @@ SYM_FUNC_END(__hyp_reset_vectors)
* Entry point to switch to VHE if deemed capable
*/
SYM_FUNC_START(switch_to_vhe)
-#ifdef CONFIG_ARM64_VHE
// Need to have booted at EL2
adr_l x1, __boot_cpu_mode
ldr w0, [x1]
@@ -240,6 +240,5 @@ SYM_FUNC_START(switch_to_vhe)
mov x0, #HVC_VHE_RESTART
hvc #0
1:
-#endif
ret
SYM_FUNC_END(switch_to_vhe)
diff --git a/arch/arm64/kernel/idle.c b/arch/arm64/kernel/idle.c
new file mode 100644
index 000000000000..a2cfbacec2bb
--- /dev/null
+++ b/arch/arm64/kernel/idle.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Low-level idle sequences
+ */
+
+#include <linux/cpu.h>
+#include <linux/irqflags.h>
+
+#include <asm/barrier.h>
+#include <asm/cpuidle.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
+
+/*
+ * cpu_do_idle()
+ *
+ * Idle the processor (wait for interrupt).
+ *
+ * If the CPU supports priority masking we must do additional work to
+ * ensure that interrupts are not masked at the PMR (because the core will
+ * not wake up if we block the wake-up signal in the interrupt controller).
+ */
+void noinstr cpu_do_idle(void)
+{
+ struct arm_cpuidle_irq_context context;
+
+ arm_cpuidle_save_irq_context(&context);
+
+ dsb(sy);
+ wfi();
+
+ arm_cpuidle_restore_irq_context(&context);
+}
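
The save/restore helpers used above sit outside this hunk; a rough sketch of what saving must do when priority masking is in use (an assumption about the implementation, with illustrative type and function names):

        /* Sketch: unmask at the PMR so wfi can be woken, keeping DAIF masked. */
        struct sketch_irq_context {
                unsigned long pmr;
                unsigned long daif;
        };

        static inline void sketch_save_irq_context(struct sketch_irq_context *c)
        {
                if (system_uses_irq_prio_masking()) {
                        c->pmr = gic_read_pmr(); /* restored by the matching helper */
                        c->daif = read_sysreg(daif);
                        write_sysreg(c->daif | PSR_I_BIT | PSR_F_BIT, daif);
                        gic_write_pmr(GIC_PRIO_IRQON); /* let the wake-up reach the core */
                }
        }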
+
+/*
+ * This is our default idle handler.
+ */
+void noinstr arch_cpu_idle(void)
+{
+ /*
+ * This should do all the clock switching and wait-for-interrupt
+ * tricks.
+ */
+ cpu_do_idle();
+ raw_local_irq_enable();
+}
diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index dffb16682330..53a381a7f65d 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -25,14 +25,26 @@ struct ftr_set_desc {
struct {
char name[FTR_DESC_FIELD_LEN];
u8 shift;
+ bool (*filter)(u64 val);
} fields[];
};
+static bool __init mmfr1_vh_filter(u64 val)
+{
+ /*
+ * If we ever reach this point while running VHE, we're
+ * guaranteed to be on one of these funky, VHE-stuck CPUs. If
+ * the user was trying to force nVHE on us, proceed with
+ * attitude adjustment.
+ */
+ return !(is_kernel_in_hyp_mode() && val == 0);
+}
+
static const struct ftr_set_desc mmfr1 __initconst = {
.name = "id_aa64mmfr1",
.override = &id_aa64mmfr1_override,
.fields = {
- { "vh", ID_AA64MMFR1_VHE_SHIFT },
+ { "vh", ID_AA64MMFR1_VHE_SHIFT, mmfr1_vh_filter },
{}
},
};
@@ -124,6 +136,18 @@ static void __init match_options(const char *cmdline)
if (find_field(cmdline, regs[i], f, &v))
continue;
+ /*
+ * If an override gets filtered out, advertise
+ * it by setting the value to 0xf, but
+ * clearing the mask... Yes, this is fragile.
+ */
+ if (regs[i]->fields[f].filter &&
+ !regs[i]->fields[f].filter(v)) {
+ regs[i]->override->val |= mask;
+ regs[i]->override->mask &= ~mask;
+ continue;
+ }
+
regs[i]->override->val &= ~mask;
regs[i]->override->val |= (v << shift) & mask;
regs[i]->override->mask |= mask;
@@ -163,33 +187,36 @@ static __init void __parse_cmdline(const char *cmdline, bool parse_aliases)
} while (1);
}
-static __init void parse_cmdline(void)
+static __init const u8 *get_bootargs_cmdline(void)
{
- if (!IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
- const u8 *prop;
- void *fdt;
- int node;
+ const u8 *prop;
+ void *fdt;
+ int node;
- fdt = get_early_fdt_ptr();
- if (!fdt)
- goto out;
+ fdt = get_early_fdt_ptr();
+ if (!fdt)
+ return NULL;
- node = fdt_path_offset(fdt, "/chosen");
- if (node < 0)
- goto out;
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ return NULL;
- prop = fdt_getprop(fdt, node, "bootargs", NULL);
- if (!prop)
- goto out;
+ prop = fdt_getprop(fdt, node, "bootargs", NULL);
+ if (!prop)
+ return NULL;
- __parse_cmdline(prop, true);
+ return strlen(prop) ? prop : NULL;
+}
- if (!IS_ENABLED(CONFIG_CMDLINE_EXTEND))
- return;
- }
+static __init void parse_cmdline(void)
+{
+ const u8 *prop = get_bootargs_cmdline();
+
+ if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || !prop)
+ __parse_cmdline(CONFIG_CMDLINE, true);
-out:
- __parse_cmdline(CONFIG_CMDLINE, true);
+ if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && prop)
+ __parse_cmdline(prop, true);
}
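
Net effect of the reworked logic above, for the cases it distinguishes:

        CONFIG_CMDLINE_FORCE   bootargs present   overrides parsed from
        --------------------   ----------------   ---------------------
        y                      either             CONFIG_CMDLINE only
        n                      yes                bootargs only
        n                      no                 CONFIG_CMDLINE only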
/* Keep checkers quiet */
@@ -210,7 +237,8 @@ asmlinkage void __init init_feature_override(void)
for (i = 0; i < ARRAY_SIZE(regs); i++) {
if (regs[i]->override)
- __flush_dcache_area(regs[i]->override,
+ dcache_clean_inval_poc((unsigned long)regs[i]->override,
+ (unsigned long)regs[i]->override +
sizeof(*regs[i]->override));
}
}
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 23f1a557bd9f..c96a9a0043bf 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -35,7 +35,7 @@ __efistub_strnlen = __pi_strnlen;
__efistub_strcmp = __pi_strcmp;
__efistub_strncmp = __pi_strncmp;
__efistub_strrchr = __pi_strrchr;
-__efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
+__efistub_dcache_clean_poc = __pi_dcache_clean_poc;
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
__efistub___memcpy = __pi_memcpy;
@@ -65,13 +65,13 @@ __efistub__ctype = _ctype;
KVM_NVHE_ALIAS(kvm_patch_vector_branch);
KVM_NVHE_ALIAS(kvm_update_va_mask);
KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
+KVM_NVHE_ALIAS(kvm_compute_final_ctr_el0);
/* Global kernel state accessed by nVHE hyp code. */
KVM_NVHE_ALIAS(kvm_vgic_global_state);
/* Kernel symbols used to call panic() from nVHE hyp code (via ERET). */
-KVM_NVHE_ALIAS(__hyp_panic_string);
-KVM_NVHE_ALIAS(panic);
+KVM_NVHE_ALIAS(nvhe_hyp_panic_handler);
/* Vectors installed by hyp-init on reset HVC. */
KVM_NVHE_ALIAS(__hyp_stub_vectors);
@@ -101,6 +101,39 @@ KVM_NVHE_ALIAS(__stop___kvm_ex_table);
/* Array containing bases of nVHE per-CPU memory regions. */
KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);
+/* PMU available static key */
+KVM_NVHE_ALIAS(kvm_arm_pmu_available);
+
+/* Position-independent library routines */
+KVM_NVHE_ALIAS_HYP(clear_page, __pi_clear_page);
+KVM_NVHE_ALIAS_HYP(copy_page, __pi_copy_page);
+KVM_NVHE_ALIAS_HYP(memcpy, __pi_memcpy);
+KVM_NVHE_ALIAS_HYP(memset, __pi_memset);
+
+#ifdef CONFIG_KASAN
+KVM_NVHE_ALIAS_HYP(__memcpy, __pi_memcpy);
+KVM_NVHE_ALIAS_HYP(__memset, __pi_memset);
+#endif
+
+/* Kernel memory sections */
+KVM_NVHE_ALIAS(__start_rodata);
+KVM_NVHE_ALIAS(__end_rodata);
+KVM_NVHE_ALIAS(__bss_start);
+KVM_NVHE_ALIAS(__bss_stop);
+
+/* Hyp memory sections */
+KVM_NVHE_ALIAS(__hyp_idmap_text_start);
+KVM_NVHE_ALIAS(__hyp_idmap_text_end);
+KVM_NVHE_ALIAS(__hyp_text_start);
+KVM_NVHE_ALIAS(__hyp_text_end);
+KVM_NVHE_ALIAS(__hyp_bss_start);
+KVM_NVHE_ALIAS(__hyp_bss_end);
+KVM_NVHE_ALIAS(__hyp_rodata_start);
+KVM_NVHE_ALIAS(__hyp_rodata_end);
+
+/* pKVM static key */
+KVM_NVHE_ALIAS(kvm_protected_mode_initialized);
+
#endif /* CONFIG_KVM */
#endif /* __ARM64_KERNEL_IMAGE_VARS_H */
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index dfb1feab867d..bda49430c9ea 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -71,13 +71,44 @@ static void init_irq_stacks(void)
}
#endif
+static void default_handle_irq(struct pt_regs *regs)
+{
+ panic("IRQ taken without a root IRQ handler\n");
+}
+
+static void default_handle_fiq(struct pt_regs *regs)
+{
+ panic("FIQ taken without a root FIQ handler\n");
+}
+
+void (*handle_arch_irq)(struct pt_regs *) __ro_after_init = default_handle_irq;
+void (*handle_arch_fiq)(struct pt_regs *) __ro_after_init = default_handle_fiq;
+
+int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
+{
+ if (handle_arch_irq != default_handle_irq)
+ return -EBUSY;
+
+ handle_arch_irq = handle_irq;
+ pr_info("Root IRQ handler: %ps\n", handle_irq);
+ return 0;
+}
+
+int __init set_handle_fiq(void (*handle_fiq)(struct pt_regs *))
+{
+ if (handle_arch_fiq != default_handle_fiq)
+ return -EBUSY;
+
+ handle_arch_fiq = handle_fiq;
+ pr_info("Root FIQ handler: %ps\n", handle_fiq);
+ return 0;
+}
+
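A hedged usage sketch: a root interrupt controller driver registers its handler once during init (the driver and handler names here are hypothetical):

        /* Sketch: registration from a hypothetical irqchip driver. */
        static void my_irqchip_handle_irq(struct pt_regs *regs)
        {
                /* acknowledge and dispatch pending hardware interrupts here */
        }

        static int __init my_irqchip_init(void)
        {
                /* Fails with -EBUSY if another root IRQ handler was installed. */
                return set_handle_irq(my_irqchip_handle_irq);
        }
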
void __init init_IRQ(void)
{
init_irq_stacks();
init_irq_scs();
irqchip_init();
- if (!handle_arch_irq)
- panic("No interrupt controller found.");
if (system_uses_irq_prio_masking()) {
/*
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index 9a8a0ae1e75f..fc98037e1220 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <asm/insn.h>
+#include <asm/patching.h>
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 27f8939deb1b..cfa2cfde3019 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -72,7 +72,9 @@ u64 __init kaslr_early_init(void)
* we end up running with module randomization disabled.
*/
module_alloc_base = (u64)_etext - MODULES_VSIZE;
- __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
+ dcache_clean_inval_poc((unsigned long)&module_alloc_base,
+ (unsigned long)&module_alloc_base +
+ sizeof(module_alloc_base));
/*
* Try to map the FDT early. If this fails, we simply bail,
@@ -128,15 +130,17 @@ u64 __init kaslr_early_init(void)
/* use the top 16 bits to randomize the linear region */
memstart_offset_seed = seed >> 48;
- if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
- IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+ if (!IS_ENABLED(CONFIG_KASAN_VMALLOC) &&
+ (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+ IS_ENABLED(CONFIG_KASAN_SW_TAGS)))
/*
- * KASAN does not expect the module region to intersect the
- * vmalloc region, since shadow memory is allocated for each
- * module at load time, whereas the vmalloc region is shadowed
- * by KASAN zero pages. So keep modules out of the vmalloc
- * region if KASAN is enabled, and put the kernel well within
- * 4 GB of the module region.
+ * KASAN without KASAN_VMALLOC does not expect the module region
+ * to intersect the vmalloc region, since shadow memory is
+ * allocated for each module at load time, whereas the vmalloc
+ * region is shadowed by KASAN zero pages. So keep modules
+ * out of the vmalloc region if KASAN is enabled without
+ * KASAN_VMALLOC, and put the kernel well within 4 GB of the
+ * module region.
*/
return offset % SZ_2G;
@@ -168,8 +172,12 @@ u64 __init kaslr_early_init(void)
module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
module_alloc_base &= PAGE_MASK;
- __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base));
- __flush_dcache_area(&memstart_offset_seed, sizeof(memstart_offset_seed));
+ dcache_clean_inval_poc((unsigned long)&module_alloc_base,
+ (unsigned long)&module_alloc_base +
+ sizeof(module_alloc_base));
+ dcache_clean_inval_poc((unsigned long)&memstart_offset_seed,
+ (unsigned long)&memstart_offset_seed +
+ sizeof(memstart_offset_seed));
return offset;
}
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 1a157ca33262..2aede780fb80 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -17,6 +17,7 @@
#include <asm/debug-monitors.h>
#include <asm/insn.h>
+#include <asm/patching.h>
#include <asm/traps.h>
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index 90a335c74442..03ceabe4d912 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -68,10 +68,16 @@ int machine_kexec_post_load(struct kimage *kimage)
kimage->arch.kern_reloc = __pa(reloc_code);
kexec_image_info(kimage);
- /* Flush the reloc_code in preparation for its execution. */
- __flush_dcache_area(reloc_code, arm64_relocate_new_kernel_size);
- flush_icache_range((uintptr_t)reloc_code, (uintptr_t)reloc_code +
- arm64_relocate_new_kernel_size);
+ /*
+ * For execution with the MMU off, reloc_code needs to be cleaned to the
+ * PoC and invalidated from the I-cache.
+ */
+ dcache_clean_inval_poc((unsigned long)reloc_code,
+ (unsigned long)reloc_code +
+ arm64_relocate_new_kernel_size);
+ icache_inval_pou((uintptr_t)reloc_code,
+ (uintptr_t)reloc_code +
+ arm64_relocate_new_kernel_size);
return 0;
}
@@ -102,16 +108,18 @@ static void kexec_list_flush(struct kimage *kimage)
for (entry = &kimage->head; ; entry++) {
unsigned int flag;
- void *addr;
+ unsigned long addr;
/* flush the list entries. */
- __flush_dcache_area(entry, sizeof(kimage_entry_t));
+ dcache_clean_inval_poc((unsigned long)entry,
+ (unsigned long)entry +
+ sizeof(kimage_entry_t));
flag = *entry & IND_FLAGS;
if (flag == IND_DONE)
break;
- addr = phys_to_virt(*entry & PAGE_MASK);
+ addr = (unsigned long)phys_to_virt(*entry & PAGE_MASK);
switch (flag) {
case IND_INDIRECTION:
@@ -120,7 +128,7 @@ static void kexec_list_flush(struct kimage *kimage)
break;
case IND_SOURCE:
/* flush the source pages. */
- __flush_dcache_area(addr, PAGE_SIZE);
+ dcache_clean_inval_poc(addr, addr + PAGE_SIZE);
break;
case IND_DESTINATION:
break;
@@ -147,8 +155,10 @@ static void kexec_segment_flush(const struct kimage *kimage)
kimage->segment[i].memsz,
kimage->segment[i].memsz / PAGE_SIZE);
- __flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
- kimage->segment[i].memsz);
+ dcache_clean_inval_poc(
+ (unsigned long)phys_to_virt(kimage->segment[i].mem),
+ (unsigned long)phys_to_virt(kimage->segment[i].mem) +
+ kimage->segment[i].memsz);
}
}
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 0cde47a63beb..63634b4d72c1 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -15,23 +15,12 @@
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <linux/of_fdt.h>
-#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
-#include <asm/byteorder.h>
-
-/* relevant device tree properties */
-#define FDT_PROP_KEXEC_ELFHDR "linux,elfcorehdr"
-#define FDT_PROP_MEM_RANGE "linux,usable-memory-range"
-#define FDT_PROP_INITRD_START "linux,initrd-start"
-#define FDT_PROP_INITRD_END "linux,initrd-end"
-#define FDT_PROP_BOOTARGS "bootargs"
-#define FDT_PROP_KASLR_SEED "kaslr-seed"
-#define FDT_PROP_RNG_SEED "rng-seed"
-#define RNG_SEED_SIZE 128
const struct kexec_file_ops * const kexec_file_loaders[] = {
&kexec_image_ops,
@@ -40,174 +29,16 @@ const struct kexec_file_ops * const kexec_file_loaders[] = {
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
- vfree(image->arch.dtb);
+ kvfree(image->arch.dtb);
image->arch.dtb = NULL;
- vfree(image->arch.elf_headers);
- image->arch.elf_headers = NULL;
- image->arch.elf_headers_sz = 0;
+ vfree(image->elf_headers);
+ image->elf_headers = NULL;
+ image->elf_headers_sz = 0;
return kexec_image_post_load_cleanup_default(image);
}
-static int setup_dtb(struct kimage *image,
- unsigned long initrd_load_addr, unsigned long initrd_len,
- char *cmdline, void *dtb)
-{
- int off, ret;
-
- ret = fdt_path_offset(dtb, "/chosen");
- if (ret < 0)
- goto out;
-
- off = ret;
-
- ret = fdt_delprop(dtb, off, FDT_PROP_KEXEC_ELFHDR);
- if (ret && ret != -FDT_ERR_NOTFOUND)
- goto out;
- ret = fdt_delprop(dtb, off, FDT_PROP_MEM_RANGE);
- if (ret && ret != -FDT_ERR_NOTFOUND)
- goto out;
-
- if (image->type == KEXEC_TYPE_CRASH) {
- /* add linux,elfcorehdr */
- ret = fdt_appendprop_addrrange(dtb, 0, off,
- FDT_PROP_KEXEC_ELFHDR,
- image->arch.elf_headers_mem,
- image->arch.elf_headers_sz);
- if (ret)
- return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL);
-
- /* add linux,usable-memory-range */
- ret = fdt_appendprop_addrrange(dtb, 0, off,
- FDT_PROP_MEM_RANGE,
- crashk_res.start,
- crashk_res.end - crashk_res.start + 1);
- if (ret)
- return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL);
- }
-
- /* add bootargs */
- if (cmdline) {
- ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline);
- if (ret)
- goto out;
- } else {
- ret = fdt_delprop(dtb, off, FDT_PROP_BOOTARGS);
- if (ret && (ret != -FDT_ERR_NOTFOUND))
- goto out;
- }
-
- /* add initrd-* */
- if (initrd_load_addr) {
- ret = fdt_setprop_u64(dtb, off, FDT_PROP_INITRD_START,
- initrd_load_addr);
- if (ret)
- goto out;
-
- ret = fdt_setprop_u64(dtb, off, FDT_PROP_INITRD_END,
- initrd_load_addr + initrd_len);
- if (ret)
- goto out;
- } else {
- ret = fdt_delprop(dtb, off, FDT_PROP_INITRD_START);
- if (ret && (ret != -FDT_ERR_NOTFOUND))
- goto out;
-
- ret = fdt_delprop(dtb, off, FDT_PROP_INITRD_END);
- if (ret && (ret != -FDT_ERR_NOTFOUND))
- goto out;
- }
-
- /* add kaslr-seed */
- ret = fdt_delprop(dtb, off, FDT_PROP_KASLR_SEED);
- if (ret == -FDT_ERR_NOTFOUND)
- ret = 0;
- else if (ret)
- goto out;
-
- if (rng_is_initialized()) {
- u64 seed = get_random_u64();
- ret = fdt_setprop_u64(dtb, off, FDT_PROP_KASLR_SEED, seed);
- if (ret)
- goto out;
- } else {
- pr_notice("RNG is not initialised: omitting \"%s\" property\n",
- FDT_PROP_KASLR_SEED);
- }
-
- /* add rng-seed */
- if (rng_is_initialized()) {
- void *rng_seed;
- ret = fdt_setprop_placeholder(dtb, off, FDT_PROP_RNG_SEED,
- RNG_SEED_SIZE, &rng_seed);
- if (ret)
- goto out;
- get_random_bytes(rng_seed, RNG_SEED_SIZE);
- } else {
- pr_notice("RNG is not initialised: omitting \"%s\" property\n",
- FDT_PROP_RNG_SEED);
- }
-
-out:
- if (ret)
- return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL;
-
- return 0;
-}
-
-/*
- * More space needed so that we can add initrd, bootargs, kaslr-seed,
- * rng-seed, userable-memory-range and elfcorehdr.
- */
-#define DTB_EXTRA_SPACE 0x1000
-
-static int create_dtb(struct kimage *image,
- unsigned long initrd_load_addr, unsigned long initrd_len,
- char *cmdline, void **dtb)
-{
- void *buf;
- size_t buf_size;
- size_t cmdline_len;
- int ret;
-
- cmdline_len = cmdline ? strlen(cmdline) : 0;
- buf_size = fdt_totalsize(initial_boot_params)
- + cmdline_len + DTB_EXTRA_SPACE;
-
- for (;;) {
- buf = vmalloc(buf_size);
- if (!buf)
- return -ENOMEM;
-
- /* duplicate a device tree blob */
- ret = fdt_open_into(initial_boot_params, buf, buf_size);
- if (ret) {
- vfree(buf);
- return -EINVAL;
- }
-
- ret = setup_dtb(image, initrd_load_addr, initrd_len,
- cmdline, buf);
- if (ret) {
- vfree(buf);
- if (ret == -ENOMEM) {
- /* unlikely, but just in case */
- buf_size += DTB_EXTRA_SPACE;
- continue;
- } else {
- return ret;
- }
- }
-
- /* trim it */
- fdt_pack(buf);
- *dtb = buf;
-
- return 0;
- }
-}
-
static int prepare_elf_headers(void **addr, unsigned long *sz)
{
struct crash_mem *cmem;
@@ -284,12 +115,12 @@ int load_other_segments(struct kimage *image,
vfree(headers);
goto out_err;
}
- image->arch.elf_headers = headers;
- image->arch.elf_headers_mem = kbuf.mem;
- image->arch.elf_headers_sz = headers_sz;
+ image->elf_headers = headers;
+ image->elf_load_addr = kbuf.mem;
+ image->elf_headers_sz = headers_sz;
pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
- image->arch.elf_headers_mem, kbuf.bufsz, kbuf.memsz);
+ image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
}
/* load initrd */
@@ -314,12 +145,15 @@ int load_other_segments(struct kimage *image,
}
/* load dtb */
- ret = create_dtb(image, initrd_load_addr, initrd_len, cmdline, &dtb);
- if (ret) {
+ dtb = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
+ initrd_len, cmdline, 0);
+ if (!dtb) {
pr_err("Preparing for new dtb failed\n");
goto out_err;
}
+ /* trim it */
+ fdt_pack(dtb);
dtb_len = fdt_totalsize(dtb);
kbuf.buffer = dtb;
kbuf.bufsz = dtb_len;
@@ -343,6 +177,6 @@ int load_other_segments(struct kimage *image,
out_err:
image->nr_segments = orig_segments;
- vfree(dtb);
+ kvfree(dtb);
return ret;
}
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index fe21e0f06492..b5ec010c481f 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -40,14 +40,16 @@ void *module_alloc(unsigned long size)
NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
- !IS_ENABLED(CONFIG_KASAN_GENERIC) &&
- !IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+ (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
+ (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+ !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
/*
- * KASAN can only deal with module allocations being served
- * from the reserved module region, since the remainder of
- * the vmalloc region is already backed by zero shadow pages,
- * and punching holes into it is non-trivial. Since the module
- * region is not randomized when KASAN is enabled, it is even
+ * KASAN without KASAN_VMALLOC can only deal with module
+ * allocations being served from the reserved module region,
+ * since the remainder of the vmalloc region is already
+ * backed by zero shadow pages, and punching holes into it
+ * is non-trivial. Since the module region is not randomized
+ * when KASAN is enabled without KASAN_VMALLOC, it is even
* less likely that the module region gets exhausted, so we
* can simply omit this fallback in that case.
*/
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index b3c70a612c7a..69b3fde8759e 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -26,10 +26,15 @@ u64 gcr_kernel_excl __ro_after_init;
static bool report_fault_once = true;
-static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
+#ifdef CONFIG_KASAN_HW_TAGS
+/* Whether the MTE asynchronous mode is enabled. */
+DEFINE_STATIC_KEY_FALSE(mte_async_mode);
+EXPORT_SYMBOL_GPL(mte_async_mode);
+#endif
+
+static void mte_sync_page_tags(struct page *page, pte_t old_pte,
+ bool check_swap, bool pte_is_tagged)
{
- pte_t old_pte = READ_ONCE(*ptep);
-
if (check_swap && is_swap_pte(old_pte)) {
swp_entry_t entry = pte_to_swp_entry(old_pte);
@@ -37,6 +42,9 @@ static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
return;
}
+ if (!pte_is_tagged)
+ return;
+
page_kasan_tag_reset(page);
/*
* We need smp_wmb() in between setting the flags and clearing the
@@ -49,16 +57,22 @@ static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
mte_clear_page_tags(page_address(page));
}
-void mte_sync_tags(pte_t *ptep, pte_t pte)
+void mte_sync_tags(pte_t old_pte, pte_t pte)
{
struct page *page = pte_page(pte);
long i, nr_pages = compound_nr(page);
bool check_swap = nr_pages == 1;
+ bool pte_is_tagged = pte_tagged(pte);
+
+ /* Early out if there's nothing to do */
+ if (!check_swap && !pte_is_tagged)
+ return;
/* if PG_mte_tagged is set, tags have already been initialised */
for (i = 0; i < nr_pages; i++, page++) {
if (!test_and_set_bit(PG_mte_tagged, &page->flags))
- mte_sync_page_tags(page, ptep, check_swap);
+ mte_sync_page_tags(page, old_pte, check_swap,
+ pte_is_tagged);
}
}
@@ -107,13 +121,45 @@ void mte_init_tags(u64 max_tag)
write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
}
-void mte_enable_kernel(void)
+static inline void __mte_enable_kernel(const char *mode, unsigned long tcf)
{
/* Enable MTE Sync Mode for EL1. */
- sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
+ sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, tcf);
isb();
+
+ pr_info_once("MTE: enabled in %s mode at EL1\n", mode);
}
+#ifdef CONFIG_KASAN_HW_TAGS
+void mte_enable_kernel_sync(void)
+{
+ /*
+ * Make sure we enter this function when no PE has set
+ * async mode previously.
+ */
+ WARN_ONCE(system_uses_mte_async_mode(),
+ "MTE async mode enabled system wide!");
+
+ __mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC);
+}
+
+void mte_enable_kernel_async(void)
+{
+ __mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC);
+
+ /*
+ * MTE async mode is set system wide by the first PE that
+ * executes this function.
+ *
+ * Note: if KASAN ever gains the ability to switch between
+ * sync and async modes at runtime, this strategy needs
+ * to be reviewed.
+ */
+ if (!system_uses_mte_async_mode())
+ static_branch_enable(&mte_async_mode);
+}
+#endif
+
void mte_set_report_once(bool state)
{
WRITE_ONCE(report_fault_once, state);
@@ -124,25 +170,28 @@ bool mte_report_once(void)
return READ_ONCE(report_fault_once);
}
-static void update_sctlr_el1_tcf0(u64 tcf0)
+#ifdef CONFIG_KASAN_HW_TAGS
+void mte_check_tfsr_el1(void)
{
- /* ISB required for the kernel uaccess routines */
- sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
- isb();
-}
+ u64 tfsr_el1;
-static void set_sctlr_el1_tcf0(u64 tcf0)
-{
- /*
- * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
- * optimisation. Disable preemption so that it does not see
- * the variable update before the SCTLR_EL1.TCF0 one.
- */
- preempt_disable();
- current->thread.sctlr_tcf0 = tcf0;
- update_sctlr_el1_tcf0(tcf0);
- preempt_enable();
+ if (!system_supports_mte())
+ return;
+
+ tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+
+ if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
+ /*
+ * Note: isb() is not required after this direct write
+ * because there is no indirect read subsequent to it
+ * (per ARM DDI 0487F.c table D13-1).
+ */
+ write_sysreg_s(0, SYS_TFSR_EL1);
+
+ kasan_report_async();
+ }
}
+#endif
static void update_gcr_el1_excl(u64 excl)
{
@@ -166,7 +215,7 @@ static void set_gcr_el1_excl(u64 excl)
*/
}
-void flush_mte_state(void)
+void mte_thread_init_user(void)
{
if (!system_supports_mte())
return;
@@ -176,19 +225,39 @@ void flush_mte_state(void)
write_sysreg_s(0, SYS_TFSRE0_EL1);
clear_thread_flag(TIF_MTE_ASYNC_FAULT);
/* disable tag checking */
- set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
+ set_task_sctlr_el1((current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK) |
+ SCTLR_EL1_TCF0_NONE);
/* reset tag generation mask */
set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
}
void mte_thread_switch(struct task_struct *next)
{
+ /*
+ * Check if an async tag exception occurred at EL1.
+ *
+ * Note: On the context switch path we rely on the dsb() present
+ * in __switch_to() to guarantee that the indirect writes to TFSR_EL1
+ * are synchronized before this point.
+ */
+ isb();
+ mte_check_tfsr_el1();
+}
+
+void mte_suspend_enter(void)
+{
if (!system_supports_mte())
return;
- /* avoid expensive SCTLR_EL1 accesses if no change */
- if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
- update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
+ /*
+ * The barriers are required to guarantee that the indirect writes
+ * to TFSR_EL1 are synchronized before we report the state.
+ */
+ dsb(nsh);
+ isb();
+
+ /* Report SYS_TFSR_EL1 before suspend entry */
+ mte_check_tfsr_el1();
}
void mte_suspend_exit(void)
@@ -201,7 +270,7 @@ void mte_suspend_exit(void)
long set_mte_ctrl(struct task_struct *task, unsigned long arg)
{
- u64 tcf0;
+ u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;
u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
SYS_GCR_EL1_EXCL_MASK;
@@ -210,23 +279,23 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
switch (arg & PR_MTE_TCF_MASK) {
case PR_MTE_TCF_NONE:
- tcf0 = SCTLR_EL1_TCF0_NONE;
+ sctlr |= SCTLR_EL1_TCF0_NONE;
break;
case PR_MTE_TCF_SYNC:
- tcf0 = SCTLR_EL1_TCF0_SYNC;
+ sctlr |= SCTLR_EL1_TCF0_SYNC;
break;
case PR_MTE_TCF_ASYNC:
- tcf0 = SCTLR_EL1_TCF0_ASYNC;
+ sctlr |= SCTLR_EL1_TCF0_ASYNC;
break;
default:
return -EINVAL;
}
if (task != current) {
- task->thread.sctlr_tcf0 = tcf0;
+ task->thread.sctlr_user = sctlr;
task->thread.gcr_user_excl = gcr_excl;
} else {
- set_sctlr_el1_tcf0(tcf0);
+ set_task_sctlr_el1(sctlr);
set_gcr_el1_excl(gcr_excl);
}
@@ -243,7 +312,7 @@ long get_mte_ctrl(struct task_struct *task)
ret = incl << PR_MTE_TAG_SHIFT;
- switch (task->thread.sctlr_tcf0) {
+ switch (task->thread.sctlr_user & SCTLR_EL1_TCF0_MASK) {
case SCTLR_EL1_TCF0_NONE:
ret |= PR_MTE_TCF_NONE;
break;
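
These per-task controls are driven from userspace through prctl(); a hedged example that enables synchronous tag checking and lets IRG generate tags 1-15 (constants from linux/prctl.h; the function name is illustrative):

        #include <sys/prctl.h>

        /* Sketch: opt the calling thread into synchronous MTE. */
        int enable_mte_sync(void)
        {
                unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                                     (0xfffeUL << PR_MTE_TAG_SHIFT); /* include tags 1-15 */

                return prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0);
        }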
diff --git a/arch/arm64/kernel/paravirt.c b/arch/arm64/kernel/paravirt.c
index c07d7a034941..75fed4460407 100644
--- a/arch/arm64/kernel/paravirt.c
+++ b/arch/arm64/kernel/paravirt.c
@@ -18,6 +18,7 @@
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/static_call.h>
#include <asm/paravirt.h>
#include <asm/pvclock-abi.h>
@@ -26,8 +27,12 @@
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
-struct paravirt_patch_template pv_ops;
-EXPORT_SYMBOL_GPL(pv_ops);
+static u64 native_steal_clock(int cpu)
+{
+ return 0;
+}
+
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
struct pv_time_stolen_time_region {
struct pvclock_vcpu_stolen_time *kaddr;
@@ -45,7 +50,7 @@ static int __init parse_no_stealacc(char *arg)
early_param("no-steal-acc", parse_no_stealacc);
/* return stolen time in ns by asking the hypervisor */
-static u64 pv_steal_clock(int cpu)
+static u64 para_steal_clock(int cpu)
{
struct pv_time_stolen_time_region *reg;
@@ -150,7 +155,7 @@ int __init pv_time_init(void)
if (ret)
return ret;
- pv_ops.time.steal_clock = pv_steal_clock;
+ static_call_update(pv_steal_clock, para_steal_clock);
static_key_slow_inc(&paravirt_steal_enabled);
if (steal_acc)
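
Call sites then go through the static call instead of a pv_ops load-and-branch; a hedged sketch of the wrapper shape (the real one lives in asm/paravirt.h):

        /* Sketch: the branch target is patched by static_call_update() above. */
        static inline u64 paravirt_steal_clock(int cpu)
        {
                return static_call(pv_steal_clock)(cpu);
        }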
diff --git a/arch/arm64/kernel/patching.c b/arch/arm64/kernel/patching.c
new file mode 100644
index 000000000000..771f543464e0
--- /dev/null
+++ b/arch/arm64/kernel/patching.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/insn.h>
+#include <asm/kprobes.h>
+#include <asm/patching.h>
+#include <asm/sections.h>
+
+static DEFINE_RAW_SPINLOCK(patch_lock);
+
+static bool is_exit_text(unsigned long addr)
+{
+ /* discarded with init text/data */
+ return system_state < SYSTEM_RUNNING &&
+ addr >= (unsigned long)__exittext_begin &&
+ addr < (unsigned long)__exittext_end;
+}
+
+static bool is_image_text(unsigned long addr)
+{
+ return core_kernel_text(addr) || is_exit_text(addr);
+}
+
+static void __kprobes *patch_map(void *addr, int fixmap)
+{
+ unsigned long uintaddr = (uintptr_t) addr;
+ bool image = is_image_text(uintaddr);
+ struct page *page;
+
+ if (image)
+ page = phys_to_page(__pa_symbol(addr));
+ else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+ page = vmalloc_to_page(addr);
+ else
+ return addr;
+
+ BUG_ON(!page);
+ return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+ (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap)
+{
+ clear_fixmap(fixmap);
+}
+/*
+ * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+ * little-endian.
+ */
+int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+{
+ int ret;
+ __le32 val;
+
+ ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
+ if (!ret)
+ *insnp = le32_to_cpu(val);
+
+ return ret;
+}
+
+static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
+{
+ void *waddr = addr;
+ unsigned long flags = 0;
+ int ret;
+
+ raw_spin_lock_irqsave(&patch_lock, flags);
+ waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+ ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
+
+ patch_unmap(FIX_TEXT_POKE0);
+ raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_write(void *addr, u32 insn)
+{
+ return __aarch64_insn_write(addr, cpu_to_le32(insn));
+}
+
+int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+{
+ u32 *tp = addr;
+ int ret;
+
+ /* A64 instructions must be word aligned */
+ if ((uintptr_t)tp & 0x3)
+ return -EINVAL;
+
+ ret = aarch64_insn_write(tp, insn);
+ if (ret == 0)
+ caches_clean_inval_pou((uintptr_t)tp,
+ (uintptr_t)tp + AARCH64_INSN_SIZE);
+
+ return ret;
+}
+
+struct aarch64_insn_patch {
+ void **text_addrs;
+ u32 *new_insns;
+ int insn_cnt;
+ atomic_t cpu_count;
+};
+
+static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+{
+ int i, ret = 0;
+ struct aarch64_insn_patch *pp = arg;
+
+ /* The first CPU becomes master */
+ if (atomic_inc_return(&pp->cpu_count) == 1) {
+ for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+ ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+ pp->new_insns[i]);
+ /* Notify other processors with an additional increment. */
+ atomic_inc(&pp->cpu_count);
+ } else {
+ while (atomic_read(&pp->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ isb();
+ }
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+{
+ struct aarch64_insn_patch patch = {
+ .text_addrs = addrs,
+ .new_insns = insns,
+ .insn_cnt = cnt,
+ .cpu_count = ATOMIC_INIT(0),
+ };
+
+ if (cnt <= 0)
+ return -EINVAL;
+
+ return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
+ cpu_online_mask);
+}
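
A hedged usage sketch of the stop_machine-serialized interface: patching a single site to a NOP, assuming aarch64_insn_gen_nop() is available from asm/insn.h:

        /* Sketch: atomically replace the instruction at addr on all CPUs. */
        static int patch_site_to_nop(void *addr)
        {
                u32 nop = aarch64_insn_gen_nop();

                return aarch64_insn_patch_text(&addr, &nop, 1);
        }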
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 88ff471b0bce..4a72c2727309 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -116,7 +116,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
tail = (struct frame_tail __user *)regs->regs[29];
while (entry->nr < entry->max_stack &&
- tail && !((unsigned long)tail & 0xf))
+ tail && !((unsigned long)tail & 0x7))
tail = user_backtrace(tail, entry);
} else {
#ifdef CONFIG_COMPAT
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 7d2318f80955..d07788dad388 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -165,10 +165,7 @@ armv8pmu_events_sysfs_show(struct device *dev,
}
#define ARMV8_EVENT_ATTR(name, config) \
- (&((struct perf_pmu_events_attr) { \
- .attr = __ATTR(name, 0444, armv8pmu_events_sysfs_show, NULL), \
- .id = config, \
- }).attr.attr)
+ PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config)
static struct attribute *armv8_pmuv3_event_attrs[] = {
ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
@@ -312,13 +309,46 @@ static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;
- return snprintf(page, PAGE_SIZE, "0x%08x\n", slots);
+ return sysfs_emit(page, "0x%08x\n", slots);
}
static DEVICE_ATTR_RO(slots);
+static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+ u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT)
+ & ARMV8_PMU_BUS_SLOTS_MASK;
+
+ return sysfs_emit(page, "0x%08x\n", bus_slots);
+}
+
+static DEVICE_ATTR_RO(bus_slots);
+
+static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
+ char *page)
+{
+ struct pmu *pmu = dev_get_drvdata(dev);
+ struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
+ u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT)
+ & ARMV8_PMU_BUS_WIDTH_MASK;
+ u32 val = 0;
+
+ /* Encoded as Log2(number of bytes), plus one */
+ if (bus_width > 2 && bus_width < 13)
+ val = 1 << (bus_width - 1);
+
+ return sysfs_emit(page, "0x%08x\n", val);
+}
+
+static DEVICE_ATTR_RO(bus_width);
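
Worked example of the decode above, given the "Log2(number of bytes), plus one" encoding:

        PMMIR.BUS_WIDTH field   value shown in sysfs
        ---------------------   --------------------
        0-2                     0x00000000 (not stated/reserved)
        3                       0x00000004 (4-byte bus)
        6                       0x00000020 (32-byte bus)
        13-15                   0x00000000 (reserved)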
+
static struct attribute *armv8_pmuv3_caps_attrs[] = {
&dev_attr_slots.attr,
+ &dev_attr_bus_slots.attr,
+ &dev_attr_bus_width.attr,
NULL,
};
@@ -460,7 +490,7 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}
-static inline u32 armv8pmu_read_evcntr(int idx)
+static inline u64 armv8pmu_read_evcntr(int idx)
{
u32 counter = ARMV8_IDX_TO_COUNTER(idx);
@@ -470,9 +500,8 @@ static inline u32 armv8pmu_read_evcntr(int idx)
static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
int idx = event->hw.idx;
- u64 val = 0;
+ u64 val = armv8pmu_read_evcntr(idx);
- val = armv8pmu_read_evcntr(idx);
if (armv8pmu_event_is_chained(event))
val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
return val;
@@ -520,7 +549,7 @@ static u64 armv8pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- u64 value = 0;
+ u64 value;
if (idx == ARMV8_IDX_CYCLE_COUNTER)
value = read_sysreg(pmccntr_el0);
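
For reference, the bus_width decode above inverts the "Log2(number of bytes), plus one" encoding; a standalone sketch with a made-up PMMIR field value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical PMMIR.BUS_WIDTH field: 0 means not implemented,
	 * 3..12 are the valid encodings (4 bytes up to 2048 bytes). */
	uint32_t bus_width = 4;
	uint32_t bytes = 0;

	if (bus_width > 2 && bus_width < 13)
		bytes = 1u << (bus_width - 1);	/* 4 -> 8 bytes */

	printf("bus width: %u bytes\n", bytes);
	return 0;
}
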
diff --git a/arch/arm64/kernel/pointer_auth.c b/arch/arm64/kernel/pointer_auth.c
index adb955fd9bdd..60901ab0a7fe 100644
--- a/arch/arm64/kernel/pointer_auth.c
+++ b/arch/arm64/kernel/pointer_auth.c
@@ -43,6 +43,69 @@ int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg)
get_random_bytes(&keys->apdb, sizeof(keys->apdb));
if (arg & PR_PAC_APGAKEY)
get_random_bytes(&keys->apga, sizeof(keys->apga));
+ ptrauth_keys_install_user(keys);
return 0;
}
+
+static u64 arg_to_enxx_mask(unsigned long arg)
+{
+ u64 sctlr_enxx_mask = 0;
+
+ WARN_ON(arg & ~PR_PAC_ENABLED_KEYS_MASK);
+ if (arg & PR_PAC_APIAKEY)
+ sctlr_enxx_mask |= SCTLR_ELx_ENIA;
+ if (arg & PR_PAC_APIBKEY)
+ sctlr_enxx_mask |= SCTLR_ELx_ENIB;
+ if (arg & PR_PAC_APDAKEY)
+ sctlr_enxx_mask |= SCTLR_ELx_ENDA;
+ if (arg & PR_PAC_APDBKEY)
+ sctlr_enxx_mask |= SCTLR_ELx_ENDB;
+ return sctlr_enxx_mask;
+}
+
+int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys,
+ unsigned long enabled)
+{
+ u64 sctlr = tsk->thread.sctlr_user;
+
+ if (!system_supports_address_auth())
+ return -EINVAL;
+
+ if (is_compat_thread(task_thread_info(tsk)))
+ return -EINVAL;
+
+ if ((keys & ~PR_PAC_ENABLED_KEYS_MASK) || (enabled & ~keys))
+ return -EINVAL;
+
+ sctlr &= ~arg_to_enxx_mask(keys);
+ sctlr |= arg_to_enxx_mask(enabled);
+ if (tsk == current)
+ set_task_sctlr_el1(sctlr);
+ else
+ tsk->thread.sctlr_user = sctlr;
+
+ return 0;
+}
+
+int ptrauth_get_enabled_keys(struct task_struct *tsk)
+{
+ int retval = 0;
+
+ if (!system_supports_address_auth())
+ return -EINVAL;
+
+ if (is_compat_thread(task_thread_info(tsk)))
+ return -EINVAL;
+
+ if (tsk->thread.sctlr_user & SCTLR_ELx_ENIA)
+ retval |= PR_PAC_APIAKEY;
+ if (tsk->thread.sctlr_user & SCTLR_ELx_ENIB)
+ retval |= PR_PAC_APIBKEY;
+ if (tsk->thread.sctlr_user & SCTLR_ELx_ENDA)
+ retval |= PR_PAC_APDAKEY;
+ if (tsk->thread.sctlr_user & SCTLR_ELx_ENDB)
+ retval |= PR_PAC_APDBKEY;
+
+ return retval;
+}
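
From userspace, the same per-thread controls are reachable through the PR_PAC_SET_ENABLED_KEYS/PR_PAC_GET_ENABLED_KEYS prctls this series introduces; a hedged sketch with minimal error handling:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	long keys;

	/* arg2 is the mask of keys being configured, arg3 the subset
	 * to leave enabled: here, switch off the IB key only. */
	if (prctl(PR_PAC_SET_ENABLED_KEYS, PR_PAC_APIBKEY, 0, 0, 0) < 0)
		perror("PR_PAC_SET_ENABLED_KEYS");

	keys = prctl(PR_PAC_GET_ENABLED_KEYS, 0, 0, 0, 0);
	if (keys < 0)
		perror("PR_PAC_GET_ENABLED_KEYS");
	else
		printf("enabled PAC keys: %#lx\n", keys);
	return 0;
}
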
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 66aac2881ba8..6dbcc89f6662 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -7,26 +7,28 @@
* Copyright (C) 2013 Linaro Limited.
* Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
*/
+#include <linux/extable.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
-#include <linux/extable.h>
-#include <linux/slab.h>
-#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
#include <linux/stringify.h>
+#include <linux/uaccess.h>
#include <linux/vmalloc.h>
-#include <asm/traps.h>
-#include <asm/ptrace.h>
+
#include <asm/cacheflush.h>
-#include <asm/debug-monitors.h>
#include <asm/daifflags.h>
-#include <asm/system_misc.h>
+#include <asm/debug-monitors.h>
#include <asm/insn.h>
-#include <linux/uaccess.h>
#include <asm/irq.h>
+#include <asm/patching.h>
+#include <asm/ptrace.h>
#include <asm/sections.h>
+#include <asm/system_misc.h>
+#include <asm/traps.h>
#include "decode-insn.h"
@@ -264,35 +266,19 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
* normal page fault.
*/
instruction_pointer_set(regs, (unsigned long) cur->addr);
- if (!instruction_pointer(regs))
- BUG();
+ BUG_ON(!instruction_pointer(regs));
- if (kcb->kprobe_status == KPROBE_REENTER)
+ if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
- else
+ } else {
+ kprobes_restore_local_irqflag(kcb, regs);
reset_current_kprobe();
+ }
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c
index 25f67ec59635..22d0b3252476 100644
--- a/arch/arm64/kernel/probes/simulate-insn.c
+++ b/arch/arm64/kernel/probes/simulate-insn.c
@@ -10,6 +10,7 @@
#include <linux/kprobes.h>
#include <asm/ptrace.h>
+#include <asm/traps.h>
#include "simulate-insn.h"
diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
index 2c247634552b..9be668f3f034 100644
--- a/arch/arm64/kernel/probes/uprobes.c
+++ b/arch/arm64/kernel/probes/uprobes.c
@@ -21,7 +21,7 @@ void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
memcpy(dst, src, len);
/* flush caches (dcache/icache) */
- sync_icache_aliases(dst, len);
+ sync_icache_aliases((unsigned long)dst, (unsigned long)dst + len);
kunmap_atomic(xol_page_kaddr);
}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 325c83b1a24d..5ba0ed036dee 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -18,7 +18,6 @@
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
-#include <linux/lockdep.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/nospec.h>
@@ -46,7 +45,6 @@
#include <linux/prctl.h>
#include <asm/alternative.h>
-#include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
@@ -57,6 +55,8 @@
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
+#include <asm/switch_to.h>
+#include <asm/system_misc.h>
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
@@ -72,63 +72,6 @@ EXPORT_SYMBOL_GPL(pm_power_off);
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-static void noinstr __cpu_do_idle(void)
-{
- dsb(sy);
- wfi();
-}
-
-static void noinstr __cpu_do_idle_irqprio(void)
-{
- unsigned long pmr;
- unsigned long daif_bits;
-
- daif_bits = read_sysreg(daif);
- write_sysreg(daif_bits | PSR_I_BIT, daif);
-
- /*
- * Unmask PMR before going idle to make sure interrupts can
- * be raised.
- */
- pmr = gic_read_pmr();
- gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
-
- __cpu_do_idle();
-
- gic_write_pmr(pmr);
- write_sysreg(daif_bits, daif);
-}
-
-/*
- * cpu_do_idle()
- *
- * Idle the processor (wait for interrupt).
- *
- * If the CPU supports priority masking we must do additional work to
- * ensure that interrupts are not masked at the PMR (because the core will
- * not wake up if we block the wake up signal in the interrupt controller).
- */
-void noinstr cpu_do_idle(void)
-{
- if (system_uses_irq_prio_masking())
- __cpu_do_idle_irqprio();
- else
- __cpu_do_idle();
-}
-
-/*
- * This is our default idle handler.
- */
-void noinstr arch_cpu_idle(void)
-{
- /*
- * This should do all the clock switching and wait for interrupt
- * tricks
- */
- cpu_do_idle();
- raw_local_irq_enable();
-}
-
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
@@ -292,13 +235,10 @@ void __show_regs(struct pt_regs *regs)
i = top_reg;
while (i >= 0) {
- printk("x%-2d: %016llx ", i, regs->regs[i]);
- i--;
+ printk("x%-2d: %016llx", i, regs->regs[i]);
- if (i % 2 == 0) {
- pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
- i--;
- }
+ while (i-- % 3)
+ pr_cont(" x%-2d: %016llx", i, regs->regs[i]);
pr_cont("\n");
}
@@ -339,7 +279,6 @@ void flush_thread(void)
tls_thread_flush();
flush_ptrace_hw_breakpoint(current);
flush_tagged_addr_state();
- flush_mte_state();
}
void release_thread(struct task_struct *dead_task)
@@ -437,6 +376,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
}
p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
p->thread.cpu_context.sp = (unsigned long)childregs;
+ /*
+ * For the benefit of the unwinder, set up childregs->stackframe
+ * as the final frame for the new task.
+ */
+ p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;
ptrace_hw_copy_thread(p);
@@ -529,6 +473,40 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
write_sysreg(val, cntkctl_el1);
}
+static void compat_thread_switch(struct task_struct *next)
+{
+ if (!is_compat_thread(task_thread_info(next)))
+ return;
+
+ if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+ set_tsk_thread_flag(next, TIF_NOTIFY_RESUME);
+}
+
+static void update_sctlr_el1(u64 sctlr)
+{
+ /*
+ * EnIA must not be cleared while in the kernel as this is necessary for
+ * in-kernel PAC. It will be cleared on kernel exit if needed.
+ */
+ sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);
+
+ /* ISB required for the kernel uaccess routines when setting TCF0. */
+ isb();
+}
+
+void set_task_sctlr_el1(u64 sctlr)
+{
+ /*
+ * __switch_to() checks current->thread.sctlr as an
+ * optimisation. Disable preemption so that it does not see
+ * the variable update before the SCTLR_EL1 one.
+ */
+ preempt_disable();
+ current->thread.sctlr_user = sctlr;
+ update_sctlr_el1(sctlr);
+ preempt_enable();
+}
+
/*
* Thread switching.
*/
@@ -544,6 +522,8 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
entry_task_switch(next);
ssbs_thread_switch(next);
erratum_1418040_thread_switch(prev, next);
+ ptrauth_thread_switch_user(next);
+ compat_thread_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -559,6 +539,9 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
* registers.
*/
mte_thread_switch(next);
+ /* avoid expensive SCTLR_EL1 accesses if no change */
+ if (prev->thread.sctlr_user != next->thread.sctlr_user)
+ update_sctlr_el1(next->thread.sctlr_user);
/* the actual thread switch */
last = cpu_switch_to(prev, next);
@@ -571,7 +554,7 @@ unsigned long get_wchan(struct task_struct *p)
struct stackframe frame;
unsigned long stack_page, ret = 0;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)try_get_task_stack(p);
@@ -606,9 +589,17 @@ unsigned long arch_align_stack(unsigned long sp)
*/
void arch_setup_new_exec(void)
{
- current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+ unsigned long mmflags = 0;
+
+ if (is_compat_task()) {
+ mmflags = MMCF_AARCH32;
+ if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
+ set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+ }
- ptrauth_thread_init_user(current);
+ current->mm->context.flags = mmflags;
+ ptrauth_thread_init_user();
+ mte_thread_init_user();
if (task_spec_ssb_noexec(current)) {
arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
@@ -696,22 +687,6 @@ static int __init tagged_addr_init(void)
core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
-asmlinkage void __sched arm64_preempt_schedule_irq(void)
-{
- lockdep_assert_irqs_disabled();
-
- /*
- * Preempting a task from an IRQ means we leave copies of PSTATE
- * on the stack. cpufeature's enable calls may modify PSTATE, but
- * resuming one of these preempted tasks would undo those changes.
- *
- * Only allow a task to be preempted once cpufeatures have been
- * enabled.
- */
- if (system_capabilities_finalized())
- preempt_schedule_irq();
-}
-
#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
bool has_interp, bool is_interp)
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 62d2bda7adb8..ab7f4c476104 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -38,7 +38,8 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu)
static int cpu_psci_cpu_boot(unsigned int cpu)
{
- int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
+ phys_addr_t pa_secondary_entry = __pa_symbol(function_nocfi(secondary_entry));
+ int err = psci_ops.cpu_on(cpu_logical_map(cpu), pa_secondary_entry);
if (err)
pr_err("failed to boot CPU%d (%d)\n", cpu, err);
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 170f42fd6101..499b6b2f9757 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -122,7 +122,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
- on_irq_stack(addr, NULL);
+ on_irq_stack(addr, sizeof(unsigned long), NULL);
}
/**
@@ -909,6 +909,38 @@ static int pac_mask_get(struct task_struct *target,
return membuf_write(&to, &uregs, sizeof(uregs));
}
+static int pac_enabled_keys_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ long enabled_keys = ptrauth_get_enabled_keys(target);
+
+ if (IS_ERR_VALUE(enabled_keys))
+ return enabled_keys;
+
+ return membuf_write(&to, &enabled_keys, sizeof(enabled_keys));
+}
+
+static int pac_enabled_keys_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ long enabled_keys = ptrauth_get_enabled_keys(target);
+
+ if (IS_ERR_VALUE(enabled_keys))
+ return enabled_keys;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &enabled_keys, 0,
+ sizeof(long));
+ if (ret)
+ return ret;
+
+ return ptrauth_set_enabled_keys(target, PR_PAC_ENABLED_KEYS_MASK,
+ enabled_keys);
+}
+
#ifdef CONFIG_CHECKPOINT_RESTORE
static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
{
@@ -1074,6 +1106,7 @@ enum aarch64_regset {
#endif
#ifdef CONFIG_ARM64_PTR_AUTH
REGSET_PAC_MASK,
+ REGSET_PAC_ENABLED_KEYS,
#ifdef CONFIG_CHECKPOINT_RESTORE
REGSET_PACA_KEYS,
REGSET_PACG_KEYS,
@@ -1160,6 +1193,14 @@ static const struct user_regset aarch64_regsets[] = {
.regset_get = pac_mask_get,
/* this cannot be set dynamically */
},
+ [REGSET_PAC_ENABLED_KEYS] = {
+ .core_note_type = NT_ARM_PAC_ENABLED_KEYS,
+ .n = 1,
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .regset_get = pac_enabled_keys_get,
+ .set = pac_enabled_keys_set,
+ },
#ifdef CONFIG_CHECKPOINT_RESTORE
[REGSET_PACA_KEYS] = {
.core_note_type = NT_ARM_PACA_KEYS,
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 2c7ca449dd51..47f77d1234cb 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -162,31 +162,33 @@ static int init_sdei_scs(void)
return err;
}
-static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
+static bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info);
}
-static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
+static bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info);
}
-bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
+bool _on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info)
{
if (!IS_ENABLED(CONFIG_VMAP_STACK))
return false;
- if (on_sdei_critical_stack(sp, info))
+ if (on_sdei_critical_stack(sp, size, info))
return true;
- if (on_sdei_normal_stack(sp, info))
+ if (on_sdei_normal_stack(sp, size, info))
return true;
return false;
@@ -231,13 +233,13 @@ out_err:
}
/*
- * __sdei_handler() returns one of:
+ * do_sdei_event() returns one of:
* SDEI_EV_HANDLED - success, return to the interrupted context.
 * SDEI_EV_FAILED - failure, return this error code to firmware.
* virtual-address - success, return to this address.
*/
-static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
- struct sdei_registered_event *arg)
+unsigned long __kprobes do_sdei_event(struct pt_regs *regs,
+ struct sdei_registered_event *arg)
{
u32 mode;
int i, err = 0;
@@ -292,45 +294,3 @@ static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
return vbar + 0x480;
}
-
-static void __kprobes notrace __sdei_pstate_entry(void)
-{
- /*
- * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
- * whether PSTATE bits are inherited unchanged or generated from
- * scratch, and the TF-A implementation always clears PAN and always
- * clears UAO. There are no other known implementations.
- *
- * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
- * PSTATE is modified upon architectural exceptions, and so PAN is
- * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
- * cleared.
- *
- * We must explicitly reset PAN to the expected state, including
- * clearing it when the host isn't using it, in case a VM had it set.
- */
- if (system_uses_hw_pan())
- set_pstate_pan(1);
- else if (cpu_has_pan())
- set_pstate_pan(0);
-}
-
-asmlinkage noinstr unsigned long
-__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
-{
- unsigned long ret;
-
- /*
- * We didn't take an exception to get here, so the HW hasn't
- * set/cleared bits in PSTATE that we may rely on. Initialize PAN.
- */
- __sdei_pstate_entry();
-
- arm64_enter_nmi(regs);
-
- ret = _sdei_handler(regs, arg);
-
- arm64_exit_nmi(regs);
-
- return ret;
-}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 61845c0821d9..880f40bae60e 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
+#include <linux/panic_notifier.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
@@ -87,12 +88,6 @@ void __init smp_setup_processor_id(void)
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
set_cpu_logical_map(0, mpidr);
- /*
- * clear __my_cpu_offset on boot CPU to avoid hang caused by
- * using percpu variable early, for example, lockdep will
- * access percpu variable inside lock_release
- */
- set_my_cpu_offset(0);
pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
(unsigned long)mpidr, read_cpuid_id());
}
@@ -381,7 +376,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
* faults in case uaccess_enable() is inadvertently called by the init
* thread.
*/
- init_task.thread_info.ttbr0 = __pa_symbol(reserved_pg_dir);
+ init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
#endif
if (boot_args[1] || boot_args[2] || boot_args[3]) {
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 6237486ff6bb..f8192f4ae0b8 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -911,6 +911,19 @@ static void do_signal(struct pt_regs *regs)
restore_saved_sigmask();
}
+static bool cpu_affinity_invalid(struct pt_regs *regs)
+{
+ if (!compat_user_mode(regs))
+ return false;
+
+ /*
+ * We're preemptible, but a reschedule will cause us to check the
+ * affinity again.
+ */
+ return !cpumask_test_cpu(raw_smp_processor_id(),
+ system_32bit_el0_cpumask());
+}
+
asmlinkage void do_notify_resume(struct pt_regs *regs,
unsigned long thread_flags)
{
@@ -938,6 +951,19 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
if (thread_flags & _TIF_NOTIFY_RESUME) {
tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
+
+ /*
+ * If we reschedule after checking the affinity
+ * then we must ensure that TIF_NOTIFY_RESUME
+ * is set so that we check the affinity again.
+ * Since tracehook_notify_resume() clears the
+ * flag, ensure that the compiler doesn't move
+ * it after the affinity check.
+ */
+ barrier();
+
+ if (cpu_affinity_invalid(regs))
+ force_sig(SIGKILL);
}
if (thread_flags & _TIF_FOREIGN_FPSTATE)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 5bfd9b87f85d..4ea9392f86e0 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -134,7 +134,7 @@ SYM_FUNC_START(_cpu_resume)
*/
bl cpu_do_resume
-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
mov x0, sp
bl kasan_unpoison_task_stack_below
#endif
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
index d62447964ed9..d3d37f932b97 100644
--- a/arch/arm64/kernel/smccc-call.S
+++ b/arch/arm64/kernel/smccc-call.S
@@ -7,8 +7,34 @@
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
+#include <asm/thread_info.h>
+
+/*
+ * If we have SMCCC v1.3 and (as is likely) no SVE state in
+ * the registers then set the SMCCC hint bit to say there's no
+ * need to preserve it. Do this by directly adjusting the SMCCC
+ * function value which is already stored in x0 ready to be called.
+ */
+SYM_FUNC_START(__arm_smccc_sve_check)
+
+ ldr_l x16, smccc_has_sve_hint
+ cbz x16, 2f
+
+ get_current_task x16
+ ldr x16, [x16, #TSK_TI_FLAGS]
+ tbnz x16, #TIF_FOREIGN_FPSTATE, 1f // Any live FP state?
+ tbnz x16, #TIF_SVE, 2f // Does that state include SVE?
+
+1: orr x0, x0, ARM_SMCCC_1_3_SVE_HINT
+
+2: ret
+SYM_FUNC_END(__arm_smccc_sve_check)
+EXPORT_SYMBOL(__arm_smccc_sve_check)
.macro SMCCC instr
+alternative_if ARM64_SVE
+ bl __arm_smccc_sve_check
+alternative_else_nop_endif
\instr #0
ldr x4, [sp]
stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
@@ -43,3 +69,60 @@ SYM_FUNC_START(__arm_smccc_hvc)
SMCCC hvc
SYM_FUNC_END(__arm_smccc_hvc)
EXPORT_SYMBOL(__arm_smccc_hvc)
+
+ .macro SMCCC_1_2 instr
+ /* Save `res` and free a GPR that won't be clobbered */
+ stp x1, x19, [sp, #-16]!
+
+ /* Ensure `args` won't be clobbered while loading regs in next step */
+ mov x19, x0
+
+ /* Load the registers x0 - x17 from the struct arm_smccc_1_2_regs */
+ ldp x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS]
+ ldp x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS]
+ ldp x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS]
+ ldp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS]
+ ldp x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS]
+ ldp x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS]
+ ldp x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS]
+ ldp x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS]
+ ldp x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS]
+
+ \instr #0
+
+ /* Load the `res` from the stack */
+ ldr x19, [sp]
+
+ /* Store the registers x0 - x17 into the result structure */
+ stp x0, x1, [x19, #ARM_SMCCC_1_2_REGS_X0_OFFS]
+ stp x2, x3, [x19, #ARM_SMCCC_1_2_REGS_X2_OFFS]
+ stp x4, x5, [x19, #ARM_SMCCC_1_2_REGS_X4_OFFS]
+ stp x6, x7, [x19, #ARM_SMCCC_1_2_REGS_X6_OFFS]
+ stp x8, x9, [x19, #ARM_SMCCC_1_2_REGS_X8_OFFS]
+ stp x10, x11, [x19, #ARM_SMCCC_1_2_REGS_X10_OFFS]
+ stp x12, x13, [x19, #ARM_SMCCC_1_2_REGS_X12_OFFS]
+ stp x14, x15, [x19, #ARM_SMCCC_1_2_REGS_X14_OFFS]
+ stp x16, x17, [x19, #ARM_SMCCC_1_2_REGS_X16_OFFS]
+
+ /* Restore original x19 */
+ ldp xzr, x19, [sp], #16
+ ret
+.endm
+
+/*
+ * void arm_smccc_1_2_hvc(const struct arm_smccc_1_2_regs *args,
+ * struct arm_smccc_1_2_regs *res);
+ */
+SYM_FUNC_START(arm_smccc_1_2_hvc)
+ SMCCC_1_2 hvc
+SYM_FUNC_END(arm_smccc_1_2_hvc)
+EXPORT_SYMBOL(arm_smccc_1_2_hvc)
+
+/*
+ * void arm_smccc_1_2_smc(const struct arm_smccc_1_2_regs *args,
+ * struct arm_smccc_1_2_regs *res);
+ */
+SYM_FUNC_START(arm_smccc_1_2_smc)
+ SMCCC_1_2 smc
+SYM_FUNC_END(arm_smccc_1_2_smc)
+EXPORT_SYMBOL(arm_smccc_1_2_smc)
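
A hedged sketch of a kernel-side caller of the new v1.2 helpers, which pass and return all of x0-x17 via struct arm_smccc_1_2_regs; the function ID below is a placeholder, not a real firmware service:

#include <linux/arm-smccc.h>

static void example_smccc_1_2(void)
{
	struct arm_smccc_1_2_regs args = {
		.a0 = 0xc4000001UL,	/* hypothetical SMC64 fast call */
		.a1 = 0,
	};
	struct arm_smccc_1_2_regs res;

	arm_smccc_1_2_smc(&args, &res);
	/* res.a0 carries the SMCCC return code; x1-x17 come back too. */
}
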
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 357590beaabb..6f6ff072acbd 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -120,9 +120,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
* page tables.
*/
secondary_data.task = idle;
- secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
update_cpu_boot_status(CPU_MMU_OFF);
- __flush_dcache_area(&secondary_data, sizeof(secondary_data));
/* Now bring the CPU into our world */
ret = boot_secondary(cpu, idle);
@@ -142,8 +140,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_crit("CPU%u: failed to come online\n", cpu);
secondary_data.task = NULL;
- secondary_data.stack = NULL;
- __flush_dcache_area(&secondary_data, sizeof(secondary_data));
status = READ_ONCE(secondary_data.status);
if (status == CPU_MMU_OFF)
status = READ_ONCE(__early_cpu_boot_status);
@@ -188,6 +184,7 @@ static void init_gic_priority_masking(void)
cpuflags = read_sysreg(daif);
WARN_ON(!(cpuflags & PSR_I_BIT));
+ WARN_ON(!(cpuflags & PSR_F_BIT));
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
}
@@ -201,10 +198,7 @@ asmlinkage notrace void secondary_start_kernel(void)
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
struct mm_struct *mm = &init_mm;
const struct cpu_operations *ops;
- unsigned int cpu;
-
- cpu = task_cpu(current);
- set_my_cpu_offset(per_cpu_offset(cpu));
+ unsigned int cpu = smp_processor_id();
/*
* All kernel threads share the same mm context; grab a
@@ -223,7 +217,6 @@ asmlinkage notrace void secondary_start_kernel(void)
init_gic_priority_masking();
rcu_cpu_starting(cpu);
- preempt_disable();
trace_hardirqs_off();
/*
@@ -351,7 +344,7 @@ void __cpu_die(unsigned int cpu)
pr_crit("CPU%u: cpu didn't die\n", cpu);
return;
}
- pr_notice("CPU%u: shutdown\n", cpu);
+ pr_debug("CPU%u: shutdown\n", cpu);
/*
* Now that the dying CPU is beyond the point of no return w.r.t.
@@ -451,6 +444,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
void __init smp_prepare_boot_cpu(void)
{
+ /*
+ * The runtime per-cpu areas have been allocated by
+ * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
+ * freed shortly, so we must move over to the runtime per-cpu area.
+ */
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
cpuinfo_store_boot_cpu();
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 056772c26098..7e1624ecab3c 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -36,7 +36,7 @@ static void write_pen_release(u64 val)
unsigned long size = sizeof(secondary_holding_pen_release);
secondary_holding_pen_release = val;
- __flush_dcache_area(start, size);
+ dcache_clean_inval_poc((unsigned long)start, (unsigned long)start + size);
}
@@ -66,6 +66,7 @@ static int smp_spin_table_cpu_init(unsigned int cpu)
static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
__le64 __iomem *release_addr;
+ phys_addr_t pa_holding_pen = __pa_symbol(function_nocfi(secondary_holding_pen));
if (!cpu_release_addr[cpu])
return -ENODEV;
@@ -88,9 +89,10 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
* boot-loader's endianness before jumping. This is mandated by
* the boot protocol.
*/
- writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
- __flush_dcache_area((__force void *)release_addr,
- sizeof(*release_addr));
+ writeq_relaxed(pa_holding_pen, release_addr);
+ dcache_clean_inval_poc((__force unsigned long)release_addr,
+ (__force unsigned long)release_addr +
+ sizeof(*release_addr));
/*
* Send an event to wake up the secondary CPU.
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index ad20981dfda4..b189de5ca6cb 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -32,6 +32,30 @@
* add sp, sp, #0x10
*/
+
+void start_backtrace(struct stackframe *frame, unsigned long fp,
+ unsigned long pc)
+{
+ frame->fp = fp;
+ frame->pc = pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ frame->graph = 0;
+#endif
+
+ /*
+ * Prime the first unwind.
+ *
+ * In unwind_frame() we'll check that the FP points to a valid stack,
+ * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
+ * treated as a transition to whichever stack that happens to be. The
+ * prev_fp value won't be used, but we set it to 0 such that it is
+ * definitely not an accessible stack address.
+ */
+ bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
+ frame->prev_fp = 0;
+ frame->prev_type = STACK_TYPE_UNKNOWN;
+}
+
/*
* Unwind from one frame record (A) to the next frame record (B).
*
@@ -44,17 +68,17 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
unsigned long fp = frame->fp;
struct stack_info info;
- /* Terminal record; nothing to unwind */
- if (!fp)
+ if (!tsk)
+ tsk = current;
+
+ /* Final frame; nothing to unwind */
+ if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
return -ENOENT;
- if (fp & 0xf)
+ if (fp & 0x7)
return -EINVAL;
- if (!tsk)
- tsk = current;
-
- if (!on_accessible_stack(tsk, fp, &info))
+ if (!on_accessible_stack(tsk, fp, 16, &info))
return -EINVAL;
if (test_bit(info.type, frame->stacks_done))
@@ -194,8 +218,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
#ifdef CONFIG_STACKTRACE
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
- struct task_struct *task, struct pt_regs *regs)
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task,
+ struct pt_regs *regs)
{
struct stackframe frame;
@@ -203,8 +228,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
start_backtrace(&frame, regs->regs[29], regs->pc);
else if (task == current)
start_backtrace(&frame,
- (unsigned long)__builtin_frame_address(0),
- (unsigned long)arch_stack_walk);
+ (unsigned long)__builtin_frame_address(1),
+ (unsigned long)__builtin_return_address(0));
else
start_backtrace(&frame, thread_saved_fp(task),
thread_saved_pc(task));
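
Seeding with __builtin_frame_address(1) and __builtin_return_address(0) makes the trace start at arch_stack_walk()'s caller instead of at itself. A small userspace analogue, assuming frame pointers are kept (e.g. -fno-omit-frame-pointer):

#include <stdio.h>

/* frame_address(1) is the caller's frame record and return_address(0)
 * the PC we will return to, so an unwinder seeded with these values
 * skips its own frame entirely. */
static void __attribute__((noinline)) seed(void)
{
	printf("caller fp=%p pc=%p\n",
	       __builtin_frame_address(1),
	       __builtin_return_address(0));
}

int main(void)
{
	seed();
	return 0;
}
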
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index d7564891ffe1..938ce6fbee8a 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -7,6 +7,7 @@
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
+#include <asm/cpuidle.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
@@ -74,8 +75,9 @@ void notrace __cpu_suspend_exit(void)
*/
spectre_v4_enable_mitigation(NULL);
- /* Restore additional MTE-specific configuration */
+ /* Restore additional feature-specific configuration */
mte_suspend_exit();
+ ptrauth_suspend_exit();
}
/*
@@ -90,6 +92,10 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
int ret = 0;
unsigned long flags;
struct sleep_stack_data state;
+ struct arm_cpuidle_irq_context context;
+
+ /* Report any MTE async fault before going to suspend */
+ mte_suspend_enter();
/*
* From this point debug exceptions are disabled to prevent
@@ -99,12 +105,18 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
flags = local_daif_save();
/*
- * Function graph tracer state gets incosistent when the kernel
+ * Function graph tracer state gets inconsistent when the kernel
* calls functions that never return (aka suspend finishers) hence
* disable graph tracing during their execution.
*/
pause_graph_tracing();
+ /*
+ * Switch to using DAIF.IF instead of PMR in order to reliably
+ * resume if we're using pseudo-NMIs.
+ */
+ arm_cpuidle_save_irq_context(&context);
+
if (__cpu_suspend_enter(&state)) {
/* Call the suspend finisher */
ret = fn(arg);
@@ -122,6 +134,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
RCU_NONIDLE(__cpu_suspend_exit());
}
+ arm_cpuidle_restore_irq_context(&context);
+
unpause_graph_tracing();
/*
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 265fe3eb1069..db5159a3055f 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -41,7 +41,7 @@ __do_compat_cache_op(unsigned long start, unsigned long end)
dsb(ish);
}
- ret = __flush_cache_user_range(start, start + chunk);
+ ret = caches_clean_inval_user_pou(start, start + chunk);
if (ret)
return ret;
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index b9cf12b271d7..263d6c1a525f 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -5,6 +5,7 @@
#include <linux/errno.h>
#include <linux/nospec.h>
#include <linux/ptrace.h>
+#include <linux/randomize_kstack.h>
#include <linux/syscalls.h>
#include <asm/daifflags.h>
@@ -43,6 +44,8 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
{
long ret;
+ add_random_kstack_offset();
+
if (scno < sc_nr) {
syscall_fn_t syscall_fn;
syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
@@ -55,6 +58,19 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
ret = lower_32_bits(ret);
regs->regs[0] = ret;
+
+ /*
+ * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+ * but not enough for arm64 stack utilization comfort. To keep
+ * reasonable stack head room, reduce the maximum offset to 9 bits.
+ *
+ * The actual entropy will be further reduced by the compiler when
+ * applying stack alignment constraints: the AAPCS mandates a
+ * 16-byte (i.e. 4-bit) aligned SP at function boundaries.
+ *
+ * The resulting 5 bits of entropy is seen in SP[8:4].
+ */
+ choose_random_kstack_offset(get_random_int() & 0x1FF);
}
static inline bool has_syscall_work(unsigned long flags)
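
Making the comment's arithmetic concrete (the random draw below is an arbitrary stand-in for get_random_int()):

#include <stdio.h>

int main(void)
{
	unsigned int raw = 0x155 & 0x1FF;	/* keep 9 random bits, 0..511 */
	unsigned int off = raw & ~0xFu;		/* AAPCS 16-byte SP alignment
						 * zeroes the low 4 bits */

	/* 512 / 16 = 32 distinct offsets => 5 bits of entropy in SP[8:4] */
	printf("raw=%#x aligned=%#x distinct=%u\n", raw, off, 512 / 16);
	return 0;
}
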
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index e08a4126453a..4dd14a6620c1 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -199,12 +199,47 @@ static int freq_inv_set_max_ratio(int cpu, u64 max_rate, u64 ref_rate)
return 0;
}
-static DEFINE_STATIC_KEY_FALSE(amu_fie_key);
-#define amu_freq_invariant() static_branch_unlikely(&amu_fie_key)
+static void amu_scale_freq_tick(void)
+{
+ u64 prev_core_cnt, prev_const_cnt;
+ u64 core_cnt, const_cnt, scale;
+
+ prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
+ prev_core_cnt = this_cpu_read(arch_core_cycles_prev);
+
+ update_freq_counters_refs();
+
+ const_cnt = this_cpu_read(arch_const_cycles_prev);
+ core_cnt = this_cpu_read(arch_core_cycles_prev);
+
+ if (unlikely(core_cnt <= prev_core_cnt ||
+ const_cnt <= prev_const_cnt))
+ return;
+
+ /*
+ *          /\core    arch_max_freq_scale
+ * scale = ------- * --------------------
+ *          /\const   SCHED_CAPACITY_SCALE
+ *
+ * See validate_cpu_freq_invariance_counters() for details on
+ * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT.
+ */
+ scale = core_cnt - prev_core_cnt;
+ scale *= this_cpu_read(arch_max_freq_scale);
+ scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
+ const_cnt - prev_const_cnt);
+
+ scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
+ this_cpu_write(arch_freq_scale, (unsigned long)scale);
+}
+
+static struct scale_freq_data amu_sfd = {
+ .source = SCALE_FREQ_SOURCE_ARCH,
+ .set_freq_scale = amu_scale_freq_tick,
+};
static void amu_fie_setup(const struct cpumask *cpus)
{
- bool invariant;
int cpu;
/* We are already set since the last insmod of cpufreq driver */
@@ -221,25 +256,10 @@ static void amu_fie_setup(const struct cpumask *cpus)
cpumask_or(amu_fie_cpus, amu_fie_cpus, cpus);
- invariant = topology_scale_freq_invariant();
-
- /* We aren't fully invariant yet */
- if (!invariant && !cpumask_equal(amu_fie_cpus, cpu_present_mask))
- return;
-
- static_branch_enable(&amu_fie_key);
+ topology_set_scale_freq_source(&amu_sfd, amu_fie_cpus);
pr_debug("CPUs[%*pbl]: counters will be used for FIE.",
cpumask_pr_args(cpus));
-
- /*
- * Task scheduler behavior depends on frequency invariance support,
- * either cpufreq or counter driven. If the support status changes as
- * a result of counter initialisation and use, retrigger the build of
- * scheduling domains to ensure the information is propagated properly.
- */
- if (!invariant)
- rebuild_sched_domains_energy();
}
static int init_amu_fie_callback(struct notifier_block *nb, unsigned long val,
@@ -256,8 +276,8 @@ static int init_amu_fie_callback(struct notifier_block *nb, unsigned long val,
* initialized AMU support and enabled invariance. The AMU counters will
* keep on working just fine in the absence of the cpufreq driver, and
* for the CPUs for which there are no counters available, the last set
- * value of freq_scale will remain valid as that is the frequency those
- * CPUs are running at.
+ * value of arch_freq_scale will remain valid as that is the frequency
+ * those CPUs are running at.
*/
return 0;
@@ -283,53 +303,6 @@ static int __init init_amu_fie(void)
}
core_initcall(init_amu_fie);
-bool arch_freq_counters_available(const struct cpumask *cpus)
-{
- return amu_freq_invariant() &&
- cpumask_subset(cpus, amu_fie_cpus);
-}
-
-void topology_scale_freq_tick(void)
-{
- u64 prev_core_cnt, prev_const_cnt;
- u64 core_cnt, const_cnt, scale;
- int cpu = smp_processor_id();
-
- if (!amu_freq_invariant())
- return;
-
- if (!cpumask_test_cpu(cpu, amu_fie_cpus))
- return;
-
- prev_const_cnt = this_cpu_read(arch_const_cycles_prev);
- prev_core_cnt = this_cpu_read(arch_core_cycles_prev);
-
- update_freq_counters_refs();
-
- const_cnt = this_cpu_read(arch_const_cycles_prev);
- core_cnt = this_cpu_read(arch_core_cycles_prev);
-
- if (unlikely(core_cnt <= prev_core_cnt ||
- const_cnt <= prev_const_cnt))
- return;
-
- /*
- *          /\core    arch_max_freq_scale
- * scale = ------- * --------------------
- *          /\const   SCHED_CAPACITY_SCALE
- *
- * See validate_cpu_freq_invariance_counters() for details on
- * arch_max_freq_scale and the use of SCHED_CAPACITY_SHIFT.
- */
- scale = core_cnt - prev_core_cnt;
- scale *= this_cpu_read(arch_max_freq_scale);
- scale = div64_u64(scale >> SCHED_CAPACITY_SHIFT,
- const_cnt - prev_const_cnt);
-
- scale = min_t(unsigned long, scale, SCHED_CAPACITY_SCALE);
- this_cpu_write(freq_scale, (unsigned long)scale);
-}
-
#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>
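
A worked example of the tick formula with made-up rates: a 100 MHz constant-rate counter, a CPU running at its 2 GHz maximum, and a 1 ms tick, for which arch_max_freq_scale comes out near (ref/max) << 20 = 52428; the result lands at ~1024, i.e. full capacity:

#include <stdio.h>
#include <stdint.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

int main(void)
{
	uint64_t d_const = 100000;	/* constant-rate cycles this tick */
	uint64_t d_core = 2000000;	/* core cycles this tick */
	uint64_t max_freq_scale = 52428;	/* hypothetical per-cpu ratio */

	uint64_t scale = (d_core * max_freq_scale) >> SCHED_CAPACITY_SHIFT;
	scale /= d_const;
	if (scale > SCHED_CAPACITY_SCALE)
		scale = SCHED_CAPACITY_SCALE;

	printf("freq scale = %llu/%lu\n",
	       (unsigned long long)scale, SCHED_CAPACITY_SCALE);
	return 0;
}
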
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index a05d34f0e82a..b03e383d944a 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -38,6 +38,7 @@
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
+#include <asm/patching.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
@@ -45,11 +46,102 @@
#include <asm/system_misc.h>
#include <asm/sysreg.h>
-static const char *handler[] = {
- "Synchronous Abort",
- "IRQ",
- "FIQ",
- "Error"
+static bool __kprobes __check_eq(unsigned long pstate)
+{
+ return (pstate & PSR_Z_BIT) != 0;
+}
+
+static bool __kprobes __check_ne(unsigned long pstate)
+{
+ return (pstate & PSR_Z_BIT) == 0;
+}
+
+static bool __kprobes __check_cs(unsigned long pstate)
+{
+ return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_cc(unsigned long pstate)
+{
+ return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_mi(unsigned long pstate)
+{
+ return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_pl(unsigned long pstate)
+{
+ return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_vs(unsigned long pstate)
+{
+ return (pstate & PSR_V_BIT) != 0;
+}
+
+static bool __kprobes __check_vc(unsigned long pstate)
+{
+ return (pstate & PSR_V_BIT) == 0;
+}
+
+static bool __kprobes __check_hi(unsigned long pstate)
+{
+ pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+ return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_ls(unsigned long pstate)
+{
+ pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+ return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_ge(unsigned long pstate)
+{
+ pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_lt(unsigned long pstate)
+{
+ pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+ return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_gt(unsigned long pstate)
+{
+ /* PSR_N_BIT ^= PSR_V_BIT */
+ unsigned long temp = pstate ^ (pstate << 3);
+
+ temp |= (pstate << 1); /* PSR_N_BIT |= PSR_Z_BIT */
+ return (temp & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_le(unsigned long pstate)
+{
+ /* PSR_N_BIT ^= PSR_V_BIT */
+ unsigned long temp = pstate ^ (pstate << 3);
+
+ temp |= (pstate << 1); /* PSR_N_BIT |= PSR_Z_BIT */
+ return (temp & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_al(unsigned long pstate)
+{
+ return true;
+}
+
+/*
+ * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
+ * it behaves identically to 0b1110 ("al").
+ */
+pstate_check_t * const aarch32_opcode_cond_checks[16] = {
+ __check_eq, __check_ne, __check_cs, __check_cc,
+ __check_mi, __check_pl, __check_vs, __check_vc,
+ __check_hi, __check_ls, __check_ge, __check_lt,
+ __check_gt, __check_le, __check_al, __check_al
};
int show_unhandled_signals = 0;
@@ -751,27 +843,8 @@ const char *esr_get_class_string(u32 esr)
}
/*
- * bad_mode handles the impossible case in the exception vector. This is always
- * fatal.
- */
-asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
-{
- arm64_enter_nmi(regs);
-
- console_verbose();
-
- pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
- handler[reason], smp_processor_id(), esr,
- esr_get_class_string(esr));
-
- __show_regs(regs);
- local_daif_mask();
- panic("bad mode");
-}
-
-/*
* bad_el0_sync handles unexpected, but potentially recoverable synchronous
- * exceptions taken from EL0. Unlike bad_mode, this returns.
+ * exceptions taken from EL0.
*/
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
@@ -789,15 +862,11 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
__aligned(16);
-asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
+void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far)
{
unsigned long tsk_stk = (unsigned long)current->stack;
unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
- unsigned int esr = read_sysreg(esr_el1);
- unsigned long far = read_sysreg(far_el1);
-
- arm64_enter_nmi(regs);
console_verbose();
pr_emerg("Insufficient stack space to handle exception!");
@@ -870,15 +939,11 @@ bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
}
}
-asmlinkage void noinstr do_serror(struct pt_regs *regs, unsigned int esr)
+void do_serror(struct pt_regs *regs, unsigned int esr)
{
- arm64_enter_nmi(regs);
-
/* non-RAS errors are not containable */
if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
arm64_serror_panic(regs, esr);
-
- arm64_exit_nmi(regs);
}
/* GENERIC_BUG traps */
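
The shift tricks in __check_hi()/__check_ls() depend on NZCV occupying PSTATE bits 31:28, so a right shift by one maps Z onto C. A standalone check of the "hi" (C && !Z) case:

#include <assert.h>
#include <stdbool.h>

#define PSR_Z	(1UL << 30)
#define PSR_C	(1UL << 29)

/* pstate &= ~(pstate >> 1) clears C exactly when Z was set, leaving
 * "C && !Z" readable from the C bit alone. */
static bool check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);
	return (pstate & PSR_C) != 0;
}

int main(void)
{
	assert(check_hi(PSR_C));		/* C=1, Z=0 -> unsigned higher */
	assert(!check_hi(PSR_C | PSR_Z));	/* Z=1 masks C */
	assert(!check_hi(PSR_Z));
	return 0;
}
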
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index cee5d04ea9ad..a61fc4f989b3 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -86,7 +86,7 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
return 0;
}
-static int __vdso_init(enum vdso_abi abi)
+static int __init __vdso_init(enum vdso_abi abi)
{
int i;
struct page **vdso_pagelist;
@@ -271,6 +271,14 @@ enum aarch32_map {
static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;
+static int aarch32_sigpage_mremap(const struct vm_special_mapping *sm,
+ struct vm_area_struct *new_vma)
+{
+ current->mm->context.sigpage = (void *)new_vma->vm_start;
+
+ return 0;
+}
+
static struct vm_special_mapping aarch32_vdso_maps[] = {
[AA32_MAP_VECTORS] = {
.name = "[vectors]", /* ABI */
@@ -279,6 +287,7 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
[AA32_MAP_SIGPAGE] = {
.name = "[sigpage]", /* ABI */
.pages = &aarch32_sig_page,
+ .mremap = aarch32_sigpage_mremap,
},
[AA32_MAP_VVAR] = {
.name = "[vvar]",
@@ -299,34 +308,35 @@ static int aarch32_alloc_kuser_vdso_page(void)
if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
return 0;
- vdso_page = get_zeroed_page(GFP_ATOMIC);
+ vdso_page = get_zeroed_page(GFP_KERNEL);
if (!vdso_page)
return -ENOMEM;
memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
kuser_sz);
aarch32_vectors_page = virt_to_page(vdso_page);
- flush_dcache_page(aarch32_vectors_page);
return 0;
}
+#define COMPAT_SIGPAGE_POISON_WORD 0xe7fddef1
static int aarch32_alloc_sigpage(void)
{
extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
- unsigned long sigpage;
+ __le32 poison = cpu_to_le32(COMPAT_SIGPAGE_POISON_WORD);
+ void *sigpage;
- sigpage = get_zeroed_page(GFP_ATOMIC);
+ sigpage = (void *)__get_free_page(GFP_KERNEL);
if (!sigpage)
return -ENOMEM;
- memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
+ memset32(sigpage, (__force u32)poison, PAGE_SIZE / sizeof(poison));
+ memcpy(sigpage, __aarch32_sigret_code_start, sigret_sz);
aarch32_sig_page = virt_to_page(sigpage);
- flush_dcache_page(aarch32_sig_page);
return 0;
}
-static int __aarch32_alloc_vdso_pages(void)
+static int __init __aarch32_alloc_vdso_pages(void)
{
if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
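
The poison word 0xe7fddef1 lies in the permanently-undefined A32 encoding space, so a stray branch into the sigpage faults instead of executing residual zeroes. A userspace analogue of the memset32() fill:

#include <stdint.h>
#include <stddef.h>

#define POISON	0xe7fddef1u	/* permanently-undefined A32 encoding */

/* Tile the page with the poison word so that executing anything other
 * than the copied sigreturn code traps deterministically. */
static void fill_poison(uint32_t *page, size_t page_size)
{
	for (size_t i = 0; i < page_size / sizeof(uint32_t); i++)
		page[i] = POISON;
}
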
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
index 61dbb4c838ef..a5e61e09ea92 100644
--- a/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -31,6 +31,13 @@ SECTIONS
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
+ /*
+ * Discard .note.gnu.property sections which are unused and have
+ * different alignment requirement from vDSO note sections.
+ */
+ /DISCARD/ : {
+ *(.note.GNU-stack .note.gnu.property)
+ }
.note : { *(.note.*) } :text :note
. = ALIGN(16);
@@ -48,7 +55,6 @@ SECTIONS
PROVIDE(end = .);
/DISCARD/ : {
- *(.note.GNU-stack)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
*(.eh_frame .eh_frame_hdr)
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 789ad420f16b..3dba0c4f8f42 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -10,15 +10,7 @@ include $(srctree)/lib/vdso/Makefile
# Same as cc-*option, but using CC_COMPAT instead of CC
ifeq ($(CONFIG_CC_IS_CLANG), y)
-COMPAT_GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE_COMPAT)elfedit))
-COMPAT_GCC_TOOLCHAIN := $(realpath $(COMPAT_GCC_TOOLCHAIN_DIR)/..)
-
CC_COMPAT_CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
-CC_COMPAT_CLANG_FLAGS += --prefix=$(COMPAT_GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE_COMPAT))
-CC_COMPAT_CLANG_FLAGS += -no-integrated-as -Qunused-arguments
-ifneq ($(COMPAT_GCC_TOOLCHAIN),)
-CC_COMPAT_CLANG_FLAGS += --gcc-toolchain=$(COMPAT_GCC_TOOLCHAIN)
-endif
CC_COMPAT ?= $(CC)
CC_COMPAT += $(CC_COMPAT_CLANG_FLAGS)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 7eea7888bb02..709d2c433c5e 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -5,24 +5,7 @@
* Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
*/
-#define RO_EXCEPTION_TABLE_ALIGN 8
-#define RUNTIME_DISCARD_EXIT
-
-#include <asm-generic/vmlinux.lds.h>
-#include <asm/cache.h>
#include <asm/hyp_image.h>
-#include <asm/kernel-pgtable.h>
-#include <asm/memory.h>
-#include <asm/page.h>
-
-#include "image.h"
-
-OUTPUT_ARCH(aarch64)
-ENTRY(_text)
-
-jiffies = jiffies_64;
-
-
#ifdef CONFIG_KVM
#define HYPERVISOR_EXTABLE \
. = ALIGN(SZ_8); \
@@ -32,9 +15,11 @@ jiffies = jiffies_64;
#define HYPERVISOR_DATA_SECTIONS \
HYP_SECTION_NAME(.rodata) : { \
+ . = ALIGN(PAGE_SIZE); \
__hyp_rodata_start = .; \
*(HYP_SECTION_NAME(.data..ro_after_init)) \
*(HYP_SECTION_NAME(.rodata)) \
+ . = ALIGN(PAGE_SIZE); \
__hyp_rodata_end = .; \
}
@@ -51,29 +36,52 @@ jiffies = jiffies_64;
__hyp_reloc_end = .; \
}
+#define BSS_FIRST_SECTIONS \
+ __hyp_bss_start = .; \
+ *(HYP_SECTION_NAME(.bss)) \
+ . = ALIGN(PAGE_SIZE); \
+ __hyp_bss_end = .;
+
+/*
+ * We require that __hyp_bss_start and __bss_start are aligned, and enforce it
+ * with an assertion. But the BSS_SECTION macro places an empty .sbss section
+ * between them, which can in some cases cause the linker to misalign them. To
+ * work around the issue, force a page alignment for __bss_start.
+ */
+#define SBSS_ALIGN PAGE_SIZE
#else /* CONFIG_KVM */
#define HYPERVISOR_EXTABLE
#define HYPERVISOR_DATA_SECTIONS
#define HYPERVISOR_PERCPU_SECTION
#define HYPERVISOR_RELOC_SECTION
+#define SBSS_ALIGN 0
#endif
+#define RO_EXCEPTION_TABLE_ALIGN 8
+#define RUNTIME_DISCARD_EXIT
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+
+#include "image.h"
+
+OUTPUT_ARCH(aarch64)
+ENTRY(_text)
+
+jiffies = jiffies_64;
+
#define HYPERVISOR_TEXT \
- /* \
- * Align to 4 KB so that \
- * a) the HYP vector table is at its minimum \
- * alignment of 2048 bytes \
- * b) the HYP init code will not cross a page \
- * boundary if its size does not exceed \
- * 4 KB (see related ASSERT() below) \
- */ \
- . = ALIGN(SZ_4K); \
+ . = ALIGN(PAGE_SIZE); \
__hyp_idmap_text_start = .; \
*(.hyp.idmap.text) \
__hyp_idmap_text_end = .; \
__hyp_text_start = .; \
*(.hyp.text) \
HYPERVISOR_EXTABLE \
+ . = ALIGN(PAGE_SIZE); \
__hyp_text_end = .;
#define IDMAP_TEXT \
@@ -276,7 +284,7 @@ SECTIONS
__pecoff_data_rawsize = ABSOLUTE(. - __initdata_begin);
_edata = .;
- BSS_SECTION(0, 0, 0)
+ BSS_SECTION(SBSS_ALIGN, 0, 0)
. = ALIGN(PAGE_SIZE);
init_pg_dir = .;
@@ -309,11 +317,12 @@ SECTIONS
#include "image-vars.h"
/*
- * The HYP init code and ID map text can't be longer than a page each,
- * and should not cross a page boundary.
+ * The HYP init code and ID map text can't be longer than a page each. The
+ * former is page-aligned, but the latter may not be with 16K or 64K pages, so
+ * it should also not cross a page boundary.
*/
-ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
- "HYP init code too big or misaligned")
+ASSERT(__hyp_idmap_text_end - __hyp_idmap_text_start <= PAGE_SIZE,
+ "HYP init code too big")
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
"ID map text too big or misaligned")
#ifdef CONFIG_HIBERNATION
@@ -324,6 +333,9 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
"Entry trampoline text too big")
#endif
+#ifdef CONFIG_KVM
+ASSERT(__hyp_bss_start == __bss_start, "HYP and Host BSS are misaligned")
+#endif
/*
* If padding is applied before .head.text, virt<->phys conversions will fail.
*/
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 3964acf5451e..a4eba0908bfa 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -20,8 +20,6 @@ if VIRTUALIZATION
menuconfig KVM
bool "Kernel-based Virtual Machine (KVM) support"
depends on OF
- # for TASKSTATS/TASK_DELAY_ACCT:
- depends on NET && MULTIUSER
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -38,8 +36,7 @@ menuconfig KVM
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
select HAVE_KVM_VCPU_RUN_PID_CHANGE
- select TASKSTATS
- select TASK_DELAY_ACCT
+ select SCHED_INFO
help
Support hosting virtualized guest machines.
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 589921392cb1..989bb5dad2c8 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM) += hyp/
kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
- $(KVM)/vfio.o $(KVM)/irqchip.o \
+ $(KVM)/vfio.o $(KVM)/irqchip.o $(KVM)/binary_stats.o \
arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
inject_fault.o va_layout.o handle_exit.o \
guest.o debug.o reset.o sys_regs.o \
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 74e0699661e9..3df67c127489 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -9,6 +9,7 @@
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/uaccess.h>
#include <clocksource/arm_arch_timer.h>
@@ -973,36 +974,154 @@ static int kvm_timer_dying_cpu(unsigned int cpu)
return 0;
}
-int kvm_timer_hyp_init(bool has_gic)
+static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
- struct arch_timer_kvm_info *info;
- int err;
+ if (vcpu)
+ irqd_set_forwarded_to_vcpu(d);
+ else
+ irqd_clr_forwarded_to_vcpu(d);
- info = arch_timer_get_kvm_info();
- timecounter = &info->timecounter;
+ return 0;
+}
- if (!timecounter->cc) {
- kvm_err("kvm_arch_timer: uninitialized timecounter\n");
- return -ENODEV;
+static int timer_irq_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which, bool val)
+{
+ if (which != IRQCHIP_STATE_ACTIVE || !irqd_is_forwarded_to_vcpu(d))
+ return irq_chip_set_parent_state(d, which, val);
+
+ if (val)
+ irq_chip_mask_parent(d);
+ else
+ irq_chip_unmask_parent(d);
+
+ return 0;
+}
+
+static void timer_irq_eoi(struct irq_data *d)
+{
+ if (!irqd_is_forwarded_to_vcpu(d))
+ irq_chip_eoi_parent(d);
+}
+
+static void timer_irq_ack(struct irq_data *d)
+{
+ d = d->parent_data;
+ if (d->chip->irq_ack)
+ d->chip->irq_ack(d);
+}
+
+static struct irq_chip timer_chip = {
+ .name = "KVM",
+ .irq_ack = timer_irq_ack,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = timer_irq_eoi,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_vcpu_affinity = timer_irq_set_vcpu_affinity,
+ .irq_set_irqchip_state = timer_irq_set_irqchip_state,
+};
+
+static int timer_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ irq_hw_number_t hwirq = (uintptr_t)arg;
+
+ return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &timer_chip, NULL);
+}
+
+static void timer_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+}
+
+static const struct irq_domain_ops timer_domain_ops = {
+ .alloc = timer_irq_domain_alloc,
+ .free = timer_irq_domain_free,
+};
+
+static struct irq_ops arch_timer_irq_ops = {
+ .get_input_level = kvm_arch_timer_get_input_level,
+};
+
+static void kvm_irq_fixup_flags(unsigned int virq, u32 *flags)
+{
+ *flags = irq_get_trigger_type(virq);
+ if (*flags != IRQF_TRIGGER_HIGH && *flags != IRQF_TRIGGER_LOW) {
+ kvm_err("Invalid trigger for timer IRQ%d, assuming level low\n",
+ virq);
+ *flags = IRQF_TRIGGER_LOW;
}
+}
- /* First, do the virtual EL1 timer irq */
+static int kvm_irq_init(struct arch_timer_kvm_info *info)
+{
+ struct irq_domain *domain = NULL;
if (info->virtual_irq <= 0) {
kvm_err("kvm_arch_timer: invalid virtual timer IRQ: %d\n",
info->virtual_irq);
return -ENODEV;
}
+
host_vtimer_irq = info->virtual_irq;
+ kvm_irq_fixup_flags(host_vtimer_irq, &host_vtimer_irq_flags);
+
+ if (kvm_vgic_global_state.no_hw_deactivation) {
+ struct fwnode_handle *fwnode;
+ struct irq_data *data;
+
+ fwnode = irq_domain_alloc_named_fwnode("kvm-timer");
+ if (!fwnode)
+ return -ENOMEM;
+
+ /* Assume both vtimer and ptimer in the same parent */
+ data = irq_get_irq_data(host_vtimer_irq);
+ domain = irq_domain_create_hierarchy(data->domain, 0,
+ NR_KVM_TIMERS, fwnode,
+ &timer_domain_ops, NULL);
+ if (!domain) {
+ irq_domain_free_fwnode(fwnode);
+ return -ENOMEM;
+ }
+
+ arch_timer_irq_ops.flags |= VGIC_IRQ_SW_RESAMPLE;
+ WARN_ON(irq_domain_push_irq(domain, host_vtimer_irq,
+ (void *)TIMER_VTIMER));
+ }
- host_vtimer_irq_flags = irq_get_trigger_type(host_vtimer_irq);
- if (host_vtimer_irq_flags != IRQF_TRIGGER_HIGH &&
- host_vtimer_irq_flags != IRQF_TRIGGER_LOW) {
- kvm_err("Invalid trigger for vtimer IRQ%d, assuming level low\n",
- host_vtimer_irq);
- host_vtimer_irq_flags = IRQF_TRIGGER_LOW;
+ if (info->physical_irq > 0) {
+ host_ptimer_irq = info->physical_irq;
+ kvm_irq_fixup_flags(host_ptimer_irq, &host_ptimer_irq_flags);
+
+ if (domain)
+ WARN_ON(irq_domain_push_irq(domain, host_ptimer_irq,
+ (void *)TIMER_PTIMER));
}
+ return 0;
+}
+
+int kvm_timer_hyp_init(bool has_gic)
+{
+ struct arch_timer_kvm_info *info;
+ int err;
+
+ info = arch_timer_get_kvm_info();
+ timecounter = &info->timecounter;
+
+ if (!timecounter->cc) {
+ kvm_err("kvm_arch_timer: uninitialized timecounter\n");
+ return -ENODEV;
+ }
+
+ err = kvm_irq_init(info);
+ if (err)
+ return err;
+
+ /* First, do the virtual EL1 timer irq */
+
err = request_percpu_irq(host_vtimer_irq, kvm_arch_timer_handler,
"kvm guest vtimer", kvm_get_running_vcpus());
if (err) {
@@ -1027,15 +1146,6 @@ int kvm_timer_hyp_init(bool has_gic)
/* Now let's do the physical EL1 timer irq */
if (info->physical_irq > 0) {
- host_ptimer_irq = info->physical_irq;
- host_ptimer_irq_flags = irq_get_trigger_type(host_ptimer_irq);
- if (host_ptimer_irq_flags != IRQF_TRIGGER_HIGH &&
- host_ptimer_irq_flags != IRQF_TRIGGER_LOW) {
- kvm_err("Invalid trigger for ptimer IRQ%d, assuming level low\n",
- host_ptimer_irq);
- host_ptimer_irq_flags = IRQF_TRIGGER_LOW;
- }
-
err = request_percpu_irq(host_ptimer_irq, kvm_arch_timer_handler,
"kvm guest ptimer", kvm_get_running_vcpus());
if (err) {
@@ -1143,7 +1253,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
ret = kvm_vgic_map_phys_irq(vcpu,
map.direct_vtimer->host_timer_irq,
map.direct_vtimer->irq.irq,
- kvm_arch_timer_get_input_level);
+ &arch_timer_irq_ops);
if (ret)
return ret;
@@ -1151,7 +1261,7 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu)
ret = kvm_vgic_map_phys_irq(vcpu,
map.direct_ptimer->host_timer_irq,
map.direct_ptimer->irq.irq,
- kvm_arch_timer_get_input_level);
+ &arch_timer_irq_ops);
}
if (ret)
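
The hunks above interpose a KVM-owned irq_chip between the per-CPU timer interrupt and the GIC, by stacking a hierarchical domain on the IRQ's existing one and pushing the IRQ into it. A minimal sketch of that pattern, assuming a hypothetical demo_domain_ops whose ->alloc installs the new chip exactly as timer_irq_domain_alloc() does:

static int interpose_demo_chip(unsigned int virq, void *token)
{
	struct irq_data *d = irq_get_irq_data(virq);
	struct fwnode_handle *fwnode;
	struct irq_domain *domain;

	fwnode = irq_domain_alloc_named_fwnode("demo");
	if (!fwnode)
		return -ENOMEM;

	/* Stack one level on top of the IRQ's current (GIC) domain */
	domain = irq_domain_create_hierarchy(d->domain, 0, 1, fwnode,
					     &demo_domain_ops, NULL);
	if (!domain) {
		irq_domain_free_fwnode(fwnode);
		return -ENOMEM;
	}

	/* virq now resolves through the new chip before its parent */
	return irq_domain_push_irq(domain, virq, token);
}
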
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index fc4c95dd2d26..e9a2b8f27792 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -93,6 +93,12 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = 0;
kvm->arch.return_nisv_io_abort_to_user = true;
break;
+ case KVM_CAP_ARM_MTE:
+ if (!system_supports_mte() || kvm->created_vcpus)
+ return -EINVAL;
+ r = 0;
+ kvm->arch.mte_enabled = true;
+ break;
default:
r = -EINVAL;
break;
@@ -206,8 +212,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_ARM_INJECT_EXT_DABT:
case KVM_CAP_SET_GUEST_DEBUG:
case KVM_CAP_VCPU_ATTRIBUTES:
+ case KVM_CAP_PTP_KVM:
r = 1;
break;
+ case KVM_CAP_SET_GUEST_DEBUG2:
+ return KVM_GUESTDBG_VALID_MASK;
case KVM_CAP_ARM_SET_DEVICE_ADDR:
r = 1;
break;
@@ -234,6 +243,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
*/
r = 1;
break;
+ case KVM_CAP_ARM_MTE:
+ r = system_supports_mte();
+ break;
case KVM_CAP_STEAL_TIME:
r = kvm_arm_pvtime_supported();
break;
@@ -385,11 +397,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
last_ran = this_cpu_ptr(mmu->last_vcpu_ran);
/*
+ * We guarantee that both TLBs and I-cache are private to each
+ * vcpu. If we detect that a vcpu from the same VM has
+ * previously run on the same physical CPU, call into the
+ * hypervisor code to nuke the relevant contexts.
+ *
* We might get preempted before the vCPU actually runs, but
* over-invalidation doesn't affect correctness.
*/
if (*last_ran != vcpu->vcpu_id) {
- kvm_call_hyp(__kvm_tlb_flush_local_vmid, mmu);
+ kvm_call_hyp(__kvm_flush_cpu_context, mmu);
*last_ran = vcpu->vcpu_id;
}
@@ -411,10 +428,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (vcpu_has_ptrauth(vcpu))
vcpu_ptrauth_disable(vcpu);
+ kvm_arch_vcpu_load_debug_state_flags(vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+ kvm_arch_vcpu_put_debug_state_flags(vcpu);
kvm_arch_vcpu_put_fp(vcpu);
if (has_vhe())
kvm_vcpu_put_sysregs_vhe(vcpu);
@@ -575,6 +594,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
vcpu->arch.has_run_once = true;
+ kvm_arm_vcpu_init_debug(vcpu);
+
if (likely(irqchip_in_kernel(kvm))) {
/*
* Map the VGIC hardware resources before running a vcpu the
@@ -677,9 +698,22 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
vgic_v4_load(vcpu);
preempt_enable();
}
+
+ if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
+ kvm_pmu_handle_pmcr(vcpu,
+ __vcpu_sys_reg(vcpu, PMCR_EL0));
}
}
+static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
+{
+ if (likely(!vcpu_mode_is_32bit(vcpu)))
+ return false;
+
+ return !system_supports_32bit_el0() ||
+ static_branch_unlikely(&arm64_mismatched_32bit_el0);
+}
+
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer
@@ -708,11 +742,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
return ret;
}
- if (run->immediate_exit)
- return -EINTR;
-
vcpu_load(vcpu);
+ if (run->immediate_exit) {
+ ret = -EINTR;
+ goto out;
+ }
+
kvm_sigset_activate(vcpu);
ret = 1;
@@ -863,7 +899,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
* with the asymmetric AArch32 case), return to userspace with
* a fatal error.
*/
- if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
+ if (vcpu_mode_is_bad_32bit(vcpu)) {
/*
* As we have caught the guest red-handed, decide that
* it isn't fit for purpose anymore by making the vcpu
@@ -885,6 +921,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_sigset_deactivate(vcpu);
+out:
+ /*
+ * In the unlikely event that we are returning to userspace
+ * with pending exceptions or PC adjustment, commit these
+ * adjustments in order to give userspace a consistent view of
+ * the vcpu state. Note that this relies on __kvm_adjust_pc()
+ * being preempt-safe on VHE.
+ */
+ if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
+ KVM_ARM64_INCREMENT_PC)))
+ kvm_call_hyp(__kvm_adjust_pc, vcpu);
+
vcpu_put(vcpu);
return ret;
}
@@ -1052,7 +1100,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
stage2_unmap_vm(vcpu->kvm);
else
- __flush_icache_all();
+ icache_inval_all_pou();
}
vcpu_reset_hcr(vcpu);
@@ -1263,7 +1311,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
}
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
+ const struct kvm_memory_slot *memslot)
{
kvm_flush_remote_tlbs(kvm);
}
@@ -1324,6 +1372,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
return 0;
}
+ case KVM_ARM_MTE_COPY_TAGS: {
+ struct kvm_arm_copy_mte_tags copy_tags;
+
+ if (copy_from_user(&copy_tags, argp, sizeof(copy_tags)))
+ return -EFAULT;
+ return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags);
+ }
default:
return -EINVAL;
}
@@ -1345,16 +1400,9 @@ static unsigned long nvhe_percpu_order(void)
/* A lookup table holding the hypervisor VA for each vector slot */
static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS];
-static int __kvm_vector_slot2idx(enum arm64_hyp_spectre_vector slot)
-{
- return slot - (slot != HYP_VECTOR_DIRECT);
-}
-
static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot)
{
- int idx = __kvm_vector_slot2idx(slot);
-
- hyp_spectre_vector_selector[slot] = base + (idx * SZ_2K);
+ hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot);
}
static int kvm_init_vector_slots(void)
@@ -1383,22 +1431,18 @@ static int kvm_init_vector_slots(void)
return 0;
}
-static void cpu_init_hyp_mode(void)
+static void cpu_prepare_hyp_mode(int cpu)
{
- struct kvm_nvhe_init_params *params = this_cpu_ptr_nvhe_sym(kvm_init_params);
- struct arm_smccc_res res;
+ struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu);
unsigned long tcr;
- /* Switch from the HYP stub to our own HYP init vector */
- __hyp_set_vectors(kvm_get_idmap_vector());
-
/*
* Calculate the raw per-cpu offset without a translation from the
* kernel's mapping to the linear mapping, and store it in tpidr_el2
* so that we can use adr_l to access per-cpu variables in EL2.
* Also drop the KASAN tag which gets in the way...
*/
- params->tpidr_el2 = (unsigned long)kasan_reset_tag(this_cpu_ptr_nvhe_sym(__per_cpu_start)) -
+ params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) -
(unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
params->mair_el2 = read_sysreg(mair_el1);
@@ -1422,14 +1466,28 @@ static void cpu_init_hyp_mode(void)
tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET;
params->tcr_el2 = tcr;
- params->stack_hyp_va = kern_hyp_va(__this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE);
+ params->stack_hyp_va = kern_hyp_va(per_cpu(kvm_arm_hyp_stack_page, cpu) + PAGE_SIZE);
params->pgd_pa = kvm_mmu_get_httbr();
+ if (is_protected_kvm_enabled())
+ params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
+ else
+ params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
+ params->vttbr = params->vtcr = 0;
/*
* Flush the init params from the data cache because the struct will
* be read while the MMU is off.
*/
kvm_flush_dcache_to_poc(params, sizeof(*params));
+}
+
+static void hyp_install_host_vector(void)
+{
+ struct kvm_nvhe_init_params *params;
+ struct arm_smccc_res res;
+
+ /* Switch from the HYP stub to our own HYP init vector */
+ __hyp_set_vectors(kvm_get_idmap_vector());
/*
* Call initialization code, and switch to the full blown HYP code.
@@ -1438,8 +1496,14 @@ static void cpu_init_hyp_mode(void)
* cpus_have_const_cap() wrapper.
*/
BUG_ON(!system_capabilities_finalized());
+ params = this_cpu_ptr_nvhe_sym(kvm_init_params);
arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res);
WARN_ON(res.a0 != SMCCC_RET_SUCCESS);
+}
+
+static void cpu_init_hyp_mode(void)
+{
+ hyp_install_host_vector();
/*
* Disabling SSBD on a non-VHE system requires us to enable SSBS
@@ -1482,7 +1546,10 @@ static void cpu_set_hyp_vector(void)
struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
void *vector = hyp_spectre_vector_selector[data->slot];
- *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
+ if (!is_protected_kvm_enabled())
+ *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector;
+ else
+ kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot);
}
static void cpu_hyp_reinit(void)
@@ -1490,13 +1557,14 @@ static void cpu_hyp_reinit(void)
kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
cpu_hyp_reset();
- cpu_set_hyp_vector();
if (is_kernel_in_hyp_mode())
kvm_timer_init_vhe();
else
cpu_init_hyp_mode();
+ cpu_set_hyp_vector();
+
kvm_arm_init_debug();
if (vgic_present)
@@ -1692,18 +1760,62 @@ static void teardown_hyp_mode(void)
}
}
+static int do_pkvm_init(u32 hyp_va_bits)
+{
+ void *per_cpu_base = kvm_ksym_ref(kvm_arm_hyp_percpu_base);
+ int ret;
+
+ preempt_disable();
+ hyp_install_host_vector();
+ ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
+ num_possible_cpus(), kern_hyp_va(per_cpu_base),
+ hyp_va_bits);
+ preempt_enable();
+
+ return ret;
+}
+
+static int kvm_hyp_init_protection(u32 hyp_va_bits)
+{
+ void *addr = phys_to_virt(hyp_mem_base);
+ int ret;
+
+ kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
+ kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+
+ ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
+ if (ret)
+ return ret;
+
+ ret = do_pkvm_init(hyp_va_bits);
+ if (ret)
+ return ret;
+
+ free_hyp_pgds();
+
+ return 0;
+}
+
/**
* Inits Hyp-mode on all online CPUs
*/
static int init_hyp_mode(void)
{
+ u32 hyp_va_bits;
int cpu;
- int err = 0;
+ int err = -ENOMEM;
+
+ /*
+ * The protected Hyp-mode cannot be initialized if the memory pool
+ * allocation has failed.
+ */
+ if (is_protected_kvm_enabled() && !hyp_mem_base)
+ goto out_err;
/*
* Allocate Hyp PGD and setup Hyp identity mapping
*/
- err = kvm_mmu_init();
+ err = kvm_mmu_init(&hyp_va_bits);
if (err)
goto out_err;
@@ -1764,7 +1876,19 @@ static int init_hyp_mode(void)
goto out_err;
}
- err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
+ /*
+ * .hyp.bss is guaranteed to be placed at the beginning of the .bss
+ * section thanks to an assertion in the linker script. Map it RW and
+ * the rest of .bss RO.
+ */
+ err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start),
+ kvm_ksym_ref(__hyp_bss_end), PAGE_HYP);
+ if (err) {
+ kvm_err("Cannot map hyp bss section: %d\n", err);
+ goto out_err;
+ }
+
+ err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end),
kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
if (err) {
kvm_err("Cannot map bss section\n");
@@ -1785,26 +1909,36 @@ static int init_hyp_mode(void)
}
}
- /*
- * Map Hyp percpu pages
- */
for_each_possible_cpu(cpu) {
char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
char *percpu_end = percpu_begin + nvhe_percpu_size();
+ /* Map Hyp percpu pages */
err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP);
-
if (err) {
kvm_err("Cannot map hyp percpu region\n");
goto out_err;
}
+
+ /* Prepare the CPU initialization parameters */
+ cpu_prepare_hyp_mode(cpu);
}
if (is_protected_kvm_enabled()) {
init_cpu_logical_map();
- if (!init_psci_relay())
+ if (!init_psci_relay()) {
+ err = -ENODEV;
+ goto out_err;
+ }
+ }
+
+ if (is_protected_kvm_enabled()) {
+ err = kvm_hyp_init_protection(hyp_va_bits);
+ if (err) {
+ kvm_err("Failed to init hyp memory protection\n");
goto out_err;
+ }
}
return 0;
@@ -1815,6 +1949,72 @@ out_err:
return err;
}
+static void _kvm_host_prot_finalize(void *discard)
+{
+ WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize));
+}
+
+static inline int pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
+{
+ return kvm_call_hyp_nvhe(__pkvm_mark_hyp, start, end);
+}
+
+#define pkvm_mark_hyp_section(__section) \
+ pkvm_mark_hyp(__pa_symbol(__section##_start), \
+ __pa_symbol(__section##_end))
+
+static int finalize_hyp_mode(void)
+{
+ int cpu, ret;
+
+ if (!is_protected_kvm_enabled())
+ return 0;
+
+ ret = pkvm_mark_hyp_section(__hyp_idmap_text);
+ if (ret)
+ return ret;
+
+ ret = pkvm_mark_hyp_section(__hyp_text);
+ if (ret)
+ return ret;
+
+ ret = pkvm_mark_hyp_section(__hyp_rodata);
+ if (ret)
+ return ret;
+
+ ret = pkvm_mark_hyp_section(__hyp_bss);
+ if (ret)
+ return ret;
+
+ ret = pkvm_mark_hyp(hyp_mem_base, hyp_mem_base + hyp_mem_size);
+ if (ret)
+ return ret;
+
+ for_each_possible_cpu(cpu) {
+ phys_addr_t start = virt_to_phys((void *)kvm_arm_hyp_percpu_base[cpu]);
+ phys_addr_t end = start + (PAGE_SIZE << nvhe_percpu_order());
+
+ ret = pkvm_mark_hyp(start, end);
+ if (ret)
+ return ret;
+
+ start = virt_to_phys((void *)per_cpu(kvm_arm_hyp_stack_page, cpu));
+ end = start + PAGE_SIZE;
+ ret = pkvm_mark_hyp(start, end);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Flip the static key upfront as that may no longer be possible
+ * once the host stage 2 is installed.
+ */
+ static_branch_enable(&kvm_protected_mode_initialized);
+ on_each_cpu(_kvm_host_prot_finalize, NULL, 1);
+
+ return 0;
+}
+
static void check_kvm_target_cpu(void *ret)
{
*(int *)ret = kvm_target_cpu();
@@ -1889,11 +2089,6 @@ int kvm_arch_init(void *opaque)
in_hyp_mode = is_kernel_in_hyp_mode();
- if (!in_hyp_mode && kvm_arch_requires_vhe()) {
- kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
- return -ENODEV;
- }
-
if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
cpus_have_final_cap(ARM64_WORKAROUND_1508412))
kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
@@ -1931,8 +2126,15 @@ int kvm_arch_init(void *opaque)
if (err)
goto out_hyp;
+ if (!in_hyp_mode) {
+ err = finalize_hyp_mode();
+ if (err) {
+ kvm_err("Failed to finalize Hyp protection\n");
+ goto out_hyp;
+ }
+ }
+
if (is_protected_kvm_enabled()) {
- static_branch_enable(&kvm_protected_mode_initialized);
kvm_info("Protected nVHE mode initialized successfully\n");
} else if (in_hyp_mode) {
kvm_info("VHE mode initialized successfully\n");
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
index 7a7e425616b5..d5e79d7ee6e9 100644
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -69,6 +69,65 @@ void kvm_arm_init_debug(void)
}
/**
+ * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
+ *
+ * @vcpu: the vcpu pointer
+ *
+ * This ensures we will trap access to:
+ * - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
+ * - Debug ROM Address (MDCR_EL2_TDRA)
+ * - OS related registers (MDCR_EL2_TDOSA)
+ * - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+ * - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
+ * - Self-hosted Trace buffer (MDCR_EL2_E2TB)
+ */
+static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
+{
+ /*
+ * This also clears MDCR_EL2_E2PB_MASK and MDCR_EL2_E2TB_MASK
+ * to disable guest access to the profiling and trace buffers
+ */
+ vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
+ vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
+ MDCR_EL2_TPMS |
+ MDCR_EL2_TTRF |
+ MDCR_EL2_TPMCR |
+ MDCR_EL2_TDRA |
+ MDCR_EL2_TDOSA);
+
+ /* Is the VM being debugged by userspace? */
+ if (vcpu->guest_debug)
+ /* Route all software debug exceptions to EL2 */
+ vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
+
+ /*
+ * Trap debug register access when one of the following is true:
+ * - Userspace is using the hardware to debug the guest
+ * (KVM_GUESTDBG_USE_HW is set).
+ * - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
+ */
+ if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
+ !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
+ vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
+
+ trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
+}
+
+/**
+ * kvm_arm_vcpu_init_debug - setup vcpu debug traps
+ *
+ * @vcpu: the vcpu pointer
+ *
+ * Set vcpu initial mdcr_el2 value.
+ */
+void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ kvm_arm_setup_mdcr_el2(vcpu);
+ preempt_enable();
+}
+
+/**
* kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
*/
@@ -83,12 +142,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
* @vcpu: the vcpu pointer
*
* This is called before each entry into the hypervisor to setup any
- * debug related registers. Currently this just ensures we will trap
- * access to:
- * - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
- * - Debug ROM Address (MDCR_EL2_TDRA)
- * - OS related registers (MDCR_EL2_TDOSA)
- * - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+ * debug related registers.
*
* Additionally, KVM only traps guest accesses to the debug registers if
* the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
@@ -100,27 +154,14 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
{
- bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
- /*
- * This also clears MDCR_EL2_E2PB_MASK to disable guest access
- * to the profiling buffer.
- */
- vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
- vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
- MDCR_EL2_TPMS |
- MDCR_EL2_TPMCR |
- MDCR_EL2_TDRA |
- MDCR_EL2_TDOSA);
+ kvm_arm_setup_mdcr_el2(vcpu);
/* Is Guest debugging in effect? */
if (vcpu->guest_debug) {
- /* Route all software debug exceptions to EL2 */
- vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
-
/* Save guest debug state */
save_guest_debug_regs(vcpu);
@@ -174,7 +215,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
- trap_debug = true;
trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
&vcpu->arch.debug_ptr->dbg_bcr[0],
@@ -189,10 +229,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
BUG_ON(!vcpu->guest_debug &&
vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
- /* Trap debug register access */
- if (trap_debug)
- vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
-
/* If KDE or MDE are set, perform a full save/restore cycle. */
if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
@@ -201,7 +237,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
- trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
}
@@ -229,3 +264,32 @@ void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
}
}
}
+
+void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu)
+{
+ u64 dfr0;
+
+ /* For VHE, there is nothing to do */
+ if (has_vhe())
+ return;
+
+ dfr0 = read_sysreg(id_aa64dfr0_el1);
+ /*
+ * If SPE is present on this CPU and is available at current EL,
+ * we may need to check if the host state needs to be saved.
+ */
+ if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_PMSVER_SHIFT) &&
+ !(read_sysreg_s(SYS_PMBIDR_EL1) & BIT(SYS_PMBIDR_EL1_P_SHIFT)))
+ vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_SPE;
+
+ /* Check if we have TRBE implemented and available at the host */
+ if (cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRBE_SHIFT) &&
+ !(read_sysreg_s(SYS_TRBIDR_EL1) & TRBIDR_PROG))
+ vcpu->arch.flags |= KVM_ARM64_DEBUG_STATE_SAVE_TRBE;
+}
+
+void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.flags &= ~(KVM_ARM64_DEBUG_STATE_SAVE_SPE |
+ KVM_ARM64_DEBUG_STATE_SAVE_TRBE);
+}
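
The refactoring above funnels all MDCR_EL2 construction through kvm_arm_setup_mdcr_el2(), so the debug-register trap bit is decided in exactly one place. Restated as a standalone predicate (a sketch; want_mdcr_tda() is not a real kernel symbol):

static bool want_mdcr_tda(const struct kvm_vcpu *vcpu)
{
	/*
	 * Trap debug registers unless the guest owns them and
	 * userspace isn't debugging with hardware breakpoints.
	 */
	return (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
	       !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
}
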
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 3e081d556e81..5621020b28de 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -11,6 +11,7 @@
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>
@@ -42,6 +43,17 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
if (ret)
goto error;
+ if (vcpu->arch.sve_state) {
+ void *sve_end;
+
+ sve_end = vcpu->arch.sve_state + vcpu_sve_state_size(vcpu);
+
+ ret = create_hyp_mappings(vcpu->arch.sve_state, sve_end,
+ PAGE_HYP);
+ if (ret)
+ goto error;
+ }
+
vcpu->arch.host_thread_info = kern_hyp_va(ti);
vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
error:
@@ -109,11 +121,17 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
local_irq_save(flags);
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
- fpsimd_save_and_flush_cpu_state();
+ if (guest_has_sve) {
+ __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
+
+ /* Restore the VL that was saved when bound to the CPU */
+ if (!has_vhe())
+ sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
+ SYS_ZCR_EL1);
+ }
- if (guest_has_sve)
- __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_s(SYS_ZCR_EL12);
- } else if (host_has_sve) {
+ fpsimd_save_and_flush_cpu_state();
+ } else if (has_vhe() && host_has_sve) {
/*
* The FPSIMD/SVE state in the CPU has not been touched, and we
* have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
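
The sve_state hunk above follows the general nVHE sharing rule: any kernel buffer the hypervisor will touch must first be mapped at EL2 and then addressed through its hyp alias. A sketch of the pattern (share_with_hyp() is illustrative):

static void *share_with_hyp(void *buf, size_t size)
{
	if (create_hyp_mappings(buf, buf + size, PAGE_HYP))
		return NULL;

	/* EL2 must use the hyp alias, not the kernel VA */
	return kern_hyp_va(buf);
}
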
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 9bbd30e62799..1dfb83578277 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -28,20 +28,40 @@
#include "trace.h"
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("hvc_exit_stat", hvc_exit_stat),
- VCPU_STAT("wfe_exit_stat", wfe_exit_stat),
- VCPU_STAT("wfi_exit_stat", wfi_exit_stat),
- VCPU_STAT("mmio_exit_user", mmio_exit_user),
- VCPU_STAT("mmio_exit_kernel", mmio_exit_kernel),
- VCPU_STAT("exits", exits),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS()
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, hvc_exit_stat),
+ STATS_DESC_COUNTER(VCPU, wfe_exit_stat),
+ STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
+ STATS_DESC_COUNTER(VCPU, mmio_exit_user),
+ STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
+ STATS_DESC_COUNTER(VCPU, exits)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
static bool core_reg_offset_is_vreg(u64 off)
@@ -299,7 +319,7 @@ static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
memset(vqs, 0, sizeof(vqs));
- max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+ max_vq = vcpu_sve_max_vq(vcpu);
for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
if (sve_vq_available(vq))
vqs[vq_word(vq)] |= vq_mask(vq);
@@ -427,7 +447,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
return -ENOENT;
- vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+ vq = vcpu_sve_max_vq(vcpu);
reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
SVE_SIG_REGS_OFFSET;
@@ -437,7 +457,7 @@ static int sve_reg_to_region(struct sve_state_reg_region *region,
if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
return -ENOENT;
- vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
+ vq = vcpu_sve_max_vq(vcpu);
reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
SVE_SIG_REGS_OFFSET;
@@ -888,11 +908,6 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
return -EINVAL;
}
-#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
- KVM_GUESTDBG_USE_SW_BP | \
- KVM_GUESTDBG_USE_HW | \
- KVM_GUESTDBG_SINGLESTEP)
-
/**
* kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
* @kvm: pointer to the KVM struct
@@ -1000,3 +1015,89 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
return ret;
}
+
+long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
+ struct kvm_arm_copy_mte_tags *copy_tags)
+{
+ gpa_t guest_ipa = copy_tags->guest_ipa;
+ size_t length = copy_tags->length;
+ void __user *tags = copy_tags->addr;
+ gpa_t gfn;
+ bool write = !(copy_tags->flags & KVM_ARM_TAGS_FROM_GUEST);
+ int ret = 0;
+
+ if (!kvm_has_mte(kvm))
+ return -EINVAL;
+
+ if (copy_tags->reserved[0] || copy_tags->reserved[1])
+ return -EINVAL;
+
+ if (copy_tags->flags & ~KVM_ARM_TAGS_FROM_GUEST)
+ return -EINVAL;
+
+ if (length & ~PAGE_MASK || guest_ipa & ~PAGE_MASK)
+ return -EINVAL;
+
+ gfn = gpa_to_gfn(guest_ipa);
+
+ mutex_lock(&kvm->slots_lock);
+
+ while (length > 0) {
+ kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
+ void *maddr;
+ unsigned long num_tags;
+ struct page *page;
+
+ if (is_error_noslot_pfn(pfn)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ page = pfn_to_online_page(pfn);
+ if (!page) {
+ /* Reject ZONE_DEVICE memory */
+ ret = -EFAULT;
+ goto out;
+ }
+ maddr = page_address(page);
+
+ if (!write) {
+ if (test_bit(PG_mte_tagged, &page->flags))
+ num_tags = mte_copy_tags_to_user(tags, maddr,
+ MTE_GRANULES_PER_PAGE);
+ else
+ /* No tags in memory, so write zeros */
+ num_tags = MTE_GRANULES_PER_PAGE -
+ clear_user(tags, MTE_GRANULES_PER_PAGE);
+ kvm_release_pfn_clean(pfn);
+ } else {
+ num_tags = mte_copy_tags_from_user(maddr, tags,
+ MTE_GRANULES_PER_PAGE);
+
+ /*
+ * Set the flag after checking that the write
+ * completed fully.
+ */
+ if (num_tags == MTE_GRANULES_PER_PAGE)
+ set_bit(PG_mte_tagged, &page->flags);
+
+ kvm_release_pfn_dirty(pfn);
+ }
+
+ if (num_tags != MTE_GRANULES_PER_PAGE) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ gfn++;
+ tags += num_tags;
+ length -= PAGE_SIZE;
+ }
+
+out:
+ mutex_unlock(&kvm->slots_lock);
+ /* If some data has been copied, report the number of bytes copied */
+ if (length != copy_tags->length)
+ return copy_tags->length - length;
+ return ret;
+}
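
A hedged userspace sketch of the new ioctl (read_page_tags() is illustrative): length is counted in guest bytes, while the tag buffer holds one byte per 16-byte MTE granule, i.e. PAGE_SIZE / 16 bytes per page.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static long read_page_tags(int vm_fd, __u64 ipa, void *tag_buf)
{
	struct kvm_arm_copy_mte_tags copy = {
		.guest_ipa	= ipa,		/* must be page-aligned */
		.addr		= tag_buf,	/* 4096 / 16 = 256 bytes */
		.length		= 4096,		/* assuming 4K pages */
		.flags		= KVM_ARM_TAGS_FROM_GUEST,
	};

	/* 0 on success, bytes handled on a partial copy, -1 on error */
	return ioctl(vm_fd, KVM_ARM_MTE_COPY_TAGS, &copy);
}
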
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index cebe39f3b1b6..6f48336b1d86 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -291,3 +291,48 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
if (exception_index == ARM_EXCEPTION_EL1_SERROR)
kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}
+
+void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr,
+ u64 par, uintptr_t vcpu,
+ u64 far, u64 hpfar)
+{
+ u64 elr_in_kimg = __phys_to_kimg(__hyp_pa(elr));
+ u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr;
+ u64 mode = spsr & PSR_MODE_MASK;
+
+ /*
+ * The nVHE hyp symbols are not included by kallsyms to avoid issues
+ * with aliasing. That means that the symbols cannot be printed with the
+ * "%pS" format specifier, so fall back to the vmlinux address if
+ * there's no better option.
+ */
+ if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
+ kvm_err("Invalid host exception to nVHE hyp!\n");
+ } else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
+ (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
+ struct bug_entry *bug = find_bug(elr_in_kimg);
+ const char *file = NULL;
+ unsigned int line = 0;
+
+ /* All hyp bugs, including warnings, are treated as fatal. */
+ if (bug)
+ bug_get_file_line(bug, &file, &line);
+
+ if (file)
+ kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
+ else
+ kvm_err("nVHE hyp BUG at: %016llx!\n", elr + hyp_offset);
+ } else {
+ kvm_err("nVHE hyp panic at: %016llx!\n", elr + hyp_offset);
+ }
+
+ /*
+ * Hyp has panicked and we're going to handle that by panicking the
+ * kernel. The kernel offset will be revealed in the panic so we're
+ * also safe to reveal the hyp offset as a debugging aid for translating
+ * hyp VAs to vmlinux addresses.
+ */
+ kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);
+
+ panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
+ spsr, elr, esr, far, hpfar, par, vcpu);
+}
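
The printed "Hyp Offset" is a single constant derived from the one hyp VA whose kernel-image address is known (the ELR), and it translates every hyp VA in the panic message back to a vmlinux address. Spelled out (hyp_va_to_vmlinux() is illustrative):

/*
 *   elr_in_kimg = __phys_to_kimg(__hyp_pa(elr))   kernel-image VA of ELR
 *   hyp_offset  = elr_in_kimg - kaslr_offset() - elr
 */
static u64 hyp_va_to_vmlinux(u64 hyp_va, u64 hyp_offset)
{
	return hyp_va + hyp_offset;	/* matches "elr + hyp_offset" above */
}
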
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 687598e41b21..b726332eec49 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -10,4 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
-DDISABLE_BRANCH_PROFILING \
$(DISABLE_STACKLEAK_PLUGIN)
-obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o reserved_mem.o
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index b0afad7a99c6..435346ea1504 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -13,6 +13,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_mte.h>
#include <asm/kvm_ptrauth.h>
.text
@@ -51,6 +52,9 @@ alternative_else_nop_endif
add x29, x0, #VCPU_CONTEXT
+ // mte_switch_to_guest(g_ctxt, h_ctxt, tmp1)
+ mte_switch_to_guest x29, x1, x2
+
// Macro ptrauth_switch_to_guest format:
// ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
// The below macro to restore guest keys is not implemented in C code
@@ -85,8 +89,10 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
// If the hyp context is loaded, go straight to hyp_panic
get_loaded_vcpu x0, x1
- cbz x0, hyp_panic
+ cbnz x0, 1f
+ b hyp_panic
+1:
// The hyp context is saved so make sure it is restored to allow
// hyp_panic to run at hyp and, subsequently, panic to run in the host.
// This makes use of __guest_exit to avoid duplication but sets the
@@ -94,7 +100,7 @@ SYM_INNER_LABEL(__guest_exit_panic, SYM_L_GLOBAL)
// current state is saved to the guest context but it will only be
// accurate if the guest had been completely restored.
adr_this_cpu x0, kvm_hyp_ctxt, x1
- adr x1, hyp_panic
+ adr_l x1, hyp_panic
str x1, [x0, #CPU_XREG_OFFSET(30)]
get_vcpu_ptr x1, x0
@@ -140,13 +146,16 @@ SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
// when this feature is enabled for kernel code.
ptrauth_switch_to_hyp x1, x2, x3, x4, x5
+ // mte_switch_to_hyp(g_ctxt, h_ctxt, reg1)
+ mte_switch_to_hyp x1, x2, x3
+
// Restore hyp's sp_el0
restore_sp_el0 x2, x3
// Now restore the hyp regs
restore_callee_saved_regs x2
- set_loaded_vcpu xzr, x1, x2
+ set_loaded_vcpu xzr, x2, x3
alternative_if ARM64_HAS_RAS_EXTN
// If we have the RAS extensions we can consume a pending error
diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
index 73629094f903..0418399e0a20 100644
--- a/arch/arm64/kvm/hyp/exception.c
+++ b/arch/arm64/kvm/hyp/exception.c
@@ -112,7 +112,8 @@ static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
new |= (old & PSR_C_BIT);
new |= (old & PSR_V_BIT);
- // TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)
+ if (kvm_has_mte(vcpu->kvm))
+ new |= PSR_TCO_BIT;
new |= (old & PSR_DIT_BIT);
@@ -296,7 +297,7 @@ static void enter_exception32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
*vcpu_pc(vcpu) = vect_offset;
}
-void kvm_inject_exception(struct kvm_vcpu *vcpu)
+static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
if (vcpu_el1_is_32bit(vcpu)) {
switch (vcpu->arch.flags & KVM_ARM64_EXCEPT_MASK) {
@@ -329,3 +330,19 @@ void kvm_inject_exception(struct kvm_vcpu *vcpu)
}
}
}
+
+/*
+ * Adjust the guest PC (and potentially exception state) depending on
+ * flags provided by the emulation code.
+ */
+void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
+ kvm_inject_exception(vcpu);
+ vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
+ KVM_ARM64_EXCEPT_MASK);
+ } else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
+ kvm_skip_instr(vcpu);
+ vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
+ }
+}
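
__kvm_adjust_pc() is the consumer side; emulation code is the producer and only sets flags. A producer-side sketch (demo_finish_mmio_emulation() is illustrative):

static void demo_finish_mmio_emulation(struct kvm_vcpu *vcpu)
{
	/*
	 * Skip the emulated instruction the next time the PC is
	 * committed: on guest entry or, per the arm.c hunk above,
	 * before returning to userspace.
	 */
	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}
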
diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
index 01f114aa47b0..3c635929771a 100644
--- a/arch/arm64/kvm/hyp/fpsimd.S
+++ b/arch/arm64/kvm/hyp/fpsimd.S
@@ -19,3 +19,13 @@ SYM_FUNC_START(__fpsimd_restore_state)
fpsimd_restore x0, 1
ret
SYM_FUNC_END(__fpsimd_restore_state)
+
+SYM_FUNC_START(__sve_restore_state)
+ __sve_load 0, x1, 2
+ ret
+SYM_FUNC_END(__sve_restore_state)
+
+SYM_FUNC_START(__sve_save_state)
+ sve_save 0, x1, 2
+ ret
+SYM_FUNC_END(__sve_save_state)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 5f49df4ffdd8..9aa9b73475c9 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -76,6 +76,7 @@ el1_trap:
b __guest_exit
el1_irq:
+el1_fiq:
get_vcpu_ptr x1, x0
mov x0, #ARM_EXCEPTION_IRQ
b __guest_exit
@@ -131,7 +132,6 @@ SYM_CODE_END(\label)
invalid_vector el2t_error_invalid
invalid_vector el2h_irq_invalid
invalid_vector el2h_fiq_invalid
- invalid_vector el1_fiq_invalid
.ltorg
@@ -179,12 +179,12 @@ SYM_CODE_START(__kvm_hyp_vector)
valid_vect el1_sync // Synchronous 64-bit EL1
valid_vect el1_irq // IRQ 64-bit EL1
- invalid_vect el1_fiq_invalid // FIQ 64-bit EL1
+ valid_vect el1_fiq // FIQ 64-bit EL1
valid_vect el1_error // Error 64-bit EL1
valid_vect el1_sync // Synchronous 32-bit EL1
valid_vect el1_irq // IRQ 32-bit EL1
- invalid_vect el1_fiq_invalid // FIQ 32-bit EL1
+ valid_vect el1_fiq // FIQ 32-bit EL1
valid_vect el1_error // Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)
diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
index 61716359035d..4fdfeabefeb4 100644
--- a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
+++ b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
@@ -13,8 +13,6 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
-void kvm_inject_exception(struct kvm_vcpu *vcpu);
-
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu)) {
@@ -44,22 +42,6 @@ static inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
}
/*
- * Adjust the guest PC on entry, depending on flags provided by EL1
- * for the purpose of emulation (MMIO, sysreg) or exception injection.
- */
-static inline void __adjust_pc(struct kvm_vcpu *vcpu)
-{
- if (vcpu->arch.flags & KVM_ARM64_PENDING_EXCEPTION) {
- kvm_inject_exception(vcpu);
- vcpu->arch.flags &= ~(KVM_ARM64_PENDING_EXCEPTION |
- KVM_ARM64_EXCEPT_MASK);
- } else if (vcpu->arch.flags & KVM_ARM64_INCREMENT_PC) {
- kvm_skip_instr(vcpu);
- vcpu->arch.flags &= ~KVM_ARM64_INCREMENT_PC;
- }
-}
-
-/*
* Skip an instruction while host sysregs are live.
* Assumes host is always 64-bit.
*/
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 54f4860cd87c..e4a2f295a394 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -30,8 +30,6 @@
#include <asm/processor.h>
#include <asm/thread_info.h>
-extern const char __hyp_panic_string[];
-
extern struct exception_table_entry __start___kvm_ex_table;
extern struct exception_table_entry __stop___kvm_ex_table;
@@ -90,15 +88,18 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
* counter, which could make a PMXEVCNTR_EL0 access UNDEF at
* EL1 instead of being trapped to EL2.
*/
- write_sysreg(0, pmselr_el0);
- write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
+ if (kvm_arm_support_pmu_v3()) {
+ write_sysreg(0, pmselr_el0);
+ write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
+ }
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}
static inline void __deactivate_traps_common(void)
{
write_sysreg(0, hstr_el2);
- write_sysreg(0, pmuserenr_el0);
+ if (kvm_arm_support_pmu_v3())
+ write_sysreg(0, pmuserenr_el0);
}
static inline void ___activate_traps(struct kvm_vcpu *vcpu)
@@ -157,18 +158,10 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
return true;
}
-static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
+static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
{
- u8 ec;
- u64 esr;
u64 hpfar, far;
- esr = vcpu->arch.fault.esr_el2;
- ec = ESR_ELx_EC(esr);
-
- if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
- return true;
-
far = read_sysreg_el2(SYS_FAR);
/*
@@ -191,33 +184,59 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
hpfar = read_sysreg(hpfar_el2);
}
- vcpu->arch.fault.far_el2 = far;
- vcpu->arch.fault.hpfar_el2 = hpfar;
+ fault->far_el2 = far;
+ fault->hpfar_el2 = hpfar;
return true;
}
+static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
+{
+ u8 ec;
+ u64 esr;
+
+ esr = vcpu->arch.fault.esr_el2;
+ ec = ESR_ELx_EC(esr);
+
+ if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
+ return true;
+
+ return __get_fault_info(esr, &vcpu->arch.fault);
+}
+
+static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
+{
+ struct thread_struct *thread;
+
+ thread = container_of(vcpu->arch.host_fpsimd_state, struct thread_struct,
+ uw.fpsimd_state);
+
+ __sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr);
+}
+
+static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
+{
+ sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
+ __sve_restore_state(vcpu_sve_pffr(vcpu),
+ &vcpu->arch.ctxt.fp_regs.fpsr);
+ write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
+}
+
/* Check for an FPSIMD/SVE trap and handle as appropriate */
static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
- bool vhe, sve_guest, sve_host;
+ bool sve_guest, sve_host;
u8 esr_ec;
+ u64 reg;
if (!system_supports_fpsimd())
return false;
- /*
- * Currently system_supports_sve() currently implies has_vhe(),
- * so the check is redundant. However, has_vhe() can be determined
- * statically and helps the compiler remove dead code.
- */
- if (has_vhe() && system_supports_sve()) {
+ if (system_supports_sve()) {
sve_guest = vcpu_has_sve(vcpu);
sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
- vhe = true;
} else {
sve_guest = false;
sve_host = false;
- vhe = has_vhe();
}
esr_ec = kvm_vcpu_trap_get_class(vcpu);
@@ -226,53 +245,38 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
return false;
/* Don't handle SVE traps for non-SVE vcpus here: */
- if (!sve_guest)
- if (esr_ec != ESR_ELx_EC_FP_ASIMD)
- return false;
+ if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
+ return false;
/* Valid trap. Switch the context: */
-
- if (vhe) {
- u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;
-
+ if (has_vhe()) {
+ reg = CPACR_EL1_FPEN;
if (sve_guest)
reg |= CPACR_EL1_ZEN;
- write_sysreg(reg, cpacr_el1);
+ sysreg_clear_set(cpacr_el1, 0, reg);
} else {
- write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
- cptr_el2);
- }
+ reg = CPTR_EL2_TFP;
+ if (sve_guest)
+ reg |= CPTR_EL2_TZ;
+ sysreg_clear_set(cptr_el2, reg, 0);
+ }
isb();
if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
- /*
- * In the SVE case, VHE is assumed: it is enforced by
- * Kconfig and kvm_arch_init().
- */
- if (sve_host) {
- struct thread_struct *thread = container_of(
- vcpu->arch.host_fpsimd_state,
- struct thread_struct, uw.fpsimd_state);
-
- sve_save_state(sve_pffr(thread),
- &vcpu->arch.host_fpsimd_state->fpsr);
- } else {
+ if (sve_host)
+ __hyp_sve_save_host(vcpu);
+ else
__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
- }
vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
}
- if (sve_guest) {
- sve_load_state(vcpu_sve_pffr(vcpu),
- &vcpu->arch.ctxt.fp_regs.fpsr,
- sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
- write_sysreg_s(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR_EL12);
- } else {
+ if (sve_guest)
+ __hyp_sve_restore_guest(vcpu);
+ else
__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);
- }
/* Skip restoring fpexc32 for AArch64 guests */
if (!(read_sysreg(hcr_el2) & HCR_RW))
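
The trap handler above swaps the open-coded read/modify/write of CPACR_EL1/CPTR_EL2 for sysreg_clear_set(). Roughly what that helper expands to (a sketch, specialised to cpacr_el1):

static inline void demo_sysreg_clear_set_cpacr(u64 clr, u64 set)
{
	u64 old = read_sysreg(cpacr_el1);
	u64 new = (old & ~clr) | set;

	/* Skip the write when nothing would change */
	if (new != old)
		write_sysreg(new, cpacr_el1);
}
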
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index cce43bfe158f..de7e14c862e6 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -14,6 +14,7 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
@@ -26,6 +27,16 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
}
+static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
+
+ if (!vcpu)
+ vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+
+ return kvm_has_mte(kern_hyp_va(vcpu->kvm));
+}
+
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt_sys_reg(ctxt, CSSELR_EL1) = read_sysreg(csselr_el1);
@@ -46,6 +57,11 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg_par();
ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1);
+ if (ctxt_has_mte(ctxt)) {
+ ctxt_sys_reg(ctxt, TFSR_EL1) = read_sysreg_el1(SYS_TFSR);
+ ctxt_sys_reg(ctxt, TFSRE0_EL1) = read_sysreg_s(SYS_TFSRE0_EL1);
+ }
+
ctxt_sys_reg(ctxt, SP_EL1) = read_sysreg(sp_el1);
ctxt_sys_reg(ctxt, ELR_EL1) = read_sysreg_el1(SYS_ELR);
ctxt_sys_reg(ctxt, SPSR_EL1) = read_sysreg_el1(SYS_SPSR);
@@ -107,6 +123,11 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1), par_el1);
write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1), tpidr_el1);
+ if (ctxt_has_mte(ctxt)) {
+ write_sysreg_el1(ctxt_sys_reg(ctxt, TFSR_EL1), SYS_TFSR);
+ write_sysreg_s(ctxt_sys_reg(ctxt, TFSRE0_EL1), SYS_TFSRE0_EL1);
+ }
+
if (!has_vhe() &&
cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
ctxt->__hyp_running_vcpu) {
diff --git a/arch/arm64/kvm/hyp/include/nvhe/early_alloc.h b/arch/arm64/kvm/hyp/include/nvhe/early_alloc.h
new file mode 100644
index 000000000000..dc61aaa56f31
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/early_alloc.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __KVM_HYP_EARLY_ALLOC_H
+#define __KVM_HYP_EARLY_ALLOC_H
+
+#include <asm/kvm_pgtable.h>
+
+void hyp_early_alloc_init(void *virt, unsigned long size);
+unsigned long hyp_early_alloc_nr_used_pages(void);
+void *hyp_early_alloc_page(void *arg);
+void *hyp_early_alloc_contig(unsigned int nr_pages);
+
+extern struct kvm_pgtable_mm_ops hyp_early_alloc_mm_ops;
+
+#endif /* __KVM_HYP_EARLY_ALLOC_H */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/gfp.h b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
new file mode 100644
index 000000000000..fb0f523d1492
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/gfp.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __KVM_HYP_GFP_H
+#define __KVM_HYP_GFP_H
+
+#include <linux/list.h>
+
+#include <nvhe/memory.h>
+#include <nvhe/spinlock.h>
+
+#define HYP_NO_ORDER USHRT_MAX
+
+struct hyp_pool {
+ /*
+ * Spinlock protecting concurrent changes to the memory pool as well as
+ * the struct hyp_page of the pool's pages until we have a proper atomic
+ * API at EL2.
+ */
+ hyp_spinlock_t lock;
+ struct list_head free_area[MAX_ORDER];
+ phys_addr_t range_start;
+ phys_addr_t range_end;
+ unsigned short max_order;
+};
+
+/* Allocation */
+void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order);
+void hyp_get_page(struct hyp_pool *pool, void *addr);
+void hyp_put_page(struct hyp_pool *pool, void *addr);
+
+/* Used pages cannot be freed */
+int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
+ unsigned int reserved_pages);
+#endif /* __KVM_HYP_GFP_H */
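
Usage sketch for the pool API above (demo_pool_get_page() and its arguments are illustrative): initialise a pool over a contiguous donated range, then draw pages from it; hyp_put_page() frees a page once its refcount drops to zero.

static struct hyp_pool demo_pool;

static void *demo_pool_get_page(phys_addr_t base, unsigned int nr_pages)
{
	if (hyp_pool_init(&demo_pool, hyp_phys_to_pfn(base), nr_pages, 0))
		return NULL;

	return hyp_alloc_pages(&demo_pool, 0);	/* one order-0 page */
}
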
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
new file mode 100644
index 000000000000..9c227d87c36d
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google LLC
+ * Author: Quentin Perret <qperret@google.com>
+ */
+
+#ifndef __KVM_NVHE_MEM_PROTECT__
+#define __KVM_NVHE_MEM_PROTECT__
+#include <linux/kvm_host.h>
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_pgtable.h>
+#include <asm/virt.h>
+#include <nvhe/spinlock.h>
+
+struct host_kvm {
+ struct kvm_arch arch;
+ struct kvm_pgtable pgt;
+ struct kvm_pgtable_mm_ops mm_ops;
+ hyp_spinlock_t lock;
+};
+extern struct host_kvm host_kvm;
+
+int __pkvm_prot_finalize(void);
+int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end);
+
+int kvm_host_prepare_stage2(void *pgt_pool_base);
+void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
+
+static __always_inline void __load_host_stage2(void)
+{
+ if (static_branch_likely(&kvm_protected_mode_initialized))
+ __load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+ else
+ write_sysreg(0, vttbr_el2);
+}
+#endif /* __KVM_NVHE_MEM_PROTECT__ */
diff --git a/arch/arm64/kvm/hyp/include/nvhe/memory.h b/arch/arm64/kvm/hyp/include/nvhe/memory.h
new file mode 100644
index 000000000000..592b7edb3edb
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/memory.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __KVM_HYP_MEMORY_H
+#define __KVM_HYP_MEMORY_H
+
+#include <asm/kvm_mmu.h>
+#include <asm/page.h>
+
+#include <linux/types.h>
+
+struct hyp_page {
+ unsigned short refcount;
+ unsigned short order;
+};
+
+extern u64 __hyp_vmemmap;
+#define hyp_vmemmap ((struct hyp_page *)__hyp_vmemmap)
+
+#define __hyp_va(phys) ((void *)((phys_addr_t)(phys) - hyp_physvirt_offset))
+
+static inline void *hyp_phys_to_virt(phys_addr_t phys)
+{
+ return __hyp_va(phys);
+}
+
+static inline phys_addr_t hyp_virt_to_phys(void *addr)
+{
+ return __hyp_pa(addr);
+}
+
+#define hyp_phys_to_pfn(phys) ((phys) >> PAGE_SHIFT)
+#define hyp_pfn_to_phys(pfn) ((phys_addr_t)((pfn) << PAGE_SHIFT))
+#define hyp_phys_to_page(phys) (&hyp_vmemmap[hyp_phys_to_pfn(phys)])
+#define hyp_virt_to_page(virt) hyp_phys_to_page(__hyp_pa(virt))
+#define hyp_virt_to_pfn(virt) hyp_phys_to_pfn(__hyp_pa(virt))
+
+#define hyp_page_to_pfn(page) ((struct hyp_page *)(page) - hyp_vmemmap)
+#define hyp_page_to_phys(page) hyp_pfn_to_phys((hyp_page_to_pfn(page)))
+#define hyp_page_to_virt(page) __hyp_va(hyp_page_to_phys(page))
+
+static inline int hyp_page_count(void *addr)
+{
+ struct hyp_page *p = hyp_virt_to_page(addr);
+
+ return p->refcount;
+}
+
+#endif /* __KVM_HYP_MEMORY_H */
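
The converters above compose into round trips. A sketch for a page-aligned hyp VA (alignment assumed, since hyp_page_to_virt() returns the start of the page):

static inline bool demo_roundtrip_ok(void *va)
{
	struct hyp_page *p = hyp_virt_to_page(va);

	return hyp_page_to_virt(p) == va &&
	       hyp_phys_to_virt(hyp_virt_to_phys(va)) == va;
}
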
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h
new file mode 100644
index 000000000000..8ec3a5a7744b
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __KVM_HYP_MM_H
+#define __KVM_HYP_MM_H
+
+#include <asm/kvm_pgtable.h>
+#include <asm/spectre.h>
+#include <linux/memblock.h>
+#include <linux/types.h>
+
+#include <nvhe/memory.h>
+#include <nvhe/spinlock.h>
+
+#define HYP_MEMBLOCK_REGIONS 128
+extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
+extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
+extern struct kvm_pgtable pkvm_pgtable;
+extern hyp_spinlock_t pkvm_pgd_lock;
+extern struct hyp_pool hpool;
+extern u64 __io_map_base;
+
+int hyp_create_idmap(u32 hyp_va_bits);
+int hyp_map_vectors(void);
+int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back);
+int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot);
+int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
+int __pkvm_create_mappings(unsigned long start, unsigned long size,
+ unsigned long phys, enum kvm_pgtable_prot prot);
+unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
+ enum kvm_pgtable_prot prot);
+
+static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
+ unsigned long *start, unsigned long *end)
+{
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ struct hyp_page *p = hyp_phys_to_page(phys);
+
+ *start = (unsigned long)p;
+ *end = *start + nr_pages * sizeof(struct hyp_page);
+ *start = ALIGN_DOWN(*start, PAGE_SIZE);
+ *end = ALIGN(*end, PAGE_SIZE);
+}
+
+static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
+{
+ unsigned long total = 0, i;
+
+ /* Provision the worst case scenario */
+ for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
+ nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
+ total += nr_pages;
+ }
+
+ return total;
+}
+
+static inline unsigned long __hyp_pgtable_total_pages(void)
+{
+ unsigned long res = 0, i;
+
+ /* Cover all of memory with page-granularity */
+ for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
+ struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
+ res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
+ }
+
+ return res;
+}
+
+static inline unsigned long hyp_s1_pgtable_pages(void)
+{
+ unsigned long res;
+
+ res = __hyp_pgtable_total_pages();
+
+ /* Allow 1 GiB for private mappings */
+ res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+ return res;
+}
+
+static inline unsigned long host_s2_pgtable_pages(void)
+{
+ unsigned long res;
+
+ /*
+ * Include an extra 16 pages to safely upper-bound the worst case of
+ * concatenated pgds.
+ */
+ res = __hyp_pgtable_total_pages() + 16;
+
+ /* Allow 1 GiB for MMIO mappings */
+ res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+ return res;
+}
+
+#endif /* __KVM_HYP_MM_H */
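
A worked example for __hyp_pgtable_max_pages() above, assuming 4K pages, 512 PTEs per table and KVM_PGTABLE_MAX_LEVELS == 4. Mapping 16 GiB (4194304 pages) at page granularity needs at most:

/*
 *	level 3: DIV_ROUND_UP(4194304, 512) = 8192 tables
 *	level 2: DIV_ROUND_UP(8192, 512)    =   16 tables
 *	level 1: DIV_ROUND_UP(16, 512)      =    1 table
 *	level 0: DIV_ROUND_UP(1, 512)       =    1 table
 *
 * i.e. 8210 pages, roughly 32 MiB of page-table memory.
 */
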
diff --git a/arch/arm64/kvm/hyp/include/nvhe/spinlock.h b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h
new file mode 100644
index 000000000000..76b537f8d1c6
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/spinlock.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * A stand-alone ticket spinlock implementation for use by the non-VHE
+ * KVM hypervisor code running at EL2.
+ *
+ * Copyright (C) 2020 Google LLC
+ * Author: Will Deacon <will@kernel.org>
+ *
+ * Heavily based on the implementation removed by c11090474d70 which was:
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#ifndef __ARM64_KVM_NVHE_SPINLOCK_H__
+#define __ARM64_KVM_NVHE_SPINLOCK_H__
+
+#include <asm/alternative.h>
+#include <asm/lse.h>
+
+typedef union hyp_spinlock {
+ u32 __val;
+ struct {
+#ifdef __AARCH64EB__
+ u16 next, owner;
+#else
+ u16 owner, next;
+#endif
+ };
+} hyp_spinlock_t;
+
+#define hyp_spin_lock_init(l) \
+do { \
+ *(l) = (hyp_spinlock_t){ .__val = 0 }; \
+} while (0)
+
+static inline void hyp_spin_lock(hyp_spinlock_t *lock)
+{
+ u32 tmp;
+ hyp_spinlock_t lockval, newval;
+
+ asm volatile(
+ /* Atomically increment the next ticket. */
+ ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+" prfm pstl1strm, %3\n"
+"1: ldaxr %w0, %3\n"
+" add %w1, %w0, #(1 << 16)\n"
+" stxr %w2, %w1, %3\n"
+" cbnz %w2, 1b\n",
+ /* LSE atomics */
+" mov %w2, #(1 << 16)\n"
+" ldadda %w2, %w0, %3\n"
+ __nops(3))
+
+ /* Did we get the lock? */
+" eor %w1, %w0, %w0, ror #16\n"
+" cbz %w1, 3f\n"
+ /*
+ * No: spin on the owner. Send a local event to avoid missing an
+ * unlock before the exclusive load.
+ */
+" sevl\n"
+"2: wfe\n"
+" ldaxrh %w2, %4\n"
+" eor %w1, %w2, %w0, lsr #16\n"
+" cbnz %w1, 2b\n"
+ /* We got the lock. Critical section starts here. */
+"3:"
+ : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
+ : "Q" (lock->owner)
+ : "memory");
+}
+
+static inline void hyp_spin_unlock(hyp_spinlock_t *lock)
+{
+ u64 tmp;
+
+ asm volatile(
+ ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " ldrh %w1, %0\n"
+ " add %w1, %w1, #1\n"
+ " stlrh %w1, %0",
+ /* LSE atomics */
+ " mov %w1, #1\n"
+ " staddlh %w1, %0\n"
+ __nops(1))
+ : "=Q" (lock->owner), "=&r" (tmp)
+ :
+ : "memory");
+}
+
+#endif /* __ARM64_KVM_NVHE_SPINLOCK_H__ */
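
A C-level restatement of the ticket lock above (illustration only: the real code must be asm so it can use WFE/SEV and the LSE alternative). A locker takes a ticket from "next" and spins until "owner" reaches it; unlock bumps "owner".

static inline void demo_ticket_lock(hyp_spinlock_t *lock)
{
	u16 ticket = __atomic_fetch_add(&lock->next, 1, __ATOMIC_ACQUIRE);

	while (__atomic_load_n(&lock->owner, __ATOMIC_ACQUIRE) != ticket)
		cpu_relax();
}

static inline void demo_ticket_unlock(hyp_spinlock_t *lock)
{
	u16 next_owner = __atomic_load_n(&lock->owner, __ATOMIC_RELAXED) + 1;

	__atomic_store_n(&lock->owner, next_owner, __ATOMIC_RELEASE);
}
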
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index a6707df4f6c0..5df6193fc430 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -9,10 +9,15 @@ ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
hostprogs := gen-hyprel
HOST_EXTRACFLAGS += -I$(objtree)/include
+lib-objs := clear_page.o copy_page.o memcpy.o memset.o
+lib-objs := $(addprefix ../../../lib/, $(lib-objs))
+
obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
- hyp-main.o hyp-smp.o psci-relay.o
+ hyp-main.o hyp-smp.o psci-relay.o early_alloc.o stub.o page_alloc.o \
+ cache.o setup.o mm.o mem_protect.o
obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
- ../fpsimd.o ../hyp-entry.o ../exception.o
+ ../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
+obj-y += $(lib-objs)
##
## Build rules for compiling nVHE hyp code
@@ -75,9 +80,9 @@ quiet_cmd_hyprel = HYPREL $@
quiet_cmd_hypcopy = HYPCOPY $@
cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ $< $@
-# Remove ftrace and Shadow Call Stack CFLAGS.
-# This is equivalent to the 'notrace' and '__noscs' annotations.
-KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
+# Remove ftrace, Shadow Call Stack, and CFI CFLAGS.
+# This is equivalent to the 'notrace', '__noscs', and '__nocfi' annotations.
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
# KVM nVHE code is run at a different exception code with a different map, so
# compiler instrumentation that inserts callbacks or checks into the code may
diff --git a/arch/arm64/kvm/hyp/nvhe/cache.S b/arch/arm64/kvm/hyp/nvhe/cache.S
new file mode 100644
index 000000000000..958734f4d6b0
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/cache.S
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Code copied from arch/arm64/mm/cache.S.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/alternative.h>
+
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
+ dcache_by_line_op civac, sy, x0, x1, x2, x3
+ ret
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index 91a711aa8382..7d3f25868cae 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -21,17 +21,11 @@ static void __debug_save_spe(u64 *pmscr_el1)
/* Clear pmscr in case of early return */
*pmscr_el1 = 0;
- /* SPE present on this CPU? */
- if (!cpuid_feature_extract_unsigned_field(read_sysreg(id_aa64dfr0_el1),
- ID_AA64DFR0_PMSVER_SHIFT))
- return;
-
- /* Yes; is it owned by EL3? */
- reg = read_sysreg_s(SYS_PMBIDR_EL1);
- if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT))
- return;
-
- /* No; is the host actually using the thing? */
+ /*
+ * At this point we know that this CPU implements SPE
+ * and that it is available to the host.
+ * Check whether the host is actually using it.
+ */
reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)))
return;
@@ -58,16 +52,60 @@ static void __debug_restore_spe(u64 pmscr_el1)
write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
}
-void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
+static void __debug_save_trace(u64 *trfcr_el1)
+{
+ *trfcr_el1 = 0;
+
+ /* Check if the TRBE is enabled */
+ if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_ENABLE))
+ return;
+ /*
+ * Prohibit trace generation while we are in the guest.
+ * Since access to TRFCR_EL1 is trapped, the guest can't
+ * modify the filtering set by the host.
+ */
+ *trfcr_el1 = read_sysreg_s(SYS_TRFCR_EL1);
+ write_sysreg_s(0, SYS_TRFCR_EL1);
+ isb();
+ /* Drain the trace buffer to memory */
+ tsb_csync();
+ dsb(nsh);
+}
+
+static void __debug_restore_trace(u64 trfcr_el1)
+{
+ if (!trfcr_el1)
+ return;
+
+ /* Restore trace filter controls */
+ write_sysreg_s(trfcr_el1, SYS_TRFCR_EL1);
+}
+
+void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
/* Disable and flush SPE data generation */
- __debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
+ if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_SPE)
+ __debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
+ /* Disable and flush Self-Hosted Trace generation */
+ if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_TRBE)
+ __debug_save_trace(&vcpu->arch.host_debug_state.trfcr_el1);
+}
+
+void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
+{
__debug_switch_to_guest_common(vcpu);
}
+void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_SPE)
+ __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
+ if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_TRBE)
+ __debug_restore_trace(vcpu->arch.host_debug_state.trfcr_el1);
+}
+
void __debug_switch_to_host(struct kvm_vcpu *vcpu)
{
- __debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
__debug_switch_to_host_common(vcpu);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/early_alloc.c b/arch/arm64/kvm/hyp/nvhe/early_alloc.c
new file mode 100644
index 000000000000..1306c430ab87
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/early_alloc.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google LLC
+ * Author: Quentin Perret <qperret@google.com>
+ */
+
+#include <asm/kvm_pgtable.h>
+
+#include <nvhe/early_alloc.h>
+#include <nvhe/memory.h>
+
+struct kvm_pgtable_mm_ops hyp_early_alloc_mm_ops;
+s64 __ro_after_init hyp_physvirt_offset;
+
+static unsigned long base;
+static unsigned long end;
+static unsigned long cur;
+
+unsigned long hyp_early_alloc_nr_used_pages(void)
+{
+ return (cur - base) >> PAGE_SHIFT;
+}
+
+void *hyp_early_alloc_contig(unsigned int nr_pages)
+{
+ unsigned long size = (nr_pages << PAGE_SHIFT);
+ void *ret = (void *)cur;
+
+ if (!nr_pages)
+ return NULL;
+
+ if (end - cur < size)
+ return NULL;
+
+ cur += size;
+ memset(ret, 0, size);
+
+ return ret;
+}
+
+void *hyp_early_alloc_page(void *arg)
+{
+ return hyp_early_alloc_contig(1);
+}
+
+void hyp_early_alloc_init(void *virt, unsigned long size)
+{
+ base = cur = (unsigned long)virt;
+ end = base + size;
+
+ hyp_early_alloc_mm_ops.zalloc_page = hyp_early_alloc_page;
+ hyp_early_alloc_mm_ops.phys_to_virt = hyp_phys_to_virt;
+ hyp_early_alloc_mm_ops.virt_to_phys = hyp_virt_to_phys;
+}
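
hyp_early_alloc_contig() above is a plain bump allocator: a cursor marches
through a fixed region, allocations are zeroed, and nothing is ever freed.
A minimal stand-alone sketch of the same idea (the buffer size and names
here are illustrative only, not part of the kernel API):

#include <stddef.h>
#include <string.h>

static unsigned char pool[16 * 4096];
static unsigned char *cur = pool;

/* Bump-allocate size bytes from the pool; never freed. */
static void *bump_alloc(size_t size)
{
	unsigned char *end = pool + sizeof(pool);
	void *ret = cur;

	if (!size || (size_t)(end - cur) < size)
		return NULL;

	cur += size;
	memset(ret, 0, size);	/* callers rely on zeroed memory */
	return ret;
}
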
diff --git a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
index ead02c6a7628..6bc88a756cb7 100644
--- a/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
+++ b/arch/arm64/kvm/hyp/nvhe/gen-hyprel.c
@@ -50,6 +50,18 @@
#ifndef R_AARCH64_ABS64
#define R_AARCH64_ABS64 257
#endif
+#ifndef R_AARCH64_PREL64
+#define R_AARCH64_PREL64 260
+#endif
+#ifndef R_AARCH64_PREL32
+#define R_AARCH64_PREL32 261
+#endif
+#ifndef R_AARCH64_PREL16
+#define R_AARCH64_PREL16 262
+#endif
+#ifndef R_AARCH64_PLT32
+#define R_AARCH64_PLT32 314
+#endif
#ifndef R_AARCH64_LD_PREL_LO19
#define R_AARCH64_LD_PREL_LO19 273
#endif
@@ -371,6 +383,12 @@ static void emit_rela_section(Elf64_Shdr *sh_rela)
case R_AARCH64_ABS64:
emit_rela_abs64(rela, sh_orig_name);
break;
+ /* Allow position-relative data relocations. */
+ case R_AARCH64_PREL64:
+ case R_AARCH64_PREL32:
+ case R_AARCH64_PREL16:
+ case R_AARCH64_PLT32:
+ break;
/* Allow relocations to generate PC-relative addressing. */
case R_AARCH64_LD_PREL_LO19:
case R_AARCH64_ADR_PREL_LO21:
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 6585a7cbbc56..2b23400e0fb3 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -71,32 +71,29 @@ SYM_FUNC_START(__host_enter)
SYM_FUNC_END(__host_enter)
/*
- * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
+ * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
+ * u64 elr, u64 par);
*/
SYM_FUNC_START(__hyp_do_panic)
/* Prepare and exit to the host's panic function. */
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h)
msr spsr_el2, lr
- ldr lr, =panic
+ ldr lr, =nvhe_hyp_panic_handler
hyp_kimg_va lr, x6
msr elr_el2, lr
- /* Set the panic format string. Use the, now free, LR as scratch. */
- ldr lr, =__hyp_panic_string
- hyp_kimg_va lr, x6
+ mov x29, x0
- /* Load the format arguments into x1-7. */
- mov x6, x3
- get_vcpu_ptr x7, x3
- mrs x3, esr_el2
- mrs x4, far_el2
- mrs x5, hpfar_el2
+ /* Load the panic arguments into x0-7 */
+ mrs x0, esr_el2
+ get_vcpu_ptr x4, x5
+ mrs x5, far_el2
+ mrs x6, hpfar_el2
+ mov x7, xzr // Unused argument
/* Enter the host, conditionally restoring the host context. */
- cmp x0, xzr
- mov x0, lr
- b.eq __host_enter_without_restoring
+ cbz x29, __host_enter_without_restoring
b __host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index c631e29fb001..c953fb4b9a13 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -83,11 +83,6 @@ SYM_CODE_END(__kvm_hyp_init)
* x0: struct kvm_nvhe_init_params PA
*/
SYM_CODE_START_LOCAL(___kvm_hyp_init)
-alternative_if ARM64_KVM_PROTECTED_MODE
- mov_q x1, HCR_HOST_NVHE_PROTECTED_FLAGS
- msr hcr_el2, x1
-alternative_else_nop_endif
-
ldr x1, [x0, #NVHE_INIT_TPIDR_EL2]
msr tpidr_el2, x1
@@ -97,6 +92,15 @@ alternative_else_nop_endif
ldr x1, [x0, #NVHE_INIT_MAIR_EL2]
msr mair_el2, x1
+ ldr x1, [x0, #NVHE_INIT_HCR_EL2]
+ msr hcr_el2, x1
+
+ ldr x1, [x0, #NVHE_INIT_VTTBR]
+ msr vttbr_el2, x1
+
+ ldr x1, [x0, #NVHE_INIT_VTCR]
+ msr vtcr_el2, x1
+
ldr x1, [x0, #NVHE_INIT_PGD_PA]
phys_to_ttbr x2, x1
alternative_if ARM64_HAS_CNP
@@ -115,15 +119,10 @@ alternative_else_nop_endif
/* Invalidate the stale TLBs from Bootloader */
tlbi alle2
+ tlbi vmalls12e1
dsb sy
- /*
- * Preserve all the RES1 bits while setting the default flags,
- * as well as the EE bit on BE. Drop the A flag since the compiler
- * is allowed to generate unaligned accesses.
- */
- mov_q x0, (SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
-CPU_BE( orr x0, x0, #SCTLR_ELx_EE)
+ mov_q x0, INIT_SCTLR_EL2_MMU_ON
alternative_if ARM64_HAS_ADDRESS_AUTH
mov_q x1, (SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
@@ -221,9 +220,7 @@ SYM_CODE_START(__kvm_handle_stub_hvc)
mov x0, xzr
reset:
/* Reset kvm back to the hyp stub. */
- mrs x5, sctlr_el2
- mov_q x6, SCTLR_ELx_FLAGS
- bic x5, x5, x6 // Clear SCTL_M and etc
+ mov_q x5, INIT_SCTLR_EL2_MMU_OFF
pre_disable_mmu_workaround
msr sctlr_el2, x5
isb
@@ -244,4 +241,31 @@ alternative_else_nop_endif
SYM_CODE_END(__kvm_handle_stub_hvc)
+SYM_FUNC_START(__pkvm_init_switch_pgd)
+ /* Turn the MMU off */
+ pre_disable_mmu_workaround
+ mrs x2, sctlr_el2
+ bic x3, x2, #SCTLR_ELx_M
+ msr sctlr_el2, x3
+ isb
+
+ tlbi alle2
+
+ /* Install the new pgtables */
+ ldr x3, [x0, #NVHE_INIT_PGD_PA]
+ phys_to_ttbr x4, x3
+alternative_if ARM64_HAS_CNP
+ orr x4, x4, #TTBR_CNP_BIT
+alternative_else_nop_endif
+ msr ttbr0_el2, x4
+
+ /* Set the new stack pointer */
+ ldr x0, [x0, #NVHE_INIT_STACK_HYP_VA]
+ mov sp, x0
+
+ /* And turn the MMU back on! */
+ set_sctlr_el2 x2
+ ret x1
+SYM_FUNC_END(__pkvm_init_switch_pgd)
+
.popsection
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index f012f8665ecc..1632f001f4ed 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -6,12 +6,15 @@
#include <hyp/switch.h>
+#include <asm/pgtable-types.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
+#include <nvhe/mem_protect.h>
+#include <nvhe/mm.h>
#include <nvhe/trap_handler.h>
DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
@@ -25,6 +28,13 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
}
+static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+
+ __kvm_adjust_pc(kern_hyp_va(vcpu));
+}
+
static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
{
__kvm_flush_vm_context();
@@ -46,11 +56,11 @@ static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
__kvm_tlb_flush_vmid(kern_hyp_va(mmu));
}
-static void handle___kvm_tlb_flush_local_vmid(struct kvm_cpu_context *host_ctxt)
+static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
- __kvm_tlb_flush_local_vmid(kern_hyp_va(mmu));
+ __kvm_flush_cpu_context(kern_hyp_va(mmu));
}
static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)
@@ -67,9 +77,9 @@ static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)
write_sysreg_el2(tmp, SYS_SCTLR);
}
-static void handle___vgic_v3_get_ich_vtr_el2(struct kvm_cpu_context *host_ctxt)
+static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)
{
- cpu_reg(host_ctxt, 1) = __vgic_v3_get_ich_vtr_el2();
+ cpu_reg(host_ctxt, 1) = __vgic_v3_get_gic_config();
}
static void handle___vgic_v3_read_vmcr(struct kvm_cpu_context *host_ctxt)
@@ -106,25 +116,87 @@ static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)
__vgic_v3_restore_aprs(kern_hyp_va(cpu_if));
}
+static void handle___pkvm_init(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
+ DECLARE_REG(unsigned long, size, host_ctxt, 2);
+ DECLARE_REG(unsigned long, nr_cpus, host_ctxt, 3);
+ DECLARE_REG(unsigned long *, per_cpu_base, host_ctxt, 4);
+ DECLARE_REG(u32, hyp_va_bits, host_ctxt, 5);
+
+ /*
+ * __pkvm_init() will return only if an error occurred; otherwise it
+ * will tail-call into __pkvm_init_finalise(), which will have to deal
+ * with the host context directly.
+ */
+ cpu_reg(host_ctxt, 1) = __pkvm_init(phys, size, nr_cpus, per_cpu_base,
+ hyp_va_bits);
+}
+
+static void handle___pkvm_cpu_set_vector(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(enum arm64_hyp_spectre_vector, slot, host_ctxt, 1);
+
+ cpu_reg(host_ctxt, 1) = pkvm_cpu_set_vector(slot);
+}
+
+static void handle___pkvm_create_mappings(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(unsigned long, start, host_ctxt, 1);
+ DECLARE_REG(unsigned long, size, host_ctxt, 2);
+ DECLARE_REG(unsigned long, phys, host_ctxt, 3);
+ DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 4);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_create_mappings(start, size, phys, prot);
+}
+
+static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
+ DECLARE_REG(size_t, size, host_ctxt, 2);
+ DECLARE_REG(enum kvm_pgtable_prot, prot, host_ctxt, 3);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_create_private_mapping(phys, size, prot);
+}
+
+static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
+{
+ cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
+}
+
+static void handle___pkvm_mark_hyp(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(phys_addr_t, start, host_ctxt, 1);
+ DECLARE_REG(phys_addr_t, end, host_ctxt, 2);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_mark_hyp(start, end);
+}
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_vcpu_run),
+ HANDLE_FUNC(__kvm_adjust_pc),
HANDLE_FUNC(__kvm_flush_vm_context),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
- HANDLE_FUNC(__kvm_tlb_flush_local_vmid),
+ HANDLE_FUNC(__kvm_flush_cpu_context),
HANDLE_FUNC(__kvm_timer_set_cntvoff),
HANDLE_FUNC(__kvm_enable_ssbs),
- HANDLE_FUNC(__vgic_v3_get_ich_vtr_el2),
+ HANDLE_FUNC(__vgic_v3_get_gic_config),
HANDLE_FUNC(__vgic_v3_read_vmcr),
HANDLE_FUNC(__vgic_v3_write_vmcr),
HANDLE_FUNC(__vgic_v3_init_lrs),
HANDLE_FUNC(__kvm_get_mdcr_el2),
HANDLE_FUNC(__vgic_v3_save_aprs),
HANDLE_FUNC(__vgic_v3_restore_aprs),
+ HANDLE_FUNC(__pkvm_init),
+ HANDLE_FUNC(__pkvm_cpu_set_vector),
+ HANDLE_FUNC(__pkvm_create_mappings),
+ HANDLE_FUNC(__pkvm_create_private_mapping),
+ HANDLE_FUNC(__pkvm_prot_finalize),
+ HANDLE_FUNC(__pkvm_mark_hyp),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
@@ -177,7 +249,16 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
case ESR_ELx_EC_SMC64:
handle_host_smc(host_ctxt);
break;
+ case ESR_ELx_EC_SVE:
+ sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
+ isb();
+ sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+ break;
+ case ESR_ELx_EC_IABT_LOW:
+ case ESR_ELx_EC_DABT_LOW:
+ handle_host_mem_abort(host_ctxt);
+ break;
default:
- hyp_panic();
+ BUG();
}
}
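
The host_hcall[] table above dispatches host hypercalls through an array of
handlers indexed by SMCCC function ID, with the result passed back in a
context register. Stripped of the KVM specifics, the pattern looks roughly
like this (the types, IDs and return codes below are invented for
illustration):

#include <stdio.h>

struct ctx { unsigned long regs[8]; };	/* saved GP registers */

typedef void (*hcall_fn)(struct ctx *);

static void handle_ping(struct ctx *c) { c->regs[1] = 42; }

enum { HCALL_PING, NR_HCALLS };

static const hcall_fn hcalls[] = {
	[HCALL_PING] = handle_ping,	/* ID-indexed handler table */
};

static void dispatch(struct ctx *c)
{
	unsigned long id = c->regs[0];

	if (id >= NR_HCALLS || !hcalls[id]) {
		c->regs[0] = -1UL;	/* "not supported"-style error */
		return;
	}
	hcalls[id](c);
	c->regs[0] = 0;			/* success */
}

int main(void)
{
	struct ctx c = { .regs = { HCALL_PING } };

	dispatch(&c);
	printf("ret=%lu val=%lu\n", c.regs[0], c.regs[1]);	/* ret=0 val=42 */
	return 0;
}
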
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
index 879559057dee..9f54833af400 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
@@ -18,8 +18,7 @@ u64 __ro_after_init hyp_cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID
u64 cpu_logical_map(unsigned int cpu)
{
- if (cpu >= ARRAY_SIZE(hyp_cpu_logical_map))
- hyp_panic();
+ BUG_ON(cpu >= ARRAY_SIZE(hyp_cpu_logical_map));
return hyp_cpu_logical_map[cpu];
}
@@ -30,8 +29,7 @@ unsigned long __hyp_per_cpu_offset(unsigned int cpu)
unsigned long this_cpu_base;
unsigned long elf_base;
- if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base))
- hyp_panic();
+ BUG_ON(cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base));
cpu_base_array = (unsigned long *)&kvm_arm_hyp_percpu_base;
this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
index cd119d82d8e3..f4562f417d3f 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
@@ -25,4 +25,5 @@ SECTIONS {
BEGIN_HYP_SECTION(.data..percpu)
PERCPU_INPUT(L1_CACHE_BYTES)
END_HYP_SECTION
+ HYP_SECTION(.bss)
}
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
new file mode 100644
index 000000000000..d938ce95d3bd
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google LLC
+ * Author: Quentin Perret <qperret@google.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_pgtable.h>
+#include <asm/stage2_pgtable.h>
+
+#include <hyp/switch.h>
+
+#include <nvhe/gfp.h>
+#include <nvhe/memory.h>
+#include <nvhe/mem_protect.h>
+#include <nvhe/mm.h>
+
+#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)
+
+extern unsigned long hyp_nr_cpus;
+struct host_kvm host_kvm;
+
+static struct hyp_pool host_s2_pool;
+
+/*
+ * Copies of the host's CPU features registers holding sanitized values.
+ */
+u64 id_aa64mmfr0_el1_sys_val;
+u64 id_aa64mmfr1_el1_sys_val;
+
+static const u8 pkvm_hyp_id = 1;
+
+static void *host_s2_zalloc_pages_exact(size_t size)
+{
+ return hyp_alloc_pages(&host_s2_pool, get_order(size));
+}
+
+static void *host_s2_zalloc_page(void *pool)
+{
+ return hyp_alloc_pages(pool, 0);
+}
+
+static void host_s2_get_page(void *addr)
+{
+ hyp_get_page(&host_s2_pool, addr);
+}
+
+static void host_s2_put_page(void *addr)
+{
+ hyp_put_page(&host_s2_pool, addr);
+}
+
+static int prepare_s2_pool(void *pgt_pool_base)
+{
+ unsigned long nr_pages, pfn;
+ int ret;
+
+ pfn = hyp_virt_to_pfn(pgt_pool_base);
+ nr_pages = host_s2_pgtable_pages();
+ ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
+ if (ret)
+ return ret;
+
+ host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
+ .zalloc_pages_exact = host_s2_zalloc_pages_exact,
+ .zalloc_page = host_s2_zalloc_page,
+ .phys_to_virt = hyp_phys_to_virt,
+ .virt_to_phys = hyp_virt_to_phys,
+ .page_count = hyp_page_count,
+ .get_page = host_s2_get_page,
+ .put_page = host_s2_put_page,
+ };
+
+ return 0;
+}
+
+static void prepare_host_vtcr(void)
+{
+ u32 parange, phys_shift;
+
+ /* The host stage 2 is id-mapped, so use parange for T0SZ */
+ parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
+ phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);
+
+ host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
+ id_aa64mmfr1_el1_sys_val, phys_shift);
+}
+
+int kvm_host_prepare_stage2(void *pgt_pool_base)
+{
+ struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
+ int ret;
+
+ prepare_host_vtcr();
+ hyp_spin_lock_init(&host_kvm.lock);
+
+ ret = prepare_s2_pool(pgt_pool_base);
+ if (ret)
+ return ret;
+
+ ret = kvm_pgtable_stage2_init_flags(&host_kvm.pgt, &host_kvm.arch,
+ &host_kvm.mm_ops, KVM_HOST_S2_FLAGS);
+ if (ret)
+ return ret;
+
+ mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
+ mmu->arch = &host_kvm.arch;
+ mmu->pgt = &host_kvm.pgt;
+ mmu->vmid.vmid_gen = 0;
+ mmu->vmid.vmid = 0;
+
+ return 0;
+}
+
+int __pkvm_prot_finalize(void)
+{
+ struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
+ struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
+
+ params->vttbr = kvm_get_vttbr(mmu);
+ params->vtcr = host_kvm.arch.vtcr;
+ params->hcr_el2 |= HCR_VM;
+ kvm_flush_dcache_to_poc(params, sizeof(*params));
+
+ write_sysreg(params->hcr_el2, hcr_el2);
+ __load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+
+ /*
+ * Make sure to have an ISB before the TLB maintenance below but only
+ * when __load_stage2() doesn't include one already.
+ */
+ asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
+
+ /* Invalidate stale HCR bits that may be cached in TLBs */
+ __tlbi(vmalls12e1);
+ dsb(nsh);
+ isb();
+
+ return 0;
+}
+
+static int host_stage2_unmap_dev_all(void)
+{
+ struct kvm_pgtable *pgt = &host_kvm.pgt;
+ struct memblock_region *reg;
+ u64 addr = 0;
+ int i, ret;
+
+ /* Unmap all non-memory regions to recycle the pages */
+ for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
+ reg = &hyp_memory[i];
+ ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
+ if (ret)
+ return ret;
+ }
+ return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
+}
+
+static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
+{
+ int cur, left = 0, right = hyp_memblock_nr;
+ struct memblock_region *reg;
+ phys_addr_t end;
+
+ range->start = 0;
+ range->end = ULONG_MAX;
+
+ /* The list of memblock regions is sorted, binary search it */
+ while (left < right) {
+ cur = (left + right) >> 1;
+ reg = &hyp_memory[cur];
+ end = reg->base + reg->size;
+ if (addr < reg->base) {
+ right = cur;
+ range->end = reg->base;
+ } else if (addr >= end) {
+ left = cur + 1;
+ range->start = end;
+ } else {
+ range->start = reg->base;
+ range->end = end;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool range_is_memory(u64 start, u64 end)
+{
+ struct kvm_mem_range r1, r2;
+
+ if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
+ return false;
+ if (r1.start != r2.start)
+ return false;
+
+ return true;
+}
+
+static inline int __host_stage2_idmap(u64 start, u64 end,
+ enum kvm_pgtable_prot prot)
+{
+ return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
+ prot, &host_s2_pool);
+}
+
+static int host_stage2_idmap(u64 addr)
+{
+ enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
+ struct kvm_mem_range range;
+ bool is_memory = find_mem_range(addr, &range);
+ int ret;
+
+ if (is_memory)
+ prot |= KVM_PGTABLE_PROT_X;
+
+ hyp_spin_lock(&host_kvm.lock);
+ ret = kvm_pgtable_stage2_find_range(&host_kvm.pgt, addr, prot, &range);
+ if (ret)
+ goto unlock;
+
+ ret = __host_stage2_idmap(range.start, range.end, prot);
+ if (ret != -ENOMEM)
+ goto unlock;
+
+ /*
+ * The pool has been provided with enough pages to cover all of memory
+ * with page granularity, but it is difficult to know how much of the
+ * MMIO range we will need to cover upfront, so we may need to 'recycle'
+ * the pages if we run out.
+ */
+ ret = host_stage2_unmap_dev_all();
+ if (ret)
+ goto unlock;
+
+ ret = __host_stage2_idmap(range.start, range.end, prot);
+
+unlock:
+ hyp_spin_unlock(&host_kvm.lock);
+
+ return ret;
+}
+
+int __pkvm_mark_hyp(phys_addr_t start, phys_addr_t end)
+{
+ int ret;
+
+ /*
+ * host_stage2_unmap_dev_all() currently relies on MMIO mappings being
+ * non-persistent, so don't allow changing page ownership in MMIO range.
+ */
+ if (!range_is_memory(start, end))
+ return -EINVAL;
+
+ hyp_spin_lock(&host_kvm.lock);
+ ret = kvm_pgtable_stage2_set_owner(&host_kvm.pgt, start, end - start,
+ &host_s2_pool, pkvm_hyp_id);
+ hyp_spin_unlock(&host_kvm.lock);
+
+ return ret != -EAGAIN ? ret : 0;
+}
+
+void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
+{
+ struct kvm_vcpu_fault_info fault;
+ u64 esr, addr;
+ int ret = 0;
+
+ esr = read_sysreg_el2(SYS_ESR);
+ BUG_ON(!__get_fault_info(esr, &fault));
+
+ addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
+ ret = host_stage2_idmap(addr);
+ BUG_ON(ret && ret != -EAGAIN);
+}
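
find_mem_range() above binary-searches the sorted memblock list and, on a
miss, reports the hole surrounding the faulting address so the caller can
map the whole gap in one go. A self-contained sketch of that lookup (the
struct and names are illustrative):

#include <stdbool.h>
#include <stdint.h>

struct region { uint64_t base, size; };	/* sorted, non-overlapping */

/*
 * On a hit, *start/*end bound the containing region; on a miss, they
 * bound the gap around addr instead.
 */
static bool range_lookup(const struct region *regs, int nr, uint64_t addr,
			 uint64_t *start, uint64_t *end)
{
	int left = 0, right = nr;

	*start = 0;
	*end = UINT64_MAX;

	while (left < right) {
		int cur = (left + right) / 2;
		uint64_t rend = regs[cur].base + regs[cur].size;

		if (addr < regs[cur].base) {
			right = cur;
			*end = regs[cur].base;
		} else if (addr >= rend) {
			left = cur + 1;
			*start = rend;
		} else {
			*start = regs[cur].base;
			*end = rend;
			return true;
		}
	}
	return false;
}
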
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
new file mode 100644
index 000000000000..a8efdf0f9003
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google LLC
+ * Author: Quentin Perret <qperret@google.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_pgtable.h>
+#include <asm/spectre.h>
+
+#include <nvhe/early_alloc.h>
+#include <nvhe/gfp.h>
+#include <nvhe/memory.h>
+#include <nvhe/mm.h>
+#include <nvhe/spinlock.h>
+
+struct kvm_pgtable pkvm_pgtable;
+hyp_spinlock_t pkvm_pgd_lock;
+u64 __io_map_base;
+
+struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
+unsigned int hyp_memblock_nr;
+
+int __pkvm_create_mappings(unsigned long start, unsigned long size,
+ unsigned long phys, enum kvm_pgtable_prot prot)
+{
+ int err;
+
+ hyp_spin_lock(&pkvm_pgd_lock);
+ err = kvm_pgtable_hyp_map(&pkvm_pgtable, start, size, phys, prot);
+ hyp_spin_unlock(&pkvm_pgd_lock);
+
+ return err;
+}
+
+unsigned long __pkvm_create_private_mapping(phys_addr_t phys, size_t size,
+ enum kvm_pgtable_prot prot)
+{
+ unsigned long addr;
+ int err;
+
+ hyp_spin_lock(&pkvm_pgd_lock);
+
+ size = PAGE_ALIGN(size + offset_in_page(phys));
+ addr = __io_map_base;
+ __io_map_base += size;
+
+ /* Are we overflowing on the vmemmap? */
+ if (__io_map_base > __hyp_vmemmap) {
+ __io_map_base -= size;
+ addr = (unsigned long)ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ err = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, size, phys, prot);
+ if (err) {
+ addr = (unsigned long)ERR_PTR(err);
+ goto out;
+ }
+
+ addr = addr + offset_in_page(phys);
+out:
+ hyp_spin_unlock(&pkvm_pgd_lock);
+
+ return addr;
+}
+
+int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
+{
+ unsigned long start = (unsigned long)from;
+ unsigned long end = (unsigned long)to;
+ unsigned long virt_addr;
+ phys_addr_t phys;
+
+ start = start & PAGE_MASK;
+ end = PAGE_ALIGN(end);
+
+ for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
+ int err;
+
+ phys = hyp_virt_to_phys((void *)virt_addr);
+ err = __pkvm_create_mappings(virt_addr, PAGE_SIZE, phys, prot);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
+{
+ unsigned long start, end;
+
+ hyp_vmemmap_range(phys, size, &start, &end);
+
+ return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
+}
+
+static void *__hyp_bp_vect_base;
+int pkvm_cpu_set_vector(enum arm64_hyp_spectre_vector slot)
+{
+ void *vector;
+
+ switch (slot) {
+ case HYP_VECTOR_DIRECT: {
+ vector = __kvm_hyp_vector;
+ break;
+ }
+ case HYP_VECTOR_SPECTRE_DIRECT: {
+ vector = __bp_harden_hyp_vecs;
+ break;
+ }
+ case HYP_VECTOR_INDIRECT:
+ case HYP_VECTOR_SPECTRE_INDIRECT: {
+ vector = (void *)__hyp_bp_vect_base;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ vector = __kvm_vector_slot2addr(vector, slot);
+ *this_cpu_ptr(&kvm_hyp_vector) = (unsigned long)vector;
+
+ return 0;
+}
+
+int hyp_map_vectors(void)
+{
+ phys_addr_t phys;
+ void *bp_base;
+
+ if (!cpus_have_const_cap(ARM64_SPECTRE_V3A))
+ return 0;
+
+ phys = __hyp_pa(__bp_harden_hyp_vecs);
+ bp_base = (void *)__pkvm_create_private_mapping(phys,
+ __BP_HARDEN_HYP_VECS_SZ,
+ PAGE_HYP_EXEC);
+ if (IS_ERR_OR_NULL(bp_base))
+ return PTR_ERR(bp_base);
+
+ __hyp_bp_vect_base = bp_base;
+
+ return 0;
+}
+
+int hyp_create_idmap(u32 hyp_va_bits)
+{
+ unsigned long start, end;
+
+ start = hyp_virt_to_phys((void *)__hyp_idmap_text_start);
+ start = ALIGN_DOWN(start, PAGE_SIZE);
+
+ end = hyp_virt_to_phys((void *)__hyp_idmap_text_end);
+ end = ALIGN(end, PAGE_SIZE);
+
+ /*
+ * One half of the VA space is reserved to linearly map portions of
+ * memory -- see va_layout.c for more details. The other half of the VA
+ * space contains the trampoline page, and needs some care. Split that
+ * second half in two and find the quarter of VA space not conflicting
+ * with the idmap to place the IOs and the vmemmap. IOs use the lower
+ * half of the quarter and the vmemmap the upper half.
+ */
+ __io_map_base = start & BIT(hyp_va_bits - 2);
+ __io_map_base ^= BIT(hyp_va_bits - 2);
+ __hyp_vmemmap = __io_map_base | BIT(hyp_va_bits - 3);
+
+ return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
+}
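
hyp_create_idmap() places the IO and vmemmap areas in the quarter of the
hypervisor VA space that does not contain the idmap, by masking out and then
flipping bit (hyp_va_bits - 2) of the idmap address. A small worked example
of that bit arithmetic (the address and VA width are arbitrary assumptions):

#include <inttypes.h>
#include <stdio.h>

#define BIT(n)	(1ULL << (n))

int main(void)
{
	unsigned int hyp_va_bits = 48;			/* illustrative */
	uint64_t idmap = 0x0000800080000000ULL;		/* arbitrary address */
	uint64_t io_map_base, vmemmap;

	/* Keep the quarter-selecting bit of the idmap, then flip it */
	io_map_base = idmap & BIT(hyp_va_bits - 2);
	io_map_base ^= BIT(hyp_va_bits - 2);
	/* vmemmap lives in the upper half of that quarter */
	vmemmap = io_map_base | BIT(hyp_va_bits - 3);

	printf("idmap   @ %#018" PRIx64 "\n", idmap);
	printf("IO      @ %#018" PRIx64 "\n", io_map_base);
	printf("vmemmap @ %#018" PRIx64 "\n", vmemmap);
	return 0;
}
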
diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
new file mode 100644
index 000000000000..41fc25bdfb34
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google LLC
+ * Author: Quentin Perret <qperret@google.com>
+ */
+
+#include <asm/kvm_hyp.h>
+#include <nvhe/gfp.h>
+
+u64 __hyp_vmemmap;
+
+/*
+ * Index the hyp_vmemmap to find a potential buddy page, but make no assumption
+ * about its current state.
+ *
+ * Example buddy-tree for a 4-pages physically contiguous pool:
+ *
+ * o : Page 3
+ * /
+ * o-o : Page 2
+ * /
+ * / o : Page 1
+ * / /
+ * o---o-o : Page 0
+ * Order 2 1 0
+ *
+ * Example of requests on this pool:
+ * __find_buddy_nocheck(pool, page 0, order 0) => page 1
+ * __find_buddy_nocheck(pool, page 0, order 1) => page 2
+ * __find_buddy_nocheck(pool, page 1, order 0) => page 0
+ * __find_buddy_nocheck(pool, page 2, order 0) => page 3
+ */
+static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
+ struct hyp_page *p,
+ unsigned short order)
+{
+ phys_addr_t addr = hyp_page_to_phys(p);
+
+ addr ^= (PAGE_SIZE << order);
+
+ /*
+ * Don't return a page outside the pool range -- it belongs to
+ * something else and may not be mapped in hyp_vmemmap.
+ */
+ if (addr < pool->range_start || addr >= pool->range_end)
+ return NULL;
+
+ return hyp_phys_to_page(addr);
+}
+
+/* Find a buddy page currently available for allocation */
+static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
+ struct hyp_page *p,
+ unsigned short order)
+{
+ struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);
+
+ if (!buddy || buddy->order != order || buddy->refcount)
+ return NULL;
+
+ return buddy;
+
+}
+
+/*
+ * Pages that are available for allocation are tracked in free-lists, so we use
+ * the pages themselves to store the list nodes to avoid wasting space. As the
+ * allocator always returns zeroed pages (which are zeroed on the hyp_put_page()
+ * path to optimize allocation speed), we also need to clean up the list node in
+ * each page when we take it out of the list.
+ */
+static inline void page_remove_from_list(struct hyp_page *p)
+{
+ struct list_head *node = hyp_page_to_virt(p);
+
+ __list_del_entry(node);
+ memset(node, 0, sizeof(*node));
+}
+
+static inline void page_add_to_list(struct hyp_page *p, struct list_head *head)
+{
+ struct list_head *node = hyp_page_to_virt(p);
+
+ INIT_LIST_HEAD(node);
+ list_add_tail(node, head);
+}
+
+static inline struct hyp_page *node_to_page(struct list_head *node)
+{
+ return hyp_virt_to_page(node);
+}
+
+static void __hyp_attach_page(struct hyp_pool *pool,
+ struct hyp_page *p)
+{
+ unsigned short order = p->order;
+ struct hyp_page *buddy;
+
+ memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);
+
+ /*
+ * Only the first struct hyp_page of a high-order page (otherwise known
+ * as the 'head') should have p->order set. The non-head pages should
+ * have p->order = HYP_NO_ORDER. Here @p may no longer be the head
+ * after coalescing, so make sure to mark it HYP_NO_ORDER proactively.
+ */
+ p->order = HYP_NO_ORDER;
+ for (; (order + 1) < pool->max_order; order++) {
+ buddy = __find_buddy_avail(pool, p, order);
+ if (!buddy)
+ break;
+
+ /* Take the buddy out of its list, and coalesce it with @p */
+ page_remove_from_list(buddy);
+ buddy->order = HYP_NO_ORDER;
+ p = min(p, buddy);
+ }
+
+ /* Mark the new head, and insert it */
+ p->order = order;
+ page_add_to_list(p, &pool->free_area[order]);
+}
+
+static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
+ struct hyp_page *p,
+ unsigned short order)
+{
+ struct hyp_page *buddy;
+
+ page_remove_from_list(p);
+ while (p->order > order) {
+ /*
+ * The buddy of order n - 1 currently has HYP_NO_ORDER as it
+ * is covered by a higher-level page (whose head is @p). Use
+ * __find_buddy_nocheck() to find it and insert it into
+ * free_area[n - 1], effectively splitting @p in half.
+ */
+ p->order--;
+ buddy = __find_buddy_nocheck(pool, p, p->order);
+ buddy->order = p->order;
+ page_add_to_list(buddy, &pool->free_area[buddy->order]);
+ }
+
+ return p;
+}
+
+static inline void hyp_page_ref_inc(struct hyp_page *p)
+{
+ BUG_ON(p->refcount == USHRT_MAX);
+ p->refcount++;
+}
+
+static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
+{
+ p->refcount--;
+ return (p->refcount == 0);
+}
+
+static inline void hyp_set_page_refcounted(struct hyp_page *p)
+{
+ BUG_ON(p->refcount);
+ p->refcount = 1;
+}
+
+static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
+{
+ if (hyp_page_ref_dec_and_test(p))
+ __hyp_attach_page(pool, p);
+}
+
+/*
+ * Changes to the buddy tree and page refcounts must be done with the hyp_pool
+ * lock held. If a refcount change requires an update to the buddy tree (e.g.
+ * hyp_put_page()), both operations must be done within the same critical
+ * section to guarantee transient states (e.g. a page with null refcount but
+ * not yet attached to a free list) can't be observed by well-behaved readers.
+ */
+void hyp_put_page(struct hyp_pool *pool, void *addr)
+{
+ struct hyp_page *p = hyp_virt_to_page(addr);
+
+ hyp_spin_lock(&pool->lock);
+ __hyp_put_page(pool, p);
+ hyp_spin_unlock(&pool->lock);
+}
+
+void hyp_get_page(struct hyp_pool *pool, void *addr)
+{
+ struct hyp_page *p = hyp_virt_to_page(addr);
+
+ hyp_spin_lock(&pool->lock);
+ hyp_page_ref_inc(p);
+ hyp_spin_unlock(&pool->lock);
+}
+
+void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
+{
+ unsigned short i = order;
+ struct hyp_page *p;
+
+ hyp_spin_lock(&pool->lock);
+
+ /* Look for a high-enough-order page */
+ while (i < pool->max_order && list_empty(&pool->free_area[i]))
+ i++;
+ if (i >= pool->max_order) {
+ hyp_spin_unlock(&pool->lock);
+ return NULL;
+ }
+
+ /* Extract it from the tree at the right order */
+ p = node_to_page(pool->free_area[i].next);
+ p = __hyp_extract_page(pool, p, order);
+
+ hyp_set_page_refcounted(p);
+ hyp_spin_unlock(&pool->lock);
+
+ return hyp_page_to_virt(p);
+}
+
+int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
+ unsigned int reserved_pages)
+{
+ phys_addr_t phys = hyp_pfn_to_phys(pfn);
+ struct hyp_page *p;
+ int i;
+
+ hyp_spin_lock_init(&pool->lock);
+ pool->max_order = min(MAX_ORDER, get_order(nr_pages << PAGE_SHIFT));
+ for (i = 0; i < pool->max_order; i++)
+ INIT_LIST_HEAD(&pool->free_area[i]);
+ pool->range_start = phys;
+ pool->range_end = phys + (nr_pages << PAGE_SHIFT);
+
+ /* Init the vmemmap portion */
+ p = hyp_phys_to_page(phys);
+ for (i = 0; i < nr_pages; i++) {
+ p[i].order = 0;
+ hyp_set_page_refcounted(&p[i]);
+ }
+
+ /* Attach the unused pages to the buddy tree */
+ for (i = reserved_pages; i < nr_pages; i++)
+ __hyp_put_page(pool, &p[i]);
+
+ return 0;
+}
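
__find_buddy_nocheck() above locates a block's buddy by XORing its address
with the block size, which is what produces the example requests listed in
the buddy-tree comment at the top of the file. A tiny demonstration (4K
pages assumed):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Buddy address computation, as in __find_buddy_nocheck() */
static uint64_t buddy_of(uint64_t addr, unsigned short order)
{
	return addr ^ (PAGE_SIZE << order);
}

int main(void)
{
	/* 4-page pool starting at 0: mirrors the tree in the comment */
	printf("buddy(page0, order0) = page %llu\n",
	       (unsigned long long)(buddy_of(0 * PAGE_SIZE, 0) / PAGE_SIZE)); /* 1 */
	printf("buddy(page0, order1) = page %llu\n",
	       (unsigned long long)(buddy_of(0 * PAGE_SIZE, 1) / PAGE_SIZE)); /* 2 */
	printf("buddy(page2, order0) = page %llu\n",
	       (unsigned long long)(buddy_of(2 * PAGE_SIZE, 0) / PAGE_SIZE)); /* 3 */
	return 0;
}
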
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 63de71c0481e..08508783ec3d 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -11,6 +11,7 @@
#include <linux/kvm_host.h>
#include <uapi/linux/psci.h>
+#include <nvhe/memory.h>
#include <nvhe/trap_handler.h>
void kvm_hyp_cpu_entry(unsigned long r0);
@@ -20,9 +21,6 @@ void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
/* Config options set by the host. */
struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
-s64 __ro_after_init hyp_physvirt_offset;
-
-#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
#define INVALID_CPU_ID UINT_MAX
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
new file mode 100644
index 000000000000..0b574d106519
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google LLC
+ * Author: Quentin Perret <qperret@google.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_pgtable.h>
+
+#include <nvhe/early_alloc.h>
+#include <nvhe/gfp.h>
+#include <nvhe/memory.h>
+#include <nvhe/mem_protect.h>
+#include <nvhe/mm.h>
+#include <nvhe/trap_handler.h>
+
+struct hyp_pool hpool;
+unsigned long hyp_nr_cpus;
+
+#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
+ (unsigned long)__per_cpu_start)
+
+static void *vmemmap_base;
+static void *hyp_pgt_base;
+static void *host_s2_pgt_base;
+static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
+
+static int divide_memory_pool(void *virt, unsigned long size)
+{
+ unsigned long vstart, vend, nr_pages;
+
+ hyp_early_alloc_init(virt, size);
+
+ hyp_vmemmap_range(__hyp_pa(virt), size, &vstart, &vend);
+ nr_pages = (vend - vstart) >> PAGE_SHIFT;
+ vmemmap_base = hyp_early_alloc_contig(nr_pages);
+ if (!vmemmap_base)
+ return -ENOMEM;
+
+ nr_pages = hyp_s1_pgtable_pages();
+ hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
+ if (!hyp_pgt_base)
+ return -ENOMEM;
+
+ nr_pages = host_s2_pgtable_pages();
+ host_s2_pgt_base = hyp_early_alloc_contig(nr_pages);
+ if (!host_s2_pgt_base)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
+ unsigned long *per_cpu_base,
+ u32 hyp_va_bits)
+{
+ void *start, *end, *virt = hyp_phys_to_virt(phys);
+ unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
+ int ret, i;
+
+ /* Recreate the hyp page-table using the early page allocator */
+ hyp_early_alloc_init(hyp_pgt_base, pgt_size);
+ ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
+ &hyp_early_alloc_mm_ops);
+ if (ret)
+ return ret;
+
+ ret = hyp_create_idmap(hyp_va_bits);
+ if (ret)
+ return ret;
+
+ ret = hyp_map_vectors();
+ if (ret)
+ return ret;
+
+ ret = hyp_back_vmemmap(phys, size, hyp_virt_to_phys(vmemmap_base));
+ if (ret)
+ return ret;
+
+ ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
+ if (ret)
+ return ret;
+
+ ret = pkvm_create_mappings(__start_rodata, __end_rodata, PAGE_HYP_RO);
+ if (ret)
+ return ret;
+
+ ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
+ if (ret)
+ return ret;
+
+ ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
+ if (ret)
+ return ret;
+
+ ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, PAGE_HYP_RO);
+ if (ret)
+ return ret;
+
+ ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < hyp_nr_cpus; i++) {
+ start = (void *)kern_hyp_va(per_cpu_base[i]);
+ end = start + PAGE_ALIGN(hyp_percpu_size);
+ ret = pkvm_create_mappings(start, end, PAGE_HYP);
+ if (ret)
+ return ret;
+
+ end = (void *)per_cpu_ptr(&kvm_init_params, i)->stack_hyp_va;
+ start = end - PAGE_SIZE;
+ ret = pkvm_create_mappings(start, end, PAGE_HYP);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void update_nvhe_init_params(void)
+{
+ struct kvm_nvhe_init_params *params;
+ unsigned long i;
+
+ for (i = 0; i < hyp_nr_cpus; i++) {
+ params = per_cpu_ptr(&kvm_init_params, i);
+ params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
+ dcache_clean_inval_poc((unsigned long)params,
+ (unsigned long)params + sizeof(*params));
+ }
+}
+
+static void *hyp_zalloc_hyp_page(void *arg)
+{
+ return hyp_alloc_pages(&hpool, 0);
+}
+
+static void hpool_get_page(void *addr)
+{
+ hyp_get_page(&hpool, addr);
+}
+
+static void hpool_put_page(void *addr)
+{
+ hyp_put_page(&hpool, addr);
+}
+
+void __noreturn __pkvm_init_finalise(void)
+{
+ struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
+ struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
+ unsigned long nr_pages, reserved_pages, pfn;
+ int ret;
+
+ /* Now that the vmemmap is backed, install the full-fledged allocator */
+ pfn = hyp_virt_to_pfn(hyp_pgt_base);
+ nr_pages = hyp_s1_pgtable_pages();
+ reserved_pages = hyp_early_alloc_nr_used_pages();
+ ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
+ if (ret)
+ goto out;
+
+ ret = kvm_host_prepare_stage2(host_s2_pgt_base);
+ if (ret)
+ goto out;
+
+ pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
+ .zalloc_page = hyp_zalloc_hyp_page,
+ .phys_to_virt = hyp_phys_to_virt,
+ .virt_to_phys = hyp_virt_to_phys,
+ .get_page = hpool_get_page,
+ .put_page = hpool_put_page,
+ };
+ pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
+
+out:
+ /*
+ * We tail-called here from handle___pkvm_init() and will not return,
+ * so make sure to propagate the return value to the host.
+ */
+ cpu_reg(host_ctxt, 1) = ret;
+
+ __host_enter(host_ctxt);
+}
+
+int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
+ unsigned long *per_cpu_base, u32 hyp_va_bits)
+{
+ struct kvm_nvhe_init_params *params;
+ void *virt = hyp_phys_to_virt(phys);
+ void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
+ int ret;
+
+ if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
+ return -EINVAL;
+
+ hyp_spin_lock_init(&pkvm_pgd_lock);
+ hyp_nr_cpus = nr_cpus;
+
+ ret = divide_memory_pool(virt, size);
+ if (ret)
+ return ret;
+
+ ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
+ if (ret)
+ return ret;
+
+ update_nvhe_init_params();
+
+ /* Jump in the idmap page to switch to the new page-tables */
+ params = this_cpu_ptr(&kvm_init_params);
+ fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
+ fn(__hyp_pa(params), __pkvm_init_finalise);
+
+ unreachable();
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/stub.c b/arch/arm64/kvm/hyp/nvhe/stub.c
new file mode 100644
index 000000000000..c0aa6bbfd79d
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/stub.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Stubs for out-of-line function calls caused by re-using kernel
+ * infrastructure at EL2.
+ *
+ * Copyright (C) 2020 - Google LLC
+ */
+
+#include <linux/list.h>
+
+#ifdef CONFIG_DEBUG_LIST
+bool __list_add_valid(struct list_head *new, struct list_head *prev,
+ struct list_head *next)
+{
+ return true;
+}
+
+bool __list_del_entry_valid(struct list_head *entry)
+{
+ return true;
+}
+#endif
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index f3d0e9eca56c..f7af9688c1f7 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -4,7 +4,6 @@
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
-#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>
@@ -28,6 +27,8 @@
#include <asm/processor.h>
#include <asm/thread_info.h>
+#include <nvhe/mem_protect.h>
+
/* Non-VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
@@ -41,9 +42,9 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
__activate_traps_common(vcpu);
val = CPTR_EL2_DEFAULT;
- val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM;
+ val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
if (!update_fp_enabled(vcpu)) {
- val |= CPTR_EL2_TFP;
+ val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
__activate_traps_fpsimd32(vcpu);
}
@@ -68,7 +69,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
extern char __kvm_hyp_host_vector[];
- u64 mdcr_el2;
+ u64 mdcr_el2, cptr;
___deactivate_traps(vcpu);
@@ -95,19 +96,17 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
mdcr_el2 &= MDCR_EL2_HPMN_MASK;
mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
+ mdcr_el2 |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
write_sysreg(mdcr_el2, mdcr_el2);
- if (is_protected_kvm_enabled())
- write_sysreg(HCR_HOST_NVHE_PROTECTED_FLAGS, hcr_el2);
- else
- write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
- write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
- write_sysreg(__kvm_hyp_host_vector, vbar_el2);
-}
+ write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
-static void __load_host_stage2(void)
-{
- write_sysreg(0, vttbr_el2);
+ cptr = CPTR_EL2_DEFAULT;
+ if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
+ cptr |= CPTR_EL2_TZ;
+
+ write_sysreg(cptr, cptr_el2);
+ write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}
/* Save VGICv3 state on non-VHE systems */
@@ -192,8 +191,16 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);
__sysreg_save_state_nvhe(host_ctxt);
+ /*
+ * We must flush and disable the SPE buffer for nVHE, as
+ * the translation regime (EL1&0) is going to be loaded with
+ * that of the guest. We must also do this before we change the
+ * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
+ * before we load the guest's Stage-1 tables.
+ */
+ __debug_save_host_buffers_nvhe(vcpu);
- __adjust_pc(vcpu);
+ __kvm_adjust_pc(vcpu);
/*
* We must restore the 32-bit state before the sysregs, thanks
@@ -234,11 +241,12 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
__fpsimd_save_fpexc32(vcpu);
+ __debug_switch_to_host(vcpu);
/*
* This must come after restoring the host sysregs, since a non-VHE
* system may enable SPE here and make use of the TTBRs.
*/
- __debug_switch_to_host(vcpu);
+ __debug_restore_host_buffers_nvhe(vcpu);
if (pmu_switch_needed)
__pmu_switch_to_host(host_ctxt);
@@ -257,7 +265,6 @@ void __noreturn hyp_panic(void)
u64 spsr = read_sysreg_el2(SYS_SPSR);
u64 elr = read_sysreg_el2(SYS_ELR);
u64 par = read_sysreg_par();
- bool restore_host = true;
struct kvm_cpu_context *host_ctxt;
struct kvm_vcpu *vcpu;
@@ -271,7 +278,7 @@ void __noreturn hyp_panic(void)
__sysreg_restore_state_nvhe(host_ctxt);
}
- __hyp_do_panic(restore_host, spsr, elr, par);
+ __hyp_do_panic(host_ctxt, spsr, elr, par);
unreachable();
}
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index fbde89a2c6e8..38ed0f6f2703 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -8,6 +8,8 @@
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>
+#include <nvhe/mem_protect.h>
+
struct tlb_inv_context {
u64 tcr;
};
@@ -43,7 +45,7 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
{
- write_sysreg(0, vttbr_el2);
+ __load_host_stage2();
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
/* Ensure write of the host VMID */
@@ -102,7 +104,7 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
* you should be running with VHE enabled.
*/
if (icache_is_vpipt())
- __flush_icache_all();
+ icache_inval_all_pou();
__tlb_switch_to_host(&cxt);
}
@@ -123,7 +125,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
__tlb_switch_to_host(&cxt);
}
-void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
+void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;
@@ -131,6 +133,7 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
__tlb_switch_to_guest(mmu, &cxt);
__tlbi(vmalle1);
+ asm volatile("ic iallu");
dsb(nsh);
isb();
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 4d177ce1d536..05321f4165e3 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -9,8 +9,7 @@
#include <linux/bitfield.h>
#include <asm/kvm_pgtable.h>
-
-#define KVM_PGTABLE_MAX_LEVELS 4U
+#include <asm/stage2_pgtable.h>
#define KVM_PTE_VALID BIT(0)
@@ -49,6 +48,11 @@
KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
KVM_PTE_LEAF_ATTR_HI_S2_XN)
+#define KVM_PTE_LEAF_ATTR_S2_IGNORED GENMASK(58, 55)
+
+#define KVM_INVALID_PTE_OWNER_MASK GENMASK(63, 56)
+#define KVM_MAX_OWNER_ID 1
+
struct kvm_pgtable_walk_data {
struct kvm_pgtable *pgt;
struct kvm_pgtable_walker *walker;
@@ -68,21 +72,36 @@ static u64 kvm_granule_size(u32 level)
return BIT(kvm_granule_shift(level));
}
-static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
+#define KVM_PHYS_INVALID (-1ULL)
+
+static bool kvm_phys_is_valid(u64 phys)
{
- u64 granule = kvm_granule_size(level);
+ return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
+}
+static bool kvm_level_supports_block_mapping(u32 level)
+{
/*
* Reject invalid block mappings and don't bother with 4TB mappings for
* 52-bit PAs.
*/
- if (level == 0 || (PAGE_SIZE != SZ_4K && level == 1))
+ return !(level == 0 || (PAGE_SIZE != SZ_4K && level == 1));
+}
+
+static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
+{
+ u64 granule = kvm_granule_size(level);
+
+ if (!kvm_level_supports_block_mapping(level))
return false;
if (granule > (end - addr))
return false;
- return IS_ALIGNED(addr, granule) && IS_ALIGNED(phys, granule);
+ if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
+ return false;
+
+ return IS_ALIGNED(addr, granule);
}
static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
@@ -152,20 +171,20 @@ static kvm_pte_t kvm_phys_to_pte(u64 pa)
return pte;
}
-static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte)
+static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
- return __va(kvm_pte_to_phys(pte));
+ return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}
-static void kvm_set_invalid_pte(kvm_pte_t *ptep)
+static void kvm_clear_pte(kvm_pte_t *ptep)
{
- kvm_pte_t pte = *ptep;
- WRITE_ONCE(*ptep, pte & ~KVM_PTE_VALID);
+ WRITE_ONCE(*ptep, 0);
}
-static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp)
+static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
+ struct kvm_pgtable_mm_ops *mm_ops)
{
- kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(__pa(childp));
+ kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));
pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
pte |= KVM_PTE_VALID;
@@ -187,6 +206,11 @@ static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
return pte;
}
+static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
+{
+ return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
+}
+
static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr,
u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag)
@@ -223,11 +247,12 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
goto out;
if (!table) {
+ data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
data->addr += kvm_granule_size(level);
goto out;
}
- childp = kvm_pte_follow(pte);
+ childp = kvm_pte_follow(pte, data->pgt->mm_ops);
ret = __kvm_pgtable_walk(data, childp, level + 1);
if (ret)
goto out;
@@ -302,12 +327,12 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
}
struct hyp_map_data {
- u64 phys;
- kvm_pte_t attr;
+ u64 phys;
+ kvm_pte_t attr;
+ struct kvm_pgtable_mm_ops *mm_ops;
};
-static int hyp_map_set_prot_attr(enum kvm_pgtable_prot prot,
- struct hyp_map_data *data)
+static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
{
bool device = prot & KVM_PGTABLE_PROT_DEVICE;
u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
@@ -332,7 +357,8 @@ static int hyp_map_set_prot_attr(enum kvm_pgtable_prot prot,
attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
- data->attr = attr;
+ *ptep = attr;
+
return 0;
}
@@ -358,6 +384,8 @@ static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag, void * const arg)
{
kvm_pte_t *childp;
+ struct hyp_map_data *data = arg;
+ struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
if (hyp_map_walker_try_leaf(addr, end, level, ptep, arg))
return 0;
@@ -365,11 +393,11 @@ static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
return -EINVAL;
- childp = (kvm_pte_t *)get_zeroed_page(GFP_KERNEL);
+ childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
if (!childp)
return -ENOMEM;
- kvm_set_table_pte(ptep, childp);
+ kvm_set_table_pte(ptep, childp, mm_ops);
return 0;
}
@@ -379,6 +407,7 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
int ret;
struct hyp_map_data map_data = {
.phys = ALIGN_DOWN(phys, PAGE_SIZE),
+ .mm_ops = pgt->mm_ops,
};
struct kvm_pgtable_walker walker = {
.cb = hyp_map_walker,
@@ -386,7 +415,7 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
.arg = &map_data,
};
- ret = hyp_map_set_prot_attr(prot, &map_data);
+ ret = hyp_set_prot_attr(prot, &map_data.attr);
if (ret)
return ret;
@@ -396,16 +425,18 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
return ret;
}
-int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits)
+int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
+ struct kvm_pgtable_mm_ops *mm_ops)
{
u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits);
- pgt->pgd = (kvm_pte_t *)get_zeroed_page(GFP_KERNEL);
+ pgt->pgd = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
if (!pgt->pgd)
return -ENOMEM;
pgt->ia_bits = va_bits;
pgt->start_level = KVM_PGTABLE_MAX_LEVELS - levels;
+ pgt->mm_ops = mm_ops;
pgt->mmu = NULL;
return 0;
}
@@ -413,7 +444,9 @@ int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits)
static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag, void * const arg)
{
- free_page((unsigned long)kvm_pte_follow(*ptep));
+ struct kvm_pgtable_mm_ops *mm_ops = arg;
+
+ mm_ops->put_page((void *)kvm_pte_follow(*ptep, mm_ops));
return 0;
}
@@ -422,29 +455,75 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
struct kvm_pgtable_walker walker = {
.cb = hyp_free_walker,
.flags = KVM_PGTABLE_WALK_TABLE_POST,
+ .arg = pgt->mm_ops,
};
WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
- free_page((unsigned long)pgt->pgd);
+ pgt->mm_ops->put_page(pgt->pgd);
pgt->pgd = NULL;
}
struct stage2_map_data {
u64 phys;
kvm_pte_t attr;
+ u8 owner_id;
kvm_pte_t *anchor;
+ kvm_pte_t *childp;
struct kvm_s2_mmu *mmu;
- struct kvm_mmu_memory_cache *memcache;
+ void *memcache;
+
+ struct kvm_pgtable_mm_ops *mm_ops;
};
-static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot,
- struct stage2_map_data *data)
+u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
+{
+ u64 vtcr = VTCR_EL2_FLAGS;
+ u8 lvls;
+
+ vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
+ vtcr |= VTCR_EL2_T0SZ(phys_shift);
+ /*
+ * Use a minimum 2 level page table to prevent splitting
+ * host PMD huge pages at stage2.
+ */
+ lvls = stage2_pgtable_levels(phys_shift);
+ if (lvls < 2)
+ lvls = 2;
+ vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
+
+ /*
+ * Enable the Hardware Access Flag management, unconditionally
+ * on all CPUs. The feature is RES0 on CPUs without support
+ * and must be ignored by those CPUs.
+ */
+ vtcr |= VTCR_EL2_HA;
+
+ /* Set the vmid bits */
+ vtcr |= (get_vmid_bits(mmfr1) == 16) ?
+ VTCR_EL2_VS_16BIT :
+ VTCR_EL2_VS_8BIT;
+
+ return vtcr;
+}
+
+static bool stage2_has_fwb(struct kvm_pgtable *pgt)
+{
+ if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+ return false;
+
+ return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
+}
+
+#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
+
+static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
+ kvm_pte_t *ptep)
{
bool device = prot & KVM_PGTABLE_PROT_DEVICE;
- kvm_pte_t attr = device ? PAGE_S2_MEMATTR(DEVICE_nGnRE) :
- PAGE_S2_MEMATTR(NORMAL);
+ kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
+ KVM_S2_MEMATTR(pgt, NORMAL);
u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
if (!(prot & KVM_PGTABLE_PROT_X))
@@ -460,44 +539,98 @@ static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot,
attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
- data->attr = attr;
+ *ptep = attr;
+
return 0;
}
+static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
+{
+ if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
+ return true;
+
+ return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS));
+}
+
+static bool stage2_pte_is_counted(kvm_pte_t pte)
+{
+ /*
+ * The refcount tracks valid entries as well as invalid entries if they
+ * encode ownership of a page by an entity other than the page-table
+ * owner, whose id is 0.
+ */
+ return !!pte;
+}
+
+static void stage2_put_pte(kvm_pte_t *ptep, struct kvm_s2_mmu *mmu, u64 addr,
+ u32 level, struct kvm_pgtable_mm_ops *mm_ops)
+{
+ /*
+ * Clear the existing PTE, and perform break-before-make with
+ * TLB maintenance if it was valid.
+ */
+ if (kvm_pte_valid(*ptep)) {
+ kvm_clear_pte(ptep);
+ kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
+ }
+
+ mm_ops->put_page(ptep);
+}
+
+static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
+{
+ u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
+ return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
+}
+
+static bool stage2_pte_executable(kvm_pte_t pte)
+{
+ return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
+}
+
static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
kvm_pte_t *ptep,
struct stage2_map_data *data)
{
kvm_pte_t new, old = *ptep;
u64 granule = kvm_granule_size(level), phys = data->phys;
- struct page *page = virt_to_page(ptep);
+ struct kvm_pgtable *pgt = data->mmu->pgt;
+ struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
if (!kvm_block_mapping_supported(addr, end, phys, level))
return -E2BIG;
- new = kvm_init_valid_leaf_pte(phys, data->attr, level);
- if (kvm_pte_valid(old)) {
+ if (kvm_phys_is_valid(phys))
+ new = kvm_init_valid_leaf_pte(phys, data->attr, level);
+ else
+ new = kvm_init_invalid_leaf_owner(data->owner_id);
+
+ if (stage2_pte_is_counted(old)) {
/*
* Skip updating the PTE if we are trying to recreate the exact
* same mapping or only change the access permissions. Instead,
* the vCPU will exit one more time from the guest if still needed
* and then go through the path of relaxing permissions.
*/
- if (!((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS)))
+ if (!stage2_pte_needs_update(old, new))
return -EAGAIN;
- /*
- * There's an existing different valid leaf entry, so perform
- * break-before-make.
- */
- kvm_set_invalid_pte(ptep);
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
- put_page(page);
+ stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);
}
+ /* Perform CMOs before installation of the guest stage-2 PTE */
+ if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
+ mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
+ granule);
+
+ if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
+ mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);
+
smp_store_release(ptep, new);
- get_page(page);
- data->phys += granule;
+ if (stage2_pte_is_counted(new))
+ mm_ops->get_page(ptep);
+ if (kvm_phys_is_valid(phys))
+ data->phys += granule;
return 0;
}
@@ -511,7 +644,8 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
if (!kvm_block_mapping_supported(addr, end, data->phys, level))
return 0;
- kvm_set_invalid_pte(ptep);
+ data->childp = kvm_pte_follow(*ptep, data->mm_ops);
+ kvm_clear_pte(ptep);
/*
* Invalidate the whole stage-2, as we may have numerous leaf
@@ -526,13 +660,13 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
struct stage2_map_data *data)
{
- int ret;
+ struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
kvm_pte_t *childp, pte = *ptep;
- struct page *page = virt_to_page(ptep);
+ int ret;
if (data->anchor) {
- if (kvm_pte_valid(pte))
- put_page(page);
+ if (stage2_pte_is_counted(pte))
+ mm_ops->put_page(ptep);
return 0;
}
@@ -547,7 +681,7 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
if (!data->memcache)
return -ENOMEM;
- childp = kvm_mmu_memory_cache_alloc(data->memcache);
+ childp = mm_ops->zalloc_page(data->memcache);
if (!childp)
return -ENOMEM;
@@ -556,14 +690,11 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
* a table. Accesses beyond 'end' that fall within the new table
* will be mapped lazily.
*/
- if (kvm_pte_valid(pte)) {
- kvm_set_invalid_pte(ptep);
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
- put_page(page);
- }
+ if (stage2_pte_is_counted(pte))
+ stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);
- kvm_set_table_pte(ptep, childp);
- get_page(page);
+ kvm_set_table_pte(ptep, childp, mm_ops);
+ mm_ops->get_page(ptep);
return 0;
}
@@ -572,19 +703,25 @@ static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level,
kvm_pte_t *ptep,
struct stage2_map_data *data)
{
+ struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+ kvm_pte_t *childp;
int ret = 0;
if (!data->anchor)
return 0;
- free_page((unsigned long)kvm_pte_follow(*ptep));
- put_page(virt_to_page(ptep));
-
if (data->anchor == ptep) {
+ childp = data->childp;
data->anchor = NULL;
+ data->childp = NULL;
ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
+ } else {
+ childp = kvm_pte_follow(*ptep, mm_ops);
}
+ mm_ops->put_page(childp);
+ mm_ops->put_page(ptep);
+
return ret;
}
@@ -626,13 +763,14 @@ static int stage2_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
u64 phys, enum kvm_pgtable_prot prot,
- struct kvm_mmu_memory_cache *mc)
+ void *mc)
{
int ret;
struct stage2_map_data map_data = {
.phys = ALIGN_DOWN(phys, PAGE_SIZE),
.mmu = pgt->mmu,
.memcache = mc,
+ .mm_ops = pgt->mm_ops,
};
struct kvm_pgtable_walker walker = {
.cb = stage2_map_walker,
@@ -642,7 +780,10 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
.arg = &map_data,
};
- ret = stage2_map_set_prot_attr(prot, &map_data);
+ if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
+ return -EINVAL;
+
+ ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
if (ret)
return ret;
@@ -651,38 +792,57 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
return ret;
}
-static void stage2_flush_dcache(void *addr, u64 size)
+int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
+ void *mc, u8 owner_id)
{
- if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
- return;
+ int ret;
+ struct stage2_map_data map_data = {
+ .phys = KVM_PHYS_INVALID,
+ .mmu = pgt->mmu,
+ .memcache = mc,
+ .mm_ops = pgt->mm_ops,
+ .owner_id = owner_id,
+ };
+ struct kvm_pgtable_walker walker = {
+ .cb = stage2_map_walker,
+ .flags = KVM_PGTABLE_WALK_TABLE_PRE |
+ KVM_PGTABLE_WALK_LEAF |
+ KVM_PGTABLE_WALK_TABLE_POST,
+ .arg = &map_data,
+ };
- __flush_dcache_area(addr, size);
-}
+ if (owner_id > KVM_MAX_OWNER_ID)
+ return -EINVAL;
-static bool stage2_pte_cacheable(kvm_pte_t pte)
-{
- u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
- return memattr == PAGE_S2_MEMATTR(NORMAL);
+ ret = kvm_pgtable_walk(pgt, addr, size, &walker);
+ return ret;
}
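
A hypothetical caller of kvm_pgtable_stage2_set_owner(), sketching how a
host stage-2 might annotate a range donated to the hypervisor; the helper
name and owner id value are illustrative, not taken from this series:

static int donate_range_to_hyp(struct kvm_pgtable *pgt, void *mc,
			       u64 start, u64 size)
{
	const u8 hyp_owner_id = 1;	/* assumption: id 0 is the default owner */

	return kvm_pgtable_stage2_set_owner(pgt, start, size, mc,
					    hyp_owner_id);
}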
static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag,
void * const arg)
{
- struct kvm_s2_mmu *mmu = arg;
+ struct kvm_pgtable *pgt = arg;
+ struct kvm_s2_mmu *mmu = pgt->mmu;
+ struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
kvm_pte_t pte = *ptep, *childp = NULL;
bool need_flush = false;
- if (!kvm_pte_valid(pte))
+ if (!kvm_pte_valid(pte)) {
+ if (stage2_pte_is_counted(pte)) {
+ kvm_clear_pte(ptep);
+ mm_ops->put_page(ptep);
+ }
return 0;
+ }
if (kvm_pte_table(pte, level)) {
- childp = kvm_pte_follow(pte);
+ childp = kvm_pte_follow(pte, mm_ops);
- if (page_count(virt_to_page(childp)) != 1)
+ if (mm_ops->page_count(childp) != 1)
return 0;
- } else if (stage2_pte_cacheable(pte)) {
- need_flush = true;
+ } else if (stage2_pte_cacheable(pgt, pte)) {
+ need_flush = !stage2_has_fwb(pgt);
}
/*
@@ -690,17 +850,18 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
* block entry and rely on the remaining portions being faulted
* back lazily.
*/
- kvm_set_invalid_pte(ptep);
- kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
- put_page(virt_to_page(ptep));
+ stage2_put_pte(ptep, mmu, addr, level, mm_ops);
if (need_flush) {
- stage2_flush_dcache(kvm_pte_follow(pte),
- kvm_granule_size(level));
+ kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
+
+ dcache_clean_inval_poc((unsigned long)pte_follow,
+ (unsigned long)pte_follow +
+ kvm_granule_size(level));
}
if (childp)
- free_page((unsigned long)childp);
+ mm_ops->put_page(childp);
return 0;
}
@@ -709,7 +870,7 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
struct kvm_pgtable_walker walker = {
.cb = stage2_unmap_walker,
- .arg = pgt->mmu,
+ .arg = pgt,
.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
};
@@ -717,10 +878,11 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
}
struct stage2_attr_data {
- kvm_pte_t attr_set;
- kvm_pte_t attr_clr;
- kvm_pte_t pte;
- u32 level;
+ kvm_pte_t attr_set;
+ kvm_pte_t attr_clr;
+ kvm_pte_t pte;
+ u32 level;
+ struct kvm_pgtable_mm_ops *mm_ops;
};
static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
@@ -729,6 +891,7 @@ static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
{
kvm_pte_t pte = *ptep;
struct stage2_attr_data *data = arg;
+ struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
if (!kvm_pte_valid(pte))
return 0;
@@ -743,8 +906,17 @@ static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
* but worst-case the access flag update gets lost and will be
* set on the next access instead.
*/
- if (data->pte != pte)
+ if (data->pte != pte) {
+ /*
+ * Invalidate instruction cache before updating the guest
+ * stage-2 PTE if we are going to add executable permission.
+ */
+ if (mm_ops->icache_inval_pou &&
+ stage2_pte_executable(pte) && !stage2_pte_executable(*ptep))
+ mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
+ kvm_granule_size(level));
WRITE_ONCE(*ptep, pte);
+ }
return 0;
}
@@ -759,6 +931,7 @@ static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
struct stage2_attr_data data = {
.attr_set = attr_set & attr_mask,
.attr_clr = attr_clr & attr_mask,
+ .mm_ops = pgt->mm_ops,
};
struct kvm_pgtable_walker walker = {
.cb = stage2_attr_walker,
@@ -841,12 +1014,18 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag,
void * const arg)
{
+ struct kvm_pgtable *pgt = arg;
+ struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
kvm_pte_t pte = *ptep;
+ kvm_pte_t *pte_follow;
- if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pte))
+ if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
return 0;
- stage2_flush_dcache(kvm_pte_follow(pte), kvm_granule_size(level));
+ pte_follow = kvm_pte_follow(pte, mm_ops);
+ dcache_clean_inval_poc((unsigned long)pte_follow,
+ (unsigned long)pte_follow +
+ kvm_granule_size(level));
return 0;
}
@@ -855,30 +1034,35 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
struct kvm_pgtable_walker walker = {
.cb = stage2_flush_walker,
.flags = KVM_PGTABLE_WALK_LEAF,
+ .arg = pgt,
};
- if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+ if (stage2_has_fwb(pgt))
return 0;
return kvm_pgtable_walk(pgt, addr, size, &walker);
}
-int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm)
+int kvm_pgtable_stage2_init_flags(struct kvm_pgtable *pgt, struct kvm_arch *arch,
+ struct kvm_pgtable_mm_ops *mm_ops,
+ enum kvm_pgtable_stage2_flags flags)
{
size_t pgd_sz;
- u64 vtcr = kvm->arch.vtcr;
+ u64 vtcr = arch->vtcr;
u32 ia_bits = VTCR_EL2_IPA(vtcr);
u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
- pgt->pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ pgt->pgd = mm_ops->zalloc_pages_exact(pgd_sz);
if (!pgt->pgd)
return -ENOMEM;
pgt->ia_bits = ia_bits;
pgt->start_level = start_level;
- pgt->mmu = &kvm->arch.mmu;
+ pgt->mm_ops = mm_ops;
+ pgt->mmu = &arch->mmu;
+ pgt->flags = flags;
/* Ensure zeroed PGD pages are visible to the hardware walker */
dsb(ishst);
@@ -889,15 +1073,16 @@ static int stage2_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag,
void * const arg)
{
+ struct kvm_pgtable_mm_ops *mm_ops = arg;
kvm_pte_t pte = *ptep;
- if (!kvm_pte_valid(pte))
+ if (!stage2_pte_is_counted(pte))
return 0;
- put_page(virt_to_page(ptep));
+ mm_ops->put_page(ptep);
if (kvm_pte_table(pte, level))
- free_page((unsigned long)kvm_pte_follow(pte));
+ mm_ops->put_page(kvm_pte_follow(pte, mm_ops));
return 0;
}
@@ -909,10 +1094,85 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
.cb = stage2_free_walker,
.flags = KVM_PGTABLE_WALK_LEAF |
KVM_PGTABLE_WALK_TABLE_POST,
+ .arg = pgt->mm_ops,
};
WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
- free_pages_exact(pgt->pgd, pgd_sz);
+ pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz);
pgt->pgd = NULL;
}
+
+#define KVM_PTE_LEAF_S2_COMPAT_MASK (KVM_PTE_LEAF_ATTR_S2_PERMS | \
+ KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR | \
+ KVM_PTE_LEAF_ATTR_S2_IGNORED)
+
+static int stage2_check_permission_walker(u64 addr, u64 end, u32 level,
+ kvm_pte_t *ptep,
+ enum kvm_pgtable_walk_flags flag,
+ void * const arg)
+{
+ kvm_pte_t old_attr, pte = *ptep, *new_attr = arg;
+
+ /*
+ * Compatible mappings are either invalid and owned by the page-table
+ * owner (whose id is 0), or valid with matching permission attributes.
+ */
+ if (kvm_pte_valid(pte)) {
+ old_attr = pte & KVM_PTE_LEAF_S2_COMPAT_MASK;
+ if (old_attr != *new_attr)
+ return -EEXIST;
+ } else if (pte) {
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+int kvm_pgtable_stage2_find_range(struct kvm_pgtable *pgt, u64 addr,
+ enum kvm_pgtable_prot prot,
+ struct kvm_mem_range *range)
+{
+ kvm_pte_t attr;
+ struct kvm_pgtable_walker check_perm_walker = {
+ .cb = stage2_check_permission_walker,
+ .flags = KVM_PGTABLE_WALK_LEAF,
+ .arg = &attr,
+ };
+ u64 granule, start, end;
+ u32 level;
+ int ret;
+
+ ret = stage2_set_prot_attr(pgt, prot, &attr);
+ if (ret)
+ return ret;
+ attr &= KVM_PTE_LEAF_S2_COMPAT_MASK;
+
+ for (level = pgt->start_level; level < KVM_PGTABLE_MAX_LEVELS; level++) {
+ granule = kvm_granule_size(level);
+ start = ALIGN_DOWN(addr, granule);
+ end = start + granule;
+
+ if (!kvm_level_supports_block_mapping(level))
+ continue;
+
+ if (start < range->start || range->end < end)
+ continue;
+
+ /*
+ * Check the presence of existing mappings with incompatible
+ * permissions within the current block range, and try one level
+ * deeper if one is found.
+ */
+ ret = kvm_pgtable_walk(pgt, start, granule, &check_perm_walker);
+ if (ret != -EEXIST)
+ break;
+ }
+
+ if (!ret) {
+ range->start = start;
+ range->end = end;
+ }
+
+ return ret;
+}
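
A hedged usage sketch for kvm_pgtable_stage2_find_range(): shrink a
candidate range around an address to the largest block with compatible
permissions, then map it in one call. The function name is hypothetical,
and the identity mapping (addr == phys) is an assumption that only holds
for page tables created with KVM_PGTABLE_S2_IDMAP:

static int map_largest_block(struct kvm_pgtable *pgt, u64 addr, void *mc)
{
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
	struct kvm_mem_range range = {
		.start	= 0,
		.end	= BIT(pgt->ia_bits),
	};
	int ret;

	ret = kvm_pgtable_stage2_find_range(pgt, addr, prot, &range);
	if (ret)
		return ret;

	/* Map the whole compatible block in a single call */
	return kvm_pgtable_stage2_map(pgt, range.start,
				      range.end - range.start,
				      range.start, prot, mc);
}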
diff --git a/arch/arm64/kvm/hyp/reserved_mem.c b/arch/arm64/kvm/hyp/reserved_mem.c
new file mode 100644
index 000000000000..d654921dd09b
--- /dev/null
+++ b/arch/arm64/kvm/hyp/reserved_mem.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 - Google LLC
+ * Author: Quentin Perret <qperret@google.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/memblock.h>
+#include <linux/sort.h>
+
+#include <asm/kvm_host.h>
+
+#include <nvhe/memory.h>
+#include <nvhe/mm.h>
+
+static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
+static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);
+
+phys_addr_t hyp_mem_base;
+phys_addr_t hyp_mem_size;
+
+static int cmp_hyp_memblock(const void *p1, const void *p2)
+{
+ const struct memblock_region *r1 = p1;
+ const struct memblock_region *r2 = p2;
+
+ return r1->base < r2->base ? -1 : (r1->base > r2->base);
+}
+
+static void __init sort_memblock_regions(void)
+{
+ sort(hyp_memory,
+ *hyp_memblock_nr_ptr,
+ sizeof(struct memblock_region),
+ cmp_hyp_memblock,
+ NULL);
+}
+
+static int __init register_memblock_regions(void)
+{
+ struct memblock_region *reg;
+
+ for_each_mem_region(reg) {
+ if (*hyp_memblock_nr_ptr >= HYP_MEMBLOCK_REGIONS)
+ return -ENOMEM;
+
+ hyp_memory[*hyp_memblock_nr_ptr] = *reg;
+ (*hyp_memblock_nr_ptr)++;
+ }
+ sort_memblock_regions();
+
+ return 0;
+}
+
+void __init kvm_hyp_reserve(void)
+{
+ u64 nr_pages, prev, hyp_mem_pages = 0;
+ int ret;
+
+ if (!is_hyp_mode_available() || is_kernel_in_hyp_mode())
+ return;
+
+ if (kvm_get_mode() != KVM_MODE_PROTECTED)
+ return;
+
+ ret = register_memblock_regions();
+ if (ret) {
+ *hyp_memblock_nr_ptr = 0;
+ kvm_err("Failed to register hyp memblocks: %d\n", ret);
+ return;
+ }
+
+ hyp_mem_pages += hyp_s1_pgtable_pages();
+ hyp_mem_pages += host_s2_pgtable_pages();
+
+ /*
+ * The hyp_vmemmap needs to be backed by pages, but these pages
+ * themselves need to be present in the vmemmap, so compute the number
+ * of pages needed by looking for a fixed point.
+ */
+ nr_pages = 0;
+ do {
+ prev = nr_pages;
+ nr_pages = hyp_mem_pages + prev;
+ nr_pages = DIV_ROUND_UP(nr_pages * sizeof(struct hyp_page), PAGE_SIZE);
+ nr_pages += __hyp_pgtable_max_pages(nr_pages);
+ } while (nr_pages != prev);
+ hyp_mem_pages += nr_pages;
+
+ /*
+ * Try to allocate a PMD-aligned region to reduce TLB pressure once
+ * this is unmapped from the host stage-2, and fall back to PAGE_SIZE.
+ */
+ hyp_mem_size = hyp_mem_pages << PAGE_SHIFT;
+ hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(),
+ ALIGN(hyp_mem_size, PMD_SIZE),
+ PMD_SIZE);
+ if (!hyp_mem_base)
+ hyp_mem_base = memblock_find_in_range(0, memblock_end_of_DRAM(),
+ hyp_mem_size, PAGE_SIZE);
+ else
+ hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE);
+
+ if (!hyp_mem_base) {
+ kvm_err("Failed to reserve hyp memory\n");
+ return;
+ }
+ memblock_reserve(hyp_mem_base, hyp_mem_size);
+
+ kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20,
+ hyp_mem_base);
+}
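
To make the fixed-point loop above concrete with assumed constants
(PAGE_SIZE = 4096, sizeof(struct hyp_page) = 32, and ignoring the extra
__hyp_pgtable_max_pages() term): 100000 base pages first yield 782
vmemmap pages, then 788, which is stable. A standalone sketch of just
that iteration:

static u64 hyp_vmemmap_pages(u64 base_pages)
{
	u64 prev, pages = 0;

	do {
		prev = pages;
		/* Pages needed to back the vmemmap for base + vmemmap pages */
		pages = DIV_ROUND_UP((base_pages + prev) * 32, 4096);
	} while (pages != prev);

	return pages;	/* e.g. 100000 -> 782 -> 788 (fixed point) */
}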
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 80406f463c28..39f8f7f9227c 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -405,9 +405,54 @@ void __vgic_v3_init_lrs(void)
__gic_v3_set_lr(0, i);
}
-u64 __vgic_v3_get_ich_vtr_el2(void)
+/*
+ * Return the GIC CPU configuration:
+ * - [31:0] ICH_VTR_EL2
+ * - [62:32] RES0
+ * - [63] MMIO (GICv2) capable
+ */
+u64 __vgic_v3_get_gic_config(void)
{
- return read_gicreg(ICH_VTR_EL2);
+ u64 val, sre = read_gicreg(ICC_SRE_EL1);
+ unsigned long flags = 0;
+
+ /*
+ * To check whether we have an MMIO-based (GICv2 compatible)
+ * CPU interface, we need to disable the system register
+ * view. To do that safely, we have to prevent any interrupt
+ * from firing (which would be deadly).
+ *
+ * Note that this only makes sense on VHE, as interrupts are
+ * already masked for nVHE as part of the exception entry to
+ * EL2.
+ */
+ if (has_vhe())
+ flags = local_daif_save();
+
+ /*
+ * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
+ * that to be able to set ICC_SRE_EL1.SRE to 0, all the
+ * interrupt overrides must be set. You've got to love this.
+ */
+ sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
+ isb();
+ write_gicreg(0, ICC_SRE_EL1);
+ isb();
+
+ val = read_gicreg(ICC_SRE_EL1);
+
+ write_gicreg(sre, ICC_SRE_EL1);
+ isb();
+ sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
+ isb();
+
+ if (has_vhe())
+ local_daif_restore(flags);
+
+ val = (val & ICC_SRE_EL1_SRE) ? 0 : (1ULL << 63);
+ val |= read_gicreg(ICH_VTR_EL2);
+
+ return val;
}
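
Two hypothetical accessors showing how a caller would decode the packed
value, mirroring the layout documented in the comment above:

static bool gic_config_has_gicv2(u64 cfg)
{
	return !!(cfg & BIT(63));	/* MMIO (GICv2) capable */
}

static u32 gic_config_ich_vtr(u64 cfg)
{
	return lower_32_bits(cfg);	/* raw ICH_VTR_EL2 value */
}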
u64 __vgic_v3_read_vmcr(void)
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index af8e940d0f03..b3229924d243 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -4,7 +4,6 @@
* Author: Marc Zyngier <marc.zyngier@arm.com>
*/
-#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <linux/arm-smccc.h>
@@ -27,8 +26,6 @@
#include <asm/processor.h>
#include <asm/thread_info.h>
-const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
-
/* VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
@@ -134,7 +131,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
__load_guest_stage2(vcpu->arch.hw_mmu);
__activate_traps(vcpu);
- __adjust_pc(vcpu);
+ __kvm_adjust_pc(vcpu);
sysreg_restore_guest_state_vhe(guest_ctxt);
__debug_switch_to_guest(vcpu);
@@ -207,7 +204,7 @@ static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
__deactivate_traps(vcpu);
sysreg_restore_host_state_vhe(host_ctxt);
- panic(__hyp_panic_string,
+ panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n",
spsr, elr,
read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
read_sysreg(hpfar_el2), par, vcpu);
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index fd7895945bbc..66f17349f0c3 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -127,7 +127,7 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
__tlb_switch_to_host(&cxt);
}
-void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
+void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;
@@ -135,6 +135,7 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
__tlb_switch_to_guest(mmu, &cxt);
__tlbi(vmalle1);
+ asm volatile("ic iallu");
dsb(nsh);
isb();
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index ead21b98b620..30da78f72b3b 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -9,16 +9,65 @@
#include <kvm/arm_hypercalls.h>
#include <kvm/arm_psci.h>
+static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val)
+{
+ struct system_time_snapshot systime_snapshot;
+ u64 cycles = ~0UL;
+ u32 feature;
+
+ /*
+ * The system time and counter value must be captured at the same
+ * time to keep consistency and precision.
+ */
+ ktime_get_snapshot(&systime_snapshot);
+
+ /*
+ * This is only valid if the current clocksource is the
+ * architected counter, as this is the only one the guest
+ * can see.
+ */
+ if (systime_snapshot.cs_id != CSID_ARM_ARCH_COUNTER)
+ return;
+
+ /*
+ * The guest selects one of the two reference counters
+ * (virtual or physical) with the first argument of the SMCCC
+ * call. In case the identifier is not supported, error out.
+ */
+ feature = smccc_get_arg1(vcpu);
+ switch (feature) {
+ case KVM_PTP_VIRT_COUNTER:
+ cycles = systime_snapshot.cycles - vcpu_read_sys_reg(vcpu, CNTVOFF_EL2);
+ break;
+ case KVM_PTP_PHYS_COUNTER:
+ cycles = systime_snapshot.cycles;
+ break;
+ default:
+ return;
+ }
+
+ /*
+ * This relies on the top bit of val[0] never being set for
+ * valid values of system time, because that is *really* far
+ * in the future (about 292 years from 1970, and at that stage
+ * nobody will give a damn about it).
+ */
+ val[0] = upper_32_bits(systime_snapshot.real);
+ val[1] = lower_32_bits(systime_snapshot.real);
+ val[2] = upper_32_bits(cycles);
+ val[3] = lower_32_bits(cycles);
+}
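
On the guest side, the four 32-bit halves returned in x0..x3 would be
reassembled into the wall-clock time and counter value; a sketch with an
illustrative name (the real consumer is the guest's ptp_kvm driver):

static void ptp_kvm_unpack(u64 x0, u64 x1, u64 x2, u64 x3,
			   u64 *wall_ns, u64 *cycles)
{
	*wall_ns = (x0 << 32) | lower_32_bits(x1);
	*cycles  = (x2 << 32) | lower_32_bits(x3);
}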
+
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
u32 func_id = smccc_get_function(vcpu);
- long val = SMCCC_RET_NOT_SUPPORTED;
+ u64 val[4] = {SMCCC_RET_NOT_SUPPORTED};
u32 feature;
gpa_t gpa;
switch (func_id) {
case ARM_SMCCC_VERSION_FUNC_ID:
- val = ARM_SMCCC_VERSION_1_1;
+ val[0] = ARM_SMCCC_VERSION_1_1;
break;
case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
feature = smccc_get_arg1(vcpu);
@@ -28,10 +77,10 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
case SPECTRE_VULNERABLE:
break;
case SPECTRE_MITIGATED:
- val = SMCCC_RET_SUCCESS;
+ val[0] = SMCCC_RET_SUCCESS;
break;
case SPECTRE_UNAFFECTED:
- val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
+ val[0] = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
break;
}
break;
@@ -54,22 +103,35 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
break;
fallthrough;
case SPECTRE_UNAFFECTED:
- val = SMCCC_RET_NOT_REQUIRED;
+ val[0] = SMCCC_RET_NOT_REQUIRED;
break;
}
break;
case ARM_SMCCC_HV_PV_TIME_FEATURES:
- val = SMCCC_RET_SUCCESS;
+ val[0] = SMCCC_RET_SUCCESS;
break;
}
break;
case ARM_SMCCC_HV_PV_TIME_FEATURES:
- val = kvm_hypercall_pv_features(vcpu);
+ val[0] = kvm_hypercall_pv_features(vcpu);
break;
case ARM_SMCCC_HV_PV_TIME_ST:
gpa = kvm_init_stolen_time(vcpu);
if (gpa != GPA_INVALID)
- val = gpa;
+ val[0] = gpa;
+ break;
+ case ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID:
+ val[0] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0;
+ val[1] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1;
+ val[2] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2;
+ val[3] = ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3;
+ break;
+ case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
+ val[0] = BIT(ARM_SMCCC_KVM_FUNC_FEATURES);
+ val[0] |= BIT(ARM_SMCCC_KVM_FUNC_PTP);
+ break;
+ case ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID:
+ kvm_ptp_get_time(vcpu, val);
break;
case ARM_SMCCC_TRNG_VERSION:
case ARM_SMCCC_TRNG_FEATURES:
@@ -81,6 +143,6 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
return kvm_psci_call(vcpu);
}
- smccc_set_retval(vcpu, val, 0, 0, 0);
+ smccc_set_retval(vcpu, val[0], val[1], val[2], val[3]);
return 1;
}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 77cb2d28f2a4..3155c9e778f0 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -85,7 +85,55 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
static bool kvm_is_device_pfn(unsigned long pfn)
{
- return !pfn_valid(pfn);
+ return !pfn_is_map_memory(pfn);
+}
+
+static void *stage2_memcache_zalloc_page(void *arg)
+{
+ struct kvm_mmu_memory_cache *mc = arg;
+
+ /* Allocated with __GFP_ZERO, so no need to zero */
+ return kvm_mmu_memory_cache_alloc(mc);
+}
+
+static void *kvm_host_zalloc_pages_exact(size_t size)
+{
+ return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+}
+
+static void kvm_host_get_page(void *addr)
+{
+ get_page(virt_to_page(addr));
+}
+
+static void kvm_host_put_page(void *addr)
+{
+ put_page(virt_to_page(addr));
+}
+
+static int kvm_host_page_count(void *addr)
+{
+ return page_count(virt_to_page(addr));
+}
+
+static phys_addr_t kvm_host_pa(void *addr)
+{
+ return __pa(addr);
+}
+
+static void *kvm_host_va(phys_addr_t phys)
+{
+ return __va(phys);
+}
+
+static void clean_dcache_guest_page(void *va, size_t size)
+{
+ __clean_dcache_guest_page(va, size);
+}
+
+static void invalidate_icache_guest_page(void *va, size_t size)
+{
+ __invalidate_icache_guest_page(va, size);
}
/*
@@ -127,7 +175,7 @@ static bool kvm_is_device_pfn(unsigned long pfn)
static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
bool may_block)
{
- struct kvm *kvm = mmu->kvm;
+ struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
phys_addr_t end = start + size;
assert_spin_locked(&kvm->mmu_lock);
@@ -183,15 +231,39 @@ void free_hyp_pgds(void)
if (hyp_pgtable) {
kvm_pgtable_hyp_destroy(hyp_pgtable);
kfree(hyp_pgtable);
+ hyp_pgtable = NULL;
}
mutex_unlock(&kvm_hyp_pgd_mutex);
}
+static bool kvm_host_owns_hyp_mappings(void)
+{
+ if (static_branch_likely(&kvm_protected_mode_initialized))
+ return false;
+
+ /*
+ * This can happen at boot time when __create_hyp_mappings() is called
+ * after the hyp protection has been enabled, but the static key has
+ * not been flipped yet.
+ */
+ if (!hyp_pgtable && is_protected_kvm_enabled())
+ return false;
+
+ WARN_ON(!hyp_pgtable);
+
+ return true;
+}
+
static int __create_hyp_mappings(unsigned long start, unsigned long size,
unsigned long phys, enum kvm_pgtable_prot prot)
{
int err;
+ if (!kvm_host_owns_hyp_mappings()) {
+ return kvm_call_hyp_nvhe(__pkvm_create_mappings,
+ start, size, phys, prot);
+ }
+
mutex_lock(&kvm_hyp_pgd_mutex);
err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot);
mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -253,6 +325,16 @@ static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
unsigned long base;
int ret = 0;
+ if (!kvm_host_owns_hyp_mappings()) {
+ base = kvm_call_hyp_nvhe(__pkvm_create_private_mapping,
+ phys_addr, size, prot);
+ if (IS_ERR_OR_NULL((void *)base))
+ return PTR_ERR((void *)base);
+ *haddr = base;
+
+ return 0;
+ }
+
mutex_lock(&kvm_hyp_pgd_mutex);
/*
@@ -351,6 +433,19 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
return 0;
}
+static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
+ .zalloc_page = stage2_memcache_zalloc_page,
+ .zalloc_pages_exact = kvm_host_zalloc_pages_exact,
+ .free_pages_exact = free_pages_exact,
+ .get_page = kvm_host_get_page,
+ .put_page = kvm_host_put_page,
+ .page_count = kvm_host_page_count,
+ .phys_to_virt = kvm_host_va,
+ .virt_to_phys = kvm_host_pa,
+ .dcache_clean_inval_poc = clean_dcache_guest_page,
+ .icache_inval_pou = invalidate_icache_guest_page,
+};
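
The mm_ops indirection is what lets the same pgtable.c code run both in
the host kernel and at EL2, where virt_to_page() and friends are not
available. Purely for illustration, a minimal hypothetical ops table for
an identity-mapped environment (not part of this series):

static void *identity_phys_to_virt(phys_addr_t pa)
{
	return (void *)(unsigned long)pa;	/* assumption: VA == PA */
}

static phys_addr_t identity_virt_to_phys(void *va)
{
	return (phys_addr_t)(unsigned long)va;
}

static struct kvm_pgtable_mm_ops identity_mm_ops = {
	.phys_to_virt	= identity_phys_to_virt,
	.virt_to_phys	= identity_virt_to_phys,
};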
+
/**
* kvm_init_stage2_mmu - Initialise an S2 MMU structure
* @kvm: The pointer to the KVM structure
@@ -374,7 +469,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
if (!pgt)
return -ENOMEM;
- err = kvm_pgtable_stage2_init(pgt, kvm);
+ err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops);
if (err)
goto out_free_pgtable;
@@ -387,7 +482,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
for_each_possible_cpu(cpu)
*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
- mmu->kvm = kvm;
+ mmu->arch = &kvm->arch;
mmu->pgt = pgt;
mmu->pgd_phys = __pa(pgt->pgd);
mmu->vmid.vmid_gen = 0;
@@ -421,10 +516,11 @@ static void stage2_unmap_memslot(struct kvm *kvm,
* +--------------------------------------------+
*/
do {
- struct vm_area_struct *vma = find_vma(current->mm, hva);
+ struct vm_area_struct *vma;
hva_t vm_start, vm_end;
- if (!vma || vma->vm_start >= reg_end)
+ vma = find_vma_intersection(current->mm, hva, reg_end);
+ if (!vma)
break;
/*
@@ -469,7 +565,7 @@ void stage2_unmap_vm(struct kvm *kvm)
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
{
- struct kvm *kvm = mmu->kvm;
+ struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
struct kvm_pgtable *pgt = NULL;
spin_lock(&kvm->mmu_lock);
@@ -538,7 +634,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
*/
static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
{
- struct kvm *kvm = mmu->kvm;
+ struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
}
@@ -555,7 +651,7 @@ static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_
* Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
* serializing operations for VM memory regions.
*/
-void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
+static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
@@ -609,16 +705,6 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}
-static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
- __clean_dcache_guest_page(pfn, size);
-}
-
-static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
-{
- __invalidate_icache_guest_page(pfn, size);
-}
-
static void kvm_send_hwpoison_signal(unsigned long address, short lsb)
{
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
@@ -738,6 +824,74 @@ transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
return PAGE_SIZE;
}
+static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
+{
+ unsigned long pa;
+
+ if (is_vm_hugetlb_page(vma) && !(vma->vm_flags & VM_PFNMAP))
+ return huge_page_shift(hstate_vma(vma));
+
+ if (!(vma->vm_flags & VM_PFNMAP))
+ return PAGE_SHIFT;
+
+ VM_BUG_ON(is_vm_hugetlb_page(vma));
+
+ pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+ if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) &&
+ ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start &&
+ ALIGN(hva, PUD_SIZE) <= vma->vm_end)
+ return PUD_SHIFT;
+#endif
+
+ if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) &&
+ ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start &&
+ ALIGN(hva, PMD_SIZE) <= vma->vm_end)
+ return PMD_SHIFT;
+
+ return PAGE_SHIFT;
+}
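
The PUD/PMD tests above reduce to one congruence property; a compact
restatement for illustration (the helper name is made up):

/*
 * A block mapping at 'shift' granularity is only possible when hva and
 * pa share the same offset within a block; the VMA bounds are checked
 * separately above.
 */
static bool hva_pa_congruent(unsigned long hva, unsigned long pa,
			     unsigned int shift)
{
	unsigned long mask = (1UL << shift) - 1;

	return (hva & mask) == (pa & mask);
}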
+
+/*
+ * The page will be mapped in stage 2 as Normal Cacheable, so the VM will be
+ * able to see the page's tags and therefore they must be initialised first. If
+ * PG_mte_tagged is set, tags have already been initialised.
+ *
+ * The race in the test/set of the PG_mte_tagged flag is handled by:
+ * - preventing VM_SHARED mappings in a memslot with MTE, preventing two VMs
+ *   racing to sanitise the same page
+ * - mmap_lock protects between a VM faulting a page in and the VMM performing
+ * an mprotect() to add VM_MTE
+ */
+static int sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
+ unsigned long size)
+{
+ unsigned long i, nr_pages = size >> PAGE_SHIFT;
+ struct page *page;
+
+ if (!kvm_has_mte(kvm))
+ return 0;
+
+ /*
+ * pfn_to_online_page() is used to reject ZONE_DEVICE pages
+ * that may not support tags.
+ */
+ page = pfn_to_online_page(pfn);
+
+ if (!page)
+ return -EFAULT;
+
+ for (i = 0; i < nr_pages; i++, page++) {
+ if (!test_bit(PG_mte_tagged, &page->flags)) {
+ mte_clear_page_tags(page_address(page));
+ set_bit(PG_mte_tagged, &page->flags);
+ }
+ }
+
+ return 0;
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
unsigned long fault_status)
@@ -746,6 +900,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
bool write_fault, writable, force_pte = false;
bool exec_fault;
bool device = false;
+ bool shared;
unsigned long mmu_seq;
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
@@ -769,26 +924,31 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
}
- /* Let's check if we will get back a huge page backed by hugetlbfs */
+ /*
+ * Let's check if we will get back a huge page backed by hugetlbfs, or
+ * get block mapping for device MMIO region.
+ */
mmap_read_lock(current->mm);
- vma = find_vma_intersection(current->mm, hva, hva + 1);
+ vma = vma_lookup(current->mm, hva);
if (unlikely(!vma)) {
kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
mmap_read_unlock(current->mm);
return -EFAULT;
}
- if (is_vm_hugetlb_page(vma))
- vma_shift = huge_page_shift(hstate_vma(vma));
- else
- vma_shift = PAGE_SHIFT;
-
- if (logging_active ||
- (vma->vm_flags & VM_PFNMAP)) {
+ /*
+ * logging_active is guaranteed to never be true for VM_PFNMAP
+ * memslots.
+ */
+ if (logging_active) {
force_pte = true;
vma_shift = PAGE_SHIFT;
+ } else {
+ vma_shift = get_vma_page_shift(vma, hva);
}
+ shared = (vma->vm_flags & VM_PFNMAP);
+
switch (vma_shift) {
#ifndef __PAGETABLE_PMD_FOLDED
case PUD_SHIFT:
@@ -839,13 +999,18 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
* the page we just got a reference to gets unmapped before we have a
* chance to grab the mmu_lock, which ensures that if the page gets
- * unmapped afterwards, the call to kvm_unmap_hva will take it away
+ * unmapped afterwards, the call to kvm_unmap_gfn will take it away
* from us again properly. This smp_rmb() interacts with the smp_wmb()
* in kvm_mmu_notifier_invalidate_<page|range_end>.
+ *
+ * Besides, __gfn_to_pfn_memslot() is used instead of gfn_to_pfn_prot()
+ * to avoid the unnecessary overhead of locating the memory slot, which
+ * is always fixed even when @gfn is adjusted for huge pages.
*/
smp_rmb();
- pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
+ pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
+ write_fault, &writable, NULL);
if (pfn == KVM_PFN_ERR_HWPOISON) {
kvm_send_hwpoison_signal(hva, vma_shift);
return 0;
@@ -854,8 +1019,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return -EFAULT;
if (kvm_is_device_pfn(pfn)) {
+ /*
+ * If the page was identified as device early by looking at
+ * the VMA flags, vma_pagesize is already representing the
+ * largest quantity we can map. If instead it was mapped
+ * via gfn_to_pfn_prot(), vma_pagesize is set to PAGE_SIZE
+ * and must not be upgraded.
+ *
+ * In both cases, we don't let transparent_hugepage_adjust()
+ * change things at the last minute.
+ */
device = true;
- force_pte = true;
} else if (logging_active && !write_fault) {
/*
* Only actually map the page as writable if this was a write
@@ -876,19 +1050,25 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* If we are not forced to use page mapping, check if we are
* backed by a THP and thus use block mapping if possible.
*/
- if (vma_pagesize == PAGE_SIZE && !force_pte)
+ if (vma_pagesize == PAGE_SIZE && !(force_pte || device))
vma_pagesize = transparent_hugepage_adjust(memslot, hva,
&pfn, &fault_ipa);
+
+ if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
+ /* Check the VMM hasn't introduced a new VM_SHARED VMA */
+ if (!shared)
+ ret = sanitise_mte_tags(kvm, pfn, vma_pagesize);
+ else
+ ret = -EFAULT;
+ if (ret)
+ goto out_unlock;
+ }
+
if (writable)
prot |= KVM_PGTABLE_PROT_W;
- if (fault_status != FSC_PERM && !device)
- clean_dcache_guest_page(pfn, vma_pagesize);
-
- if (exec_fault) {
+ if (exec_fault)
prot |= KVM_PGTABLE_PROT_X;
- invalidate_icache_guest_page(pfn, vma_pagesize);
- }
if (device)
prot |= KVM_PGTABLE_PROT_DEVICE;
@@ -911,7 +1091,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
/* Mark the page dirty only if the fault is handled successfully */
if (writable && !ret) {
kvm_set_pfn_dirty(pfn);
- mark_page_dirty(kvm, gfn);
+ mark_page_dirty_in_slot(kvm, memslot, gfn);
}
out_unlock:
@@ -1064,126 +1244,73 @@ out_unlock:
return ret;
}
-static int handle_hva_to_gpa(struct kvm *kvm,
- unsigned long start,
- unsigned long end,
- int (*handler)(struct kvm *kvm,
- gpa_t gpa, u64 size,
- void *data),
- void *data)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- int ret = 0;
-
- slots = kvm_memslots(kvm);
-
- /* we only care about the pages that the guest sees */
- kvm_for_each_memslot(memslot, slots) {
- unsigned long hva_start, hva_end;
- gfn_t gpa;
-
- hva_start = max(start, memslot->userspace_addr);
- hva_end = min(end, memslot->userspace_addr +
- (memslot->npages << PAGE_SHIFT));
- if (hva_start >= hva_end)
- continue;
+ if (!kvm->arch.mmu.pgt)
+ return false;
- gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
- ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
- }
+ __unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
+ (range->end - range->start) << PAGE_SHIFT,
+ range->may_block);
- return ret;
+ return false;
}
-static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- unsigned flags = *(unsigned *)data;
- bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
-
- __unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
- return 0;
-}
+ kvm_pfn_t pfn = pte_pfn(range->pte);
+ int ret;
-int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end, unsigned flags)
-{
if (!kvm->arch.mmu.pgt)
- return 0;
-
- trace_kvm_unmap_hva_range(start, end);
- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
- return 0;
-}
+ return false;
-static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
- kvm_pfn_t *pfn = (kvm_pfn_t *)data;
+ WARN_ON(range->end - range->start != 1);
- WARN_ON(size != PAGE_SIZE);
+ ret = sanitise_mte_tags(kvm, pfn, PAGE_SIZE);
+ if (ret)
+ return false;
/*
+ * We've moved a page around, probably through CoW, so let's treat
+ * it just like a translation fault; the map handler will clean
+ * the cache to the PoC.
+ *
* The MMU notifiers will have unmapped a huge PMD before calling
- * ->change_pte() (which in turn calls kvm_set_spte_hva()) and
+ * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and
* therefore we never need to clear out a huge PMD through this
* calling path and a memcache is not required.
*/
- kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, gpa, PAGE_SIZE,
- __pfn_to_phys(*pfn), KVM_PGTABLE_PROT_R, NULL);
- return 0;
-}
-
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
-{
- unsigned long end = hva + PAGE_SIZE;
- kvm_pfn_t pfn = pte_pfn(pte);
-
- if (!kvm->arch.mmu.pgt)
- return 0;
-
- trace_kvm_set_spte_hva(hva);
+ kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
+ PAGE_SIZE, __pfn_to_phys(pfn),
+ KVM_PGTABLE_PROT_R, NULL);
- /*
- * We've moved a page around, probably through CoW, so let's treat it
- * just like a translation fault and clean the cache to the PoC.
- */
- clean_dcache_guest_page(pfn, PAGE_SIZE);
- handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pfn);
- return 0;
+ return false;
}
-static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- pte_t pte;
+ u64 size = (range->end - range->start) << PAGE_SHIFT;
kvm_pte_t kpte;
+ pte_t pte;
+
+ if (!kvm->arch.mmu.pgt)
+ return false;
WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
- kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt, gpa);
+
+ kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
+ range->start << PAGE_SHIFT);
pte = __pte(kpte);
return pte_valid(pte) && pte_young(pte);
}
-static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
-{
- WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
- return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt, gpa);
-}
-
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
if (!kvm->arch.mmu.pgt)
- return 0;
- trace_kvm_age_hva(start, end);
- return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
-}
+ return false;
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- if (!kvm->arch.mmu.pgt)
- return 0;
- trace_kvm_test_age_hva(hva);
- return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
- kvm_test_age_hva_handler, NULL);
+ return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
+ range->start << PAGE_SHIFT);
}
phys_addr_t kvm_mmu_get_httbr(void)
@@ -1208,10 +1335,22 @@ static int kvm_map_idmap_text(void)
return err;
}
-int kvm_mmu_init(void)
+static void *kvm_hyp_zalloc_page(void *arg)
+{
+ return (void *)get_zeroed_page(GFP_KERNEL);
+}
+
+static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
+ .zalloc_page = kvm_hyp_zalloc_page,
+ .get_page = kvm_host_get_page,
+ .put_page = kvm_host_put_page,
+ .phys_to_virt = kvm_host_va,
+ .virt_to_phys = kvm_host_pa,
+};
+
+int kvm_mmu_init(u32 *hyp_va_bits)
{
int err;
- u32 hyp_va_bits;
hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
@@ -1225,8 +1364,8 @@ int kvm_mmu_init(void)
*/
BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
- hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
- kvm_debug("Using %u-bit virtual addresses at EL2\n", hyp_va_bits);
+ *hyp_va_bits = 64 - ((idmap_t0sz & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET);
+ kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
kvm_debug("HYP VA range: %lx:%lx\n",
kern_hyp_va(PAGE_OFFSET),
@@ -1251,7 +1390,7 @@ int kvm_mmu_init(void)
goto out;
}
- err = kvm_pgtable_hyp_init(hyp_pgtable, hyp_va_bits);
+ err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
if (err)
goto out_free_pgtable;
@@ -1301,7 +1440,6 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
{
hva_t hva = mem->userspace_addr;
hva_t reg_end = hva + mem->memory_size;
- bool writable = !(mem->flags & KVM_MEM_READONLY);
int ret = 0;
if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
@@ -1312,15 +1450,13 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* Prevent userspace from creating a memory region outside of the IPA
* space addressable by the KVM guest IPA space.
*/
- if (memslot->base_gfn + memslot->npages >=
- (kvm_phys_size(kvm) >> PAGE_SHIFT))
+ if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;
mmap_read_lock(current->mm);
/*
* A memory region could potentially cover multiple VMAs, and any holes
- * between them, so iterate over all of them to find out if we can map
- * any of them right now.
+ * between them, so iterate over all of them.
*
* +--------------------------------------------+
* +---------------+----------------+ +----------------+
@@ -1330,51 +1466,30 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* +--------------------------------------------+
*/
do {
- struct vm_area_struct *vma = find_vma(current->mm, hva);
- hva_t vm_start, vm_end;
+ struct vm_area_struct *vma;
- if (!vma || vma->vm_start >= reg_end)
+ vma = find_vma_intersection(current->mm, hva, reg_end);
+ if (!vma)
break;
/*
- * Take the intersection of this VMA with the memory region
+ * VM_SHARED mappings are not allowed with MTE to avoid races
+ * when updating the PG_mte_tagged page flag, see
+ * sanitise_mte_tags for more details.
*/
- vm_start = max(hva, vma->vm_start);
- vm_end = min(reg_end, vma->vm_end);
+ if (kvm_has_mte(kvm) && vma->vm_flags & VM_SHARED)
+ return -EINVAL;
if (vma->vm_flags & VM_PFNMAP) {
- gpa_t gpa = mem->guest_phys_addr +
- (vm_start - mem->userspace_addr);
- phys_addr_t pa;
-
- pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
- pa += vm_start - vma->vm_start;
-
/* IO region dirty page logging not allowed */
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
ret = -EINVAL;
- goto out;
- }
-
- ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
- vm_end - vm_start,
- writable);
- if (ret)
break;
+ }
}
- hva = vm_end;
+ hva = min(reg_end, vma->vm_end);
} while (hva < reg_end);
- if (change == KVM_MR_FLAGS_ONLY)
- goto out;
-
- spin_lock(&kvm->mmu_lock);
- if (ret)
- unmap_stage2_range(&kvm->arch.mmu, mem->guest_phys_addr, mem->memory_size);
- else if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
- stage2_flush_memslot(kvm, memslot);
- spin_unlock(&kvm->mmu_lock);
-out:
mmap_read_unlock(current->mm);
return ret;
}
diff --git a/arch/arm64/kvm/perf.c b/arch/arm64/kvm/perf.c
index d45b8b9a4415..151c31fb9860 100644
--- a/arch/arm64/kvm/perf.c
+++ b/arch/arm64/kvm/perf.c
@@ -11,6 +11,8 @@
#include <asm/kvm_emulate.h>
+DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
+
static int kvm_is_in_guest(void)
{
return kvm_get_running_vcpu() != NULL;
@@ -48,6 +50,9 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
int kvm_perf_init(void)
{
+ if (kvm_pmu_probe_pmuver() != 0xf && !is_protected_kvm_enabled())
+ static_branch_enable(&kvm_arm_pmu_available);
+
return perf_register_guest_info_callbacks(&kvm_guest_cbs);
}
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index e9ec08b0b070..f33825c995cb 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -578,6 +578,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
if (val & ARMV8_PMU_PMCR_P) {
+ mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
for_each_set_bit(i, &mask, 32)
kvm_pmu_set_counter_value(vcpu, i, 0);
}
@@ -739,7 +740,7 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
kvm_pmu_create_perf_event(vcpu, select_idx);
}
-static int kvm_pmu_probe_pmuver(void)
+int kvm_pmu_probe_pmuver(void)
{
struct perf_event_attr attr = { };
struct perf_event *event;
@@ -823,16 +824,6 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
return val & mask;
}
-bool kvm_arm_support_pmu_v3(void)
-{
- /*
- * Check if HW_PERF_EVENTS are supported by checking the number of
- * hardware performance counters. This could ensure the presence of
- * a physical PMU and CONFIG_PERF_EVENT is selected.
- */
- return (perf_num_counters() > 0);
-}
-
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
if (!kvm_vcpu_has_pmu(vcpu))
@@ -860,6 +851,9 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
return -EINVAL;
}
+ /* One-off reload of the PMU on first run */
+ kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
+
return 0;
}
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index faf32a44ba04..03a6c1f4a09a 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -33,7 +33,7 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
{
struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
- if (!ctx || !kvm_pmu_switch_needed(attr))
+ if (!kvm_arm_support_pmu_v3() || !ctx || !kvm_pmu_switch_needed(attr))
return;
if (!attr->exclude_host)
@@ -49,7 +49,7 @@ void kvm_clr_pmu_events(u32 clr)
{
struct kvm_host_data *ctx = this_cpu_ptr_hyp_sym(kvm_host_data);
- if (!ctx)
+ if (!kvm_arm_support_pmu_v3() || !ctx)
return;
ctx->pmu_events.events_host &= ~clr;
@@ -172,7 +172,7 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
struct kvm_host_data *host;
u32 events_guest, events_host;
- if (!has_vhe())
+ if (!kvm_arm_support_pmu_v3() || !has_vhe())
return;
preempt_disable();
@@ -193,7 +193,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
struct kvm_host_data *host;
u32 events_guest, events_host;
- if (!has_vhe())
+ if (!kvm_arm_support_pmu_v3() || !has_vhe())
return;
host = this_cpu_ptr_hyp_sym(kvm_host_data);
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 47f3f035f3ea..cba7872d69a8 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -74,10 +74,6 @@ static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
if (!system_supports_sve())
return -EINVAL;
- /* Verify that KVM startup enforced this when SVE was detected: */
- if (WARN_ON(!has_vhe()))
- return -EINVAL;
-
vcpu->arch.sve_max_vl = kvm_sve_max_vl;
/*
@@ -170,6 +166,29 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
return 0;
}
+static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu *tmp;
+ bool is32bit;
+ int i;
+
+ is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
+ if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
+ return false;
+
+ /* MTE is incompatible with AArch32 */
+ if (kvm_has_mte(vcpu->kvm) && is32bit)
+ return false;
+
+ /* Check that the vcpus are either all 32bit or all 64bit */
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ if (vcpu_has_feature(tmp, KVM_ARM_VCPU_EL1_32BIT) != is32bit)
+ return false;
+ }
+
+ return true;
+}
+
/**
* kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer
@@ -221,13 +240,14 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
}
}
+ if (!vcpu_allowed_register_width(vcpu)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
switch (vcpu->arch.target) {
default:
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
- if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) {
- ret = -EINVAL;
- goto out;
- }
pstate = VCPU_RESET_PSTATE_SVC;
} else {
pstate = VCPU_RESET_PSTATE_EL1;
@@ -242,6 +262,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
/* Reset core registers */
memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
+ memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
+ vcpu->arch.ctxt.spsr_abt = 0;
+ vcpu->arch.ctxt.spsr_und = 0;
+ vcpu->arch.ctxt.spsr_irq = 0;
+ vcpu->arch.ctxt.spsr_fiq = 0;
vcpu_gp_regs(vcpu)->pstate = pstate;
/* Reset system registers */
@@ -311,40 +336,32 @@ int kvm_set_ipa_limit(void)
}
switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
- default:
- case 1:
+ case ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE:
kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
return -EINVAL;
- case 0:
+ case ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT:
kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
break;
- case 2:
+ case ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX:
kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
break;
+ default:
+ kvm_err("Unsupported value for TGRAN_2, giving up\n");
+ return -EINVAL;
}
kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
- WARN(kvm_ipa_limit < KVM_PHYS_SHIFT,
- "KVM IPA Size Limit (%d bits) is smaller than default size\n",
- kvm_ipa_limit);
- kvm_info("IPA Size Limit: %d bits\n", kvm_ipa_limit);
+ kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
+ ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
+ " (Reduced IPA size, limited VM/VMM compatibility)" : ""));
return 0;
}
-/*
- * Configure the VTCR_EL2 for this VM. The VTCR value is common
- * across all the physical CPUs on the system. We use system wide
- * sanitised values to fill in different fields, except for Hardware
- * Management of Access Flags. HA Flag is set unconditionally on
- * all CPUs, as it is safe to run with or without the feature and
- * the bit is RES0 on CPUs that don't support it.
- */
int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
{
- u64 vtcr = VTCR_EL2_FLAGS, mmfr0;
- u32 parange, phys_shift;
- u8 lvls;
+ u64 mmfr0, mmfr1;
+ u32 phys_shift;
if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
return -EINVAL;
@@ -356,36 +373,16 @@ int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
return -EINVAL;
} else {
phys_shift = KVM_PHYS_SHIFT;
+ if (phys_shift > kvm_ipa_limit) {
+ pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n",
+ current->comm);
+ return -EINVAL;
+ }
}
mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
- parange = cpuid_feature_extract_unsigned_field(mmfr0,
- ID_AA64MMFR0_PARANGE_SHIFT);
- if (parange > ID_AA64MMFR0_PARANGE_MAX)
- parange = ID_AA64MMFR0_PARANGE_MAX;
- vtcr |= parange << VTCR_EL2_PS_SHIFT;
-
- vtcr |= VTCR_EL2_T0SZ(phys_shift);
- /*
- * Use a minimum 2 level page table to prevent splitting
- * host PMD huge pages at stage2.
- */
- lvls = stage2_pgtable_levels(phys_shift);
- if (lvls < 2)
- lvls = 2;
- vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
-
- /*
- * Enable the Hardware Access Flag management, unconditionally
- * on all CPUs. The features is RES0 on CPUs without the support
- * and must be ignored by the CPUs.
- */
- vtcr |= VTCR_EL2_HA;
+ mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+ kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
- /* Set the vmid bits */
- vtcr |= (kvm_get_vmid_bits() == 16) ?
- VTCR_EL2_VS_16BIT :
- VTCR_EL2_VS_8BIT;
- kvm->arch.vtcr = vtcr;
return 0;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 4f2f1e3145de..f6f126eb6ac1 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -399,14 +399,14 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@@ -414,7 +414,7 @@ static bool trap_bvr(struct kvm_vcpu *vcpu,
static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -424,7 +424,7 @@ static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -434,21 +434,21 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_bvr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}
static bool trap_bcr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@@ -456,7 +456,7 @@ static bool trap_bcr(struct kvm_vcpu *vcpu,
static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -467,7 +467,7 @@ static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -477,22 +477,22 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_bcr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}
static bool trap_wvr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write,
- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
+ trace_trap_reg(__func__, rd->CRm, p->is_write,
+ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);
return true;
}
@@ -500,7 +500,7 @@ static bool trap_wvr(struct kvm_vcpu *vcpu,
static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -510,7 +510,7 @@ static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -520,21 +520,21 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_wvr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}
static bool trap_wcr(struct kvm_vcpu *vcpu,
struct sys_reg_params *p,
const struct sys_reg_desc *rd)
{
- u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (p->is_write)
reg_to_dbg(vcpu, p, rd, dbg_reg);
else
dbg_to_reg(vcpu, p, rd, dbg_reg);
- trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+ trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);
return true;
}
@@ -542,7 +542,7 @@ static bool trap_wcr(struct kvm_vcpu *vcpu,
static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -552,7 +552,7 @@ static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
return -EFAULT;
@@ -562,7 +562,7 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
static void reset_wcr(struct kvm_vcpu *vcpu,
const struct sys_reg_desc *rd)
{
- vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
+ vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
@@ -1047,6 +1047,13 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
break;
case SYS_ID_AA64PFR1_EL1:
val &= ~FEATURE(ID_AA64PFR1_MTE);
+ if (kvm_has_mte(vcpu->kvm)) {
+ u64 pfr, mte;
+
+ pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
+ mte = cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR1_MTE_SHIFT);
+ val |= FIELD_PREP(FEATURE(ID_AA64PFR1_MTE), mte);
+ }
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
@@ -1063,6 +1070,8 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
val = cpuid_feature_cap_perfmon_field(val,
ID_AA64DFR0_PMUVER_SHIFT,
kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
+ /* Hide SPE from guests */
+ val &= ~FEATURE(ID_AA64DFR0_PMSVER);
break;
case SYS_ID_DFR0_EL1:
/* Limit guests to PMUv3 for ARMv8.4 */
@@ -1300,6 +1309,23 @@ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
return true;
}
+static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ if (kvm_has_mte(vcpu->kvm))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
+#define MTE_REG(name) { \
+ SYS_DESC(SYS_##name), \
+ .access = undef_access, \
+ .reset = reset_unknown, \
+ .reg = name, \
+ .visibility = mte_visibility, \
+}
+
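MTE_REG() stamps out sys_reg_desc entries that trap to undef_access but stay hidden from userspace unless the VM has MTE. A standalone model of the pattern, with simplified stand-ins for the kernel's sys_reg_desc and visibility plumbing:

/*
 * Standalone model of the MTE_REG() pattern: a designated-initializer
 * macro that stamps out table entries sharing one visibility hook.
 */
#include <stdio.h>

#define REG_HIDDEN 1u

struct reg_desc {
	const char *name;
	unsigned int (*visibility)(int has_mte);
};

static unsigned int mte_visibility(int has_mte)
{
	return has_mte ? 0 : REG_HIDDEN;	/* mirrors the kernel helper */
}

#define MTE_REG(n) { .name = #n, .visibility = mte_visibility }

static const struct reg_desc regs[] = {
	MTE_REG(RGSR_EL1),
	MTE_REG(GCR_EL1),
	MTE_REG(TFSR_EL1),
	MTE_REG(TFSRE0_EL1),
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		printf("%s: %s\n", regs[i].name,
		       (regs[i].visibility(0) & REG_HIDDEN) ? "hidden" : "visible");
	return 0;
}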
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) { \
SYS_DESC(SYS_##name), \
@@ -1468,10 +1494,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
- { SYS_DESC(SYS_RGSR_EL1), undef_access },
- { SYS_DESC(SYS_GCR_EL1), undef_access },
+ MTE_REG(RGSR_EL1),
+ MTE_REG(GCR_EL1),
{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
+ { SYS_DESC(SYS_TRFCR_EL1), undef_access },
{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
@@ -1495,12 +1522,25 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
- { SYS_DESC(SYS_TFSR_EL1), undef_access },
- { SYS_DESC(SYS_TFSRE0_EL1), undef_access },
+ MTE_REG(TFSR_EL1),
+ MTE_REG(TFSRE0_EL1),
{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
+ { SYS_DESC(SYS_PMSCR_EL1), undef_access },
+ { SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
+ { SYS_DESC(SYS_PMSICR_EL1), undef_access },
+ { SYS_DESC(SYS_PMSIRR_EL1), undef_access },
+ { SYS_DESC(SYS_PMSFCR_EL1), undef_access },
+ { SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
+ { SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
+ { SYS_DESC(SYS_PMSIDR_EL1), undef_access },
+ { SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
+ { SYS_DESC(SYS_PMBPTR_EL1), undef_access },
+ { SYS_DESC(SYS_PMBSR_EL1), undef_access },
+ /* PMBIDR_EL1 is not trapped */
+
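Together with the PMSVER masking in read_id_reg() above, these entries make SPE invisible to the guest: the ID register reports "not implemented" while any access to the PMS*/PMB* registers traps to UNDEF. A sketch of the ID-field masking; the 4-bit field at bit 32 follows where PMSVer sits in ID_AA64DFR0_EL1, but treat the positions here as illustrative:

#include <stdint.h>
#include <stdio.h>

#define PMSVER_SHIFT 32	/* PMSVer: ID_AA64DFR0_EL1[35:32] (illustrative) */
#define PMSVER_MASK (0xfULL << PMSVER_SHIFT)

static uint64_t sanitise_dfr0(uint64_t hw_val)
{
	return hw_val & ~PMSVER_MASK;	/* guest reads "SPE not implemented" */
}

int main(void)
{
	uint64_t hw = (1ULL << PMSVER_SHIFT) | 0x6;	/* SPE v1 present */

	printf("%#llx\n", (unsigned long long)sanitise_dfr0(hw));	/* 0x6 */
	return 0;
}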
{ PMU_SYS_REG(SYS_PMINTENSET_EL1),
.access = access_pminten, .reg = PMINTENSET_EL1 },
{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
index ff0444352bba..33e4e7dd2719 100644
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -135,72 +135,6 @@ TRACE_EVENT(kvm_mmio_emulate,
__entry->vcpu_pc, __entry->instr, __entry->cpsr)
);
-TRACE_EVENT(kvm_unmap_hva_range,
- TP_PROTO(unsigned long start, unsigned long end),
- TP_ARGS(start, end),
-
- TP_STRUCT__entry(
- __field( unsigned long, start )
- __field( unsigned long, end )
- ),
-
- TP_fast_assign(
- __entry->start = start;
- __entry->end = end;
- ),
-
- TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
- __entry->start, __entry->end)
-);
-
-TRACE_EVENT(kvm_set_spte_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
-);
-
-TRACE_EVENT(kvm_age_hva,
- TP_PROTO(unsigned long start, unsigned long end),
- TP_ARGS(start, end),
-
- TP_STRUCT__entry(
- __field( unsigned long, start )
- __field( unsigned long, end )
- ),
-
- TP_fast_assign(
- __entry->start = start;
- __entry->end = end;
- ),
-
- TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
- __entry->start, __entry->end)
-);
-
-TRACE_EVENT(kvm_test_age_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
-);
-
TRACE_EVENT(kvm_set_way_flush,
TP_PROTO(unsigned long vcpu_pc, bool cache),
TP_ARGS(vcpu_pc, cache),
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 978301392d67..acdb7b3cc97d 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -288,3 +288,10 @@ void kvm_get_kimage_voffset(struct alt_instr *alt,
{
generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
}
+
+void kvm_compute_final_ctr_el0(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+ generate_mov_q(read_sanitised_ftr_reg(SYS_CTR_EL0),
+ origptr, updptr, nr_inst);
+}
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 052917deb149..340c51d87677 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -335,13 +335,14 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
kfree(dist->spis);
dist->spis = NULL;
dist->nr_spis = 0;
+ dist->vgic_dist_base = VGIC_ADDR_UNDEF;
- if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
- list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list) {
- list_del(&rdreg->list);
- kfree(rdreg);
- }
+ if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+ list_for_each_entry_safe(rdreg, next, &dist->rd_regions, list)
+ vgic_v3_free_redist_region(rdreg);
INIT_LIST_HEAD(&dist->rd_regions);
+ } else {
+ dist->vgic_cpu_base = VGIC_ADDR_UNDEF;
}
if (vgic_has_its(kvm))
@@ -362,6 +363,7 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
vgic_flush_pending_lpis(vcpu);
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+ vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}
/* To be called with kvm->lock held */
@@ -480,6 +482,16 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
return IRQ_HANDLED;
}
+static struct gic_kvm_info *gic_kvm_info;
+
+void __init vgic_set_kvm_info(const struct gic_kvm_info *info)
+{
+ BUG_ON(gic_kvm_info != NULL);
+ gic_kvm_info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (gic_kvm_info)
+ *gic_kvm_info = *info;
+}
+
/**
* kvm_vgic_init_cpu_hardware - initialize the GIC VE hardware
*
@@ -507,18 +519,29 @@ void kvm_vgic_init_cpu_hardware(void)
*/
int kvm_vgic_hyp_init(void)
{
- const struct gic_kvm_info *gic_kvm_info;
+ bool has_mask;
int ret;
- gic_kvm_info = gic_get_kvm_info();
if (!gic_kvm_info)
return -ENODEV;
- if (!gic_kvm_info->maint_irq) {
+ has_mask = !gic_kvm_info->no_maint_irq_mask;
+
+ if (has_mask && !gic_kvm_info->maint_irq) {
kvm_err("No vgic maintenance irq\n");
return -ENXIO;
}
+ /*
+ * If we get one of these oddball non-GICs, taint the kernel,
+	 * as we have no idea how they *really* behave.
+ */
+ if (gic_kvm_info->no_hw_deactivation) {
+ kvm_info("Non-architectural vgic, tainting kernel\n");
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ kvm_vgic_global_state.no_hw_deactivation = true;
+ }
+
switch (gic_kvm_info->type) {
case GIC_V2:
ret = vgic_v2_probe(gic_kvm_info);
@@ -534,10 +557,17 @@ int kvm_vgic_hyp_init(void)
ret = -ENODEV;
}
+ kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
+
+ kfree(gic_kvm_info);
+ gic_kvm_info = NULL;
+
if (ret)
return ret;
- kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
+ if (!has_mask)
+ return 0;
+
ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
vgic_maintenance_handler,
"vgic", kvm_get_running_vcpus());
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 40cbaca81333..61728c543eb9 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -2190,8 +2190,8 @@ static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
return offset;
}
-static int vgic_its_ite_cmp(void *priv, struct list_head *a,
- struct list_head *b)
+static int vgic_its_ite_cmp(void *priv, const struct list_head *a,
+ const struct list_head *b)
{
struct its_ite *itea = container_of(a, struct its_ite, ite_list);
struct its_ite *iteb = container_of(b, struct its_ite, ite_list);
@@ -2218,10 +2218,10 @@ static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
/*
* If an LPI carries the HW bit, this means that this
* interrupt is controlled by GICv4, and we do not
- * have direct access to that state. Let's simply fail
- * the save operation...
+ * have direct access to that state without GICv4.1.
+ * Let's simply fail the save operation...
*/
- if (ite->irq->hw)
+ if (ite->irq->hw && !kvm_vgic_global_state.has_gicv4_1)
return -EACCES;
ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
@@ -2329,8 +2329,8 @@ static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
return offset;
}
-static int vgic_its_device_cmp(void *priv, struct list_head *a,
- struct list_head *b)
+static int vgic_its_device_cmp(void *priv, const struct list_head *a,
+ const struct list_head *b)
{
struct its_device *deva = container_of(a, struct its_device, dev_list);
struct its_device *devb = container_of(b, struct its_device, dev_list);
diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
index 44419679f91a..7740995de982 100644
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -87,8 +87,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
goto out;
}
- rdreg = list_first_entry(&vgic->rd_regions,
- struct vgic_redist_region, list);
+ rdreg = list_first_entry_or_null(&vgic->rd_regions,
+ struct vgic_redist_region, list);
if (!rdreg)
addr_ptr = &undef_value;
else
@@ -226,6 +226,9 @@ static int vgic_get_common_attr(struct kvm_device *dev,
u64 addr;
unsigned long type = (unsigned long)attr->attr;
+ if (copy_from_user(&addr, uaddr, sizeof(addr)))
+ return -EFAULT;
+
r = kvm_vgic_addr(dev->kvm, type, &addr, false);
if (r)
return (r == -ENODEV) ? -ENXIO : r;
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 15a6c98ee92f..a09cdc0b953c 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -86,7 +86,7 @@ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
}
break;
case GICD_TYPER2:
- if (kvm_vgic_global_state.has_gicv4_1)
+ if (kvm_vgic_global_state.has_gicv4_1 && gic_cpuif_has_vsgi())
value = GICD_TYPER2_nASSGIcap;
break;
case GICD_IIDR:
@@ -119,7 +119,7 @@ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
/* Not a GICv4.1? No HW SGIs */
- if (!kvm_vgic_global_state.has_gicv4_1)
+ if (!kvm_vgic_global_state.has_gicv4_1 || !gic_cpuif_has_vsgi())
val &= ~GICD_CTLR_nASSGIreq;
/* Dist stays enabled? nASSGIreq is RO */
@@ -251,30 +251,35 @@ static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
vgic_enable_lpis(vcpu);
}
-static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
- gpa_t addr, unsigned int len)
+static bool vgic_mmio_vcpu_rdist_is_last(struct kvm_vcpu *vcpu)
{
- unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+ struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- struct vgic_redist_region *rdreg = vgic_cpu->rdreg;
- int target_vcpu_id = vcpu->vcpu_id;
- gpa_t last_rdist_typer = rdreg->base + GICR_TYPER +
- (rdreg->free_index - 1) * KVM_VGIC_V3_REDIST_SIZE;
- u64 value;
+ struct vgic_redist_region *iter, *rdreg = vgic_cpu->rdreg;
- value = (u64)(mpidr & GENMASK(23, 0)) << 32;
- value |= ((target_vcpu_id & 0xffff) << 8);
+ if (!rdreg)
+ return false;
- if (addr == last_rdist_typer)
- value |= GICR_TYPER_LAST;
- if (vgic_has_its(vcpu->kvm))
- value |= GICR_TYPER_PLPIS;
+ if (vgic_cpu->rdreg_index < rdreg->free_index - 1) {
+ return false;
+ } else if (rdreg->count && vgic_cpu->rdreg_index == (rdreg->count - 1)) {
+ struct list_head *rd_regions = &vgic->rd_regions;
+ gpa_t end = rdreg->base + rdreg->count * KVM_VGIC_V3_REDIST_SIZE;
- return extract_bytes(value, addr & 7, len);
+ /*
+	 * The rdist is the last one of its redist region;
+	 * check that no other populated rdist region starts
+	 * contiguously where this one ends.
+ */
+ list_for_each_entry(iter, rd_regions, list) {
+ if (iter->base == end && iter->free_index > 0)
+ return false;
+ }
+ }
+ return true;
}
-static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
- gpa_t addr, unsigned int len)
+static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
{
unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
int target_vcpu_id = vcpu->vcpu_id;
@@ -286,7 +291,9 @@ static unsigned long vgic_uaccess_read_v3r_typer(struct kvm_vcpu *vcpu,
if (vgic_has_its(vcpu->kvm))
value |= GICR_TYPER_PLPIS;
- /* reporting of the Last bit is not supported for userspace */
+ if (vgic_mmio_vcpu_rdist_is_last(vcpu))
+ value |= GICR_TYPER_LAST;
+
return extract_bytes(value, addr & 7, len);
}
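GICR_TYPER.Last is now computed rather than assumed: a redistributor is last unless a later slot of its own region is populated, or, when it fills the final slot of a sized region, another populated region starts exactly at this region's end. A standalone model of the decision (a sorted array instead of the kernel list, 128K per redistributor frame pair):

/*
 * Standalone model of vgic_mmio_vcpu_rdist_is_last():
 *  - not last if a later slot of the same region is already in use;
 *  - if this is the final slot of a sized region, it is still not
 *    last when another populated region starts right at this
 *    region's end address.
 */
#include <stdbool.h>
#include <stdio.h>

#define RDIST_SZ 0x20000ULL	/* two 64K frames per redistributor */

struct rd_region {
	unsigned long long base;
	unsigned int count;		/* 0 = legacy, unsized */
	unsigned int free_index;	/* slots already handed out */
};

static bool rdist_is_last(const struct rd_region *regs, int nregs,
			  int my_reg, unsigned int my_index)
{
	const struct rd_region *r = &regs[my_reg];

	if (my_index < r->free_index - 1)
		return false;
	if (r->count && my_index == r->count - 1) {
		unsigned long long end = r->base + r->count * RDIST_SZ;

		for (int i = 0; i < nregs; i++)
			if (regs[i].base == end && regs[i].free_index > 0)
				return false;
	}
	return true;
}

int main(void)
{
	struct rd_region regs[] = {
		{ .base = 0x100000, .count = 2, .free_index = 2 },
		{ .base = 0x140000, .count = 2, .free_index = 1 }, /* contiguous */
	};

	/* Slot 1 of region 0: region full, but a populated region follows. */
	printf("%d\n", rdist_is_last(regs, 2, 0, 1));	/* 0: not last */
	/* Slot 0 of region 1: the last redistributor handed out. */
	printf("%d\n", rdist_is_last(regs, 2, 1, 0));	/* 1: last */
	return 0;
}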
@@ -612,7 +619,7 @@ static const struct vgic_register_region vgic_v3_rd_registers[] = {
VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
- vgic_uaccess_read_v3r_typer, vgic_mmio_uaccess_write_wi, 8,
+ NULL, vgic_mmio_uaccess_write_wi, 8,
VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
@@ -714,6 +721,7 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
return -EINVAL;
vgic_cpu->rdreg = rdreg;
+ vgic_cpu->rdreg_index = rdreg->free_index;
rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;
@@ -768,7 +776,7 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
}
/**
- * vgic_v3_insert_redist_region - Insert a new redistributor region
+ * vgic_v3_alloc_redist_region - Allocate a new redistributor region
*
* Performs various checks before inserting the rdist region in the list.
* Those tests depend on whether the size of the rdist region is known
@@ -782,8 +790,8 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
*
* Return 0 on success, < 0 otherwise
*/
-static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
- gpa_t base, uint32_t count)
+static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
+ gpa_t base, uint32_t count)
{
struct vgic_dist *d = &kvm->arch.vgic;
struct vgic_redist_region *rdreg;
@@ -791,10 +799,6 @@ static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
size_t size = count * KVM_VGIC_V3_REDIST_SIZE;
int ret;
- /* single rdist region already set ?*/
- if (!count && !list_empty(rd_regions))
- return -EINVAL;
-
/* cross the end of memory ? */
if (base + size < base)
return -EINVAL;
@@ -805,11 +809,15 @@ static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
} else {
rdreg = list_last_entry(rd_regions,
struct vgic_redist_region, list);
- if (index != rdreg->index + 1)
+
+ /* Don't mix single region and discrete redist regions */
+ if (!count && rdreg->count)
return -EINVAL;
- /* Cannot add an explicitly sized regions after legacy region */
- if (!rdreg->count)
+ if (!count)
+ return -EEXIST;
+
+ if (index != rdreg->index + 1)
return -EINVAL;
}
@@ -848,11 +856,17 @@ free:
return ret;
}
+void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
+{
+ list_del(&rdreg->list);
+ kfree(rdreg);
+}
+
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
{
int ret;
- ret = vgic_v3_insert_redist_region(kvm, index, addr, count);
+ ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
if (ret)
return ret;
@@ -861,8 +875,13 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
* afterwards will register the iodevs when needed.
*/
ret = vgic_register_all_redist_iodevs(kvm);
- if (ret)
+ if (ret) {
+ struct vgic_redist_region *rdreg;
+
+ rdreg = vgic_v3_rdist_region_from_index(kvm, index);
+ vgic_v3_free_redist_region(rdreg);
return ret;
+ }
return 0;
}
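vgic_v3_set_redist_base() now pairs the allocation with an explicit unwind: if registering the iodevs fails, the region just added is freed again so the call leaves no state behind. A minimal model of that allocate/register/unwind shape:

/*
 * Standalone model of the unwind added above: step two failing
 * unpicks step one, making the whole call a no-op on error.
 */
#include <errno.h>
#include <stdlib.h>

struct region { struct region *next; int index; };

static struct region *regions;

static int alloc_region(int index)
{
	struct region *r = malloc(sizeof(*r));

	if (!r)
		return -ENOMEM;
	r->index = index;
	r->next = regions;
	regions = r;
	return 0;
}

static void free_region(int index)
{
	for (struct region **p = &regions; *p; p = &(*p)->next) {
		if ((*p)->index == index) {
			struct region *victim = *p;

			*p = victim->next;
			free(victim);
			return;
		}
	}
}

static int register_iodevs(void) { return -ENOMEM; /* simulate failure */ }

static int set_redist_base(int index)
{
	int ret = alloc_region(index);

	if (ret)
		return ret;
	ret = register_iodevs();
	if (ret) {
		free_region(index);	/* unwind, mirroring the hunk */
		return ret;
	}
	return 0;
}

int main(void)
{
	return (set_redist_base(0) == -ENOMEM && regions == NULL) ? 0 : 1;
}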
diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c
index b2d73fc0d1ef..48c6067fc5ec 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio.c
@@ -938,10 +938,9 @@ vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
return region;
}
-static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
gpa_t addr, u32 *val)
{
- struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
const struct vgic_register_region *region;
struct kvm_vcpu *r_vcpu;
@@ -960,10 +959,9 @@ static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
return 0;
}
-static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
gpa_t addr, const u32 *val)
{
- struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
const struct vgic_register_region *region;
struct kvm_vcpu *r_vcpu;
@@ -986,9 +984,9 @@ int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
bool is_write, int offset, u32 *val)
{
if (is_write)
- return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
+ return vgic_uaccess_write(vcpu, dev, offset, val);
else
- return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
+ return vgic_uaccess_read(vcpu, dev, offset, val);
}
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index 11934c2af2f4..2c580204f1dc 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -108,11 +108,22 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
* If this causes us to lower the level, we have to also clear
* the physical active state, since we will otherwise never be
* told when the interrupt becomes asserted again.
+ *
+ * Another case is when the interrupt requires a helping hand
+ * on deactivation (no HW deactivation, for example).
*/
- if (vgic_irq_is_mapped_level(irq) && (val & GICH_LR_PENDING_BIT)) {
- irq->line_level = vgic_get_phys_line_level(irq);
+ if (vgic_irq_is_mapped_level(irq)) {
+ bool resample = false;
+
+ if (val & GICH_LR_PENDING_BIT) {
+ irq->line_level = vgic_get_phys_line_level(irq);
+ resample = !irq->line_level;
+ } else if (vgic_irq_needs_resampling(irq) &&
+ !(irq->active || irq->pending_latch)) {
+ resample = true;
+ }
- if (!irq->line_level)
+ if (resample)
vgic_irq_set_phys_active(irq, false);
}
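The new fold logic deactivates the physical line in two situations instead of one. A standalone truth table of the condition (it mirrors the hunk above; vgic_irq_needs_resampling() is modelled as a plain flag):

/*
 * The physical active state is cleared either when a pending
 * mapped-level IRQ has dropped its line, or when a needs-resampling
 * IRQ is neither active nor latched pending.
 */
#include <stdbool.h>
#include <stdio.h>

static bool should_resample(bool lr_pending, bool line_level,
			    bool needs_resampling, bool active,
			    bool pending_latch)
{
	if (lr_pending)
		return !line_level;
	if (needs_resampling && !(active || pending_latch))
		return true;
	return false;
}

int main(void)
{
	/* Pending in the LR, line now low: deactivate and resample. */
	printf("%d\n", should_resample(true, false, false, false, false));
	/* SW-resampled IRQ fully retired by the guest: resample too. */
	printf("%d\n", should_resample(false, false, true, false, false));
	/* Still active in the guest: leave the physical state alone. */
	printf("%d\n", should_resample(false, false, true, true, false));
	return 0;
}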
@@ -152,7 +163,7 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
if (irq->group)
val |= GICH_LR_GROUP1;
- if (irq->hw) {
+ if (irq->hw && !vgic_irq_needs_resampling(irq)) {
val |= GICH_LR_HW;
val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
/*
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 52915b342351..66004f61cd83 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
@@ -99,11 +101,22 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
* If this causes us to lower the level, we have to also clear
* the physical active state, since we will otherwise never be
* told when the interrupt becomes asserted again.
+ *
+ * Another case is when the interrupt requires a helping hand
+ * on deactivation (no HW deactivation, for example).
*/
- if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT)) {
- irq->line_level = vgic_get_phys_line_level(irq);
+ if (vgic_irq_is_mapped_level(irq)) {
+ bool resample = false;
+
+ if (val & ICH_LR_PENDING_BIT) {
+ irq->line_level = vgic_get_phys_line_level(irq);
+ resample = !irq->line_level;
+ } else if (vgic_irq_needs_resampling(irq) &&
+ !(irq->active || irq->pending_latch)) {
+ resample = true;
+ }
- if (!irq->line_level)
+ if (resample)
vgic_irq_set_phys_active(irq, false);
}
@@ -134,7 +147,7 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
}
}
- if (irq->hw) {
+ if (irq->hw && !vgic_irq_needs_resampling(irq)) {
val |= ICH_LR_HW;
val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
/*
@@ -356,6 +369,32 @@ retry:
return 0;
}
+/*
+ * The deactivation of the doorbell interrupt will trigger the
+ * unmapping of the associated vPE.
+ */
+static void unmap_all_vpes(struct vgic_dist *dist)
+{
+ struct irq_desc *desc;
+ int i;
+
+ for (i = 0; i < dist->its_vm.nr_vpes; i++) {
+ desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
+ irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
+ }
+}
+
+static void map_all_vpes(struct vgic_dist *dist)
+{
+ struct irq_desc *desc;
+ int i;
+
+ for (i = 0; i < dist->its_vm.nr_vpes; i++) {
+ desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
+ irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
+ }
+}
+
/**
* vgic_v3_save_pending_tables - Save the pending tables into guest RAM
* kvm lock and all vcpu lock must be held
@@ -365,13 +404,28 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
struct vgic_dist *dist = &kvm->arch.vgic;
struct vgic_irq *irq;
gpa_t last_ptr = ~(gpa_t)0;
- int ret;
+ bool vlpi_avail = false;
+ int ret = 0;
u8 val;
+ if (unlikely(!vgic_initialized(kvm)))
+ return -ENXIO;
+
+ /*
+	 * Prepare for reading any VLPI state.
+	 * The vgic-initialized check above also ensures that the doorbells
+	 * have already been allocated and enabled.
+ */
+ if (kvm_vgic_global_state.has_gicv4_1) {
+ unmap_all_vpes(dist);
+ vlpi_avail = true;
+ }
+
list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
int byte_offset, bit_nr;
struct kvm_vcpu *vcpu;
gpa_t pendbase, ptr;
+ bool is_pending;
bool stored;
vcpu = irq->target_vcpu;
@@ -387,24 +441,35 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
if (ptr != last_ptr) {
ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
if (ret)
- return ret;
+ goto out;
last_ptr = ptr;
}
stored = val & (1U << bit_nr);
- if (stored == irq->pending_latch)
+
+ is_pending = irq->pending_latch;
+
+ if (irq->hw && vlpi_avail)
+ vgic_v4_get_vlpi_state(irq, &is_pending);
+
+ if (stored == is_pending)
continue;
- if (irq->pending_latch)
+ if (is_pending)
val |= 1 << bit_nr;
else
val &= ~(1 << bit_nr);
ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
if (ret)
- return ret;
+ goto out;
}
- return 0;
+
+out:
+ if (vlpi_avail)
+ map_all_vpes(dist);
+
+ return ret;
}
/**
@@ -574,9 +639,13 @@ early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
*/
int vgic_v3_probe(const struct gic_kvm_info *info)
{
- u32 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_ich_vtr_el2);
+ u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
+ bool has_v2;
int ret;
+ has_v2 = ich_vtr_el2 >> 63;
+ ich_vtr_el2 = (u32)ich_vtr_el2;
+
/*
* The ListRegs field is 5 bits, but there is an architectural
* maximum of 16 list registers. Just ignore bit 4...
@@ -594,13 +663,15 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
gicv4_enable ? "en" : "dis");
}
+ kvm_vgic_global_state.vcpu_base = 0;
+
if (!info->vcpu.start) {
kvm_info("GICv3: no GICV resource entry\n");
- kvm_vgic_global_state.vcpu_base = 0;
+ } else if (!has_v2) {
+ pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
} else if (!PAGE_ALIGNED(info->vcpu.start)) {
pr_warn("GICV physical address 0x%llx not page aligned\n",
(unsigned long long)info->vcpu.start);
- kvm_vgic_global_state.vcpu_base = 0;
} else {
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
kvm_vgic_global_state.can_emulate_gicv2 = true;
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index 66508b03094f..c1845d8f5f7e 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -203,6 +203,25 @@ void vgic_v4_configure_vsgis(struct kvm *kvm)
kvm_arm_resume_guest(kvm);
}
+/*
+ * Must be called with GICv4.1 and the vPE unmapped: unmapping
+ * guarantees that any VPT caches associated with the vPE have
+ * been invalidated, so the VLPI state can be read by peeking
+ * at the VPT itself.
+ */
+void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
+{
+ struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+ int mask = BIT(irq->intid % BITS_PER_BYTE);
+ void *va;
+ u8 *ptr;
+
+ va = page_address(vpe->vpt_page);
+ ptr = va + irq->intid / BITS_PER_BYTE;
+
+ *val = !!(*ptr & mask);
+}
+
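The VPT is a plain pending bitmap, one bit per INTID, so the byte is intid / 8 and the bit within that byte is intid % 8. A standalone model of the peek:

#include <assert.h>
#include <string.h>

#define BITS_PER_BYTE 8

static int vlpi_is_pending(const unsigned char *vpt, unsigned int intid)
{
	unsigned int mask = 1u << (intid % BITS_PER_BYTE);

	return !!(vpt[intid / BITS_PER_BYTE] & mask);
}

int main(void)
{
	unsigned char vpt[2048];

	memset(vpt, 0, sizeof(vpt));
	vpt[8192 / 8] |= 1u << (8192 % 8);	/* mark INTID 8192 pending */

	assert(vlpi_is_pending(vpt, 8192));
	assert(!vlpi_is_pending(vpt, 8193));
	return 0;
}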
/**
* vgic_v4_init - Initialize the GICv4 data structures
* @kvm: Pointer to the VM being initialized
@@ -385,6 +404,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
struct vgic_its *its;
struct vgic_irq *irq;
struct its_vlpi_map map;
+ unsigned long flags;
int ret;
if (!vgic_supports_direct_msis(kvm))
@@ -430,6 +450,24 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
irq->host_irq = virq;
atomic_inc(&map.vpe->vlpi_count);
+ /* Transfer pending state */
+ raw_spin_lock_irqsave(&irq->irq_lock, flags);
+ if (irq->pending_latch) {
+ ret = irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING,
+ irq->pending_latch);
+ WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);
+
+ /*
+ * Clear pending_latch and communicate this state
+ * change via vgic_queue_irq_unlock.
+ */
+ irq->pending_latch = false;
+ vgic_queue_irq_unlock(kvm, irq, flags);
+ } else {
+ raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+ }
+
out:
mutex_unlock(&its->its_lock);
return ret;
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 1c597c9885fa..111bff47e471 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -182,8 +182,8 @@ bool vgic_get_phys_line_level(struct vgic_irq *irq)
BUG_ON(!irq->hw);
- if (irq->get_input_level)
- return irq->get_input_level(irq->intid);
+ if (irq->ops && irq->ops->get_input_level)
+ return irq->ops->get_input_level(irq->intid);
WARN_ON(irq_get_irqchip_state(irq->host_irq,
IRQCHIP_STATE_PENDING,
@@ -255,7 +255,8 @@ static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
* Return negative if "a" sorts before "b", 0 to preserve order, and positive
* to sort "b" before "a".
*/
-static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
+static int vgic_irq_cmp(void *priv, const struct list_head *a,
+ const struct list_head *b)
{
struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
@@ -479,7 +480,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
unsigned int host_irq,
- bool (*get_input_level)(int vindid))
+ struct irq_ops *ops)
{
struct irq_desc *desc;
struct irq_data *data;
@@ -499,7 +500,7 @@ static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
irq->hw = true;
irq->host_irq = host_irq;
irq->hwintid = data->hwirq;
- irq->get_input_level = get_input_level;
+ irq->ops = ops;
return 0;
}
@@ -508,11 +509,11 @@ static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
irq->hw = false;
irq->hwintid = 0;
- irq->get_input_level = NULL;
+ irq->ops = NULL;
}
int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
- u32 vintid, bool (*get_input_level)(int vindid))
+ u32 vintid, struct irq_ops *ops)
{
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
unsigned long flags;
@@ -521,7 +522,7 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
BUG_ON(!irq);
raw_spin_lock_irqsave(&irq->irq_lock, flags);
- ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level);
+ ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
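The per-IRQ get_input_level callback now travels inside a struct irq_ops, which gives later changes somewhere to hang extra behaviour off the same pointer. The sketch below models the indirection; the real structure lives in include/kvm/arm_vgic.h and may carry more members than the one this hunk demonstrably uses:

#include <stdbool.h>

struct irq_ops {
	/* report the current level of the physical input line */
	bool (*get_input_level)(int vintid);
};

struct virq {
	const struct irq_ops *ops;
};

static bool phys_line_level(const struct virq *irq, int vintid)
{
	if (irq->ops && irq->ops->get_input_level)
		return irq->ops->get_input_level(vintid);
	return false;	/* would fall back to querying the irqchip */
}

static bool timer_line(int vintid) { (void)vintid; return true; }

static const struct irq_ops timer_ops = { .get_input_level = timer_line };

int main(void)
{
	struct virq irq = { .ops = &timer_ops };

	return phys_line_level(&irq, 27) ? 0 : 1;
}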
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 64fcd7511110..dc1f3d1657ee 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -293,6 +293,7 @@ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
u32 index);
+void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg);
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);
@@ -317,5 +318,6 @@ bool vgic_supports_direct_msis(struct kvm *kvm);
int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
void vgic_v4_configure_vsgis(struct kvm *kvm);
+void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
#endif
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index d31e1169d9b8..6dd56a49790a 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
lib-y := clear_user.o delay.o copy_from_user.o \
copy_to_user.o copy_in_user.o copy_page.o \
- clear_page.o csum.o memchr.o memcpy.o memmove.o \
+ clear_page.o csum.o insn.o memchr.o memcpy.o \
memset.o memcmp.o strcmp.o strncmp.o strlen.o \
strnlen.o strchr.o strrchr.o tishift.o
@@ -18,3 +18,5 @@ obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
obj-$(CONFIG_ARM64_MTE) += mte.o
+
+obj-$(CONFIG_KASAN_SW_TAGS) += kasan_sw_tags.o
diff --git a/arch/arm64/lib/clear_page.S b/arch/arm64/lib/clear_page.S
index 073acbf02a7c..b84b179edba3 100644
--- a/arch/arm64/lib/clear_page.S
+++ b/arch/arm64/lib/clear_page.S
@@ -14,7 +14,7 @@
* Parameters:
* x0 - dest
*/
-SYM_FUNC_START(clear_page)
+SYM_FUNC_START_PI(clear_page)
mrs x1, dczid_el0
and w1, w1, #0xf
mov x2, #4
@@ -25,5 +25,5 @@ SYM_FUNC_START(clear_page)
tst x0, #(PAGE_SIZE - 1)
b.ne 1b
ret
-SYM_FUNC_END(clear_page)
+SYM_FUNC_END_PI(clear_page)
EXPORT_SYMBOL(clear_page)
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index af9afcbec92c..a7efb2ad2a1c 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -1,12 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Based on arch/arm/lib/clear_user.S
- *
- * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2021 Arm Ltd.
*/
-#include <linux/linkage.h>
-#include <asm/asm-uaccess.h>
+#include <linux/linkage.h>
#include <asm/assembler.h>
.text
@@ -19,25 +16,33 @@
*
* Alignment fixed up by hardware.
*/
+
+ .p2align 4
+ // Alignment is for the loop, but since the prologue (including BTI)
+ // is also 16 bytes we can keep any padding outside the function
SYM_FUNC_START(__arch_clear_user)
- mov x2, x1 // save the size for fixup return
+ add x2, x0, x1
subs x1, x1, #8
b.mi 2f
1:
-user_ldst 9f, sttr, xzr, x0, 8
+USER(9f, sttr xzr, [x0])
+ add x0, x0, #8
subs x1, x1, #8
- b.pl 1b
-2: adds x1, x1, #4
- b.mi 3f
-user_ldst 9f, sttr, wzr, x0, 4
- sub x1, x1, #4
-3: adds x1, x1, #2
- b.mi 4f
-user_ldst 9f, sttrh, wzr, x0, 2
- sub x1, x1, #2
-4: adds x1, x1, #1
- b.mi 5f
-user_ldst 9f, sttrb, wzr, x0, 0
+ b.hi 1b
+USER(9f, sttr xzr, [x2, #-8])
+ mov x0, #0
+ ret
+
+2: tbz x1, #2, 3f
+USER(9f, sttr wzr, [x0])
+USER(8f, sttr wzr, [x2, #-4])
+ mov x0, #0
+ ret
+
+3: tbz x1, #1, 4f
+USER(9f, sttrh wzr, [x0])
+4: tbz x1, #0, 5f
+USER(7f, sttrb wzr, [x2, #-1])
5: mov x0, #0
ret
SYM_FUNC_END(__arch_clear_user)
@@ -45,6 +50,8 @@ EXPORT_SYMBOL(__arch_clear_user)
.section .fixup,"ax"
.align 2
-9: mov x0, x2 // return the original size
+7: sub x0, x2, #5 // Adjust for faulting on the final byte...
+8: add x0, x0, #4 // ...or the second word of the 4-7 byte case
+9: sub x0, x2, x0
ret
.previous
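The rewritten fixup no longer stashes the original size: x2 holds one-past-the-end of the buffer for the whole routine, so the generic fault path (label 9) can return end minus the fault pointer, with labels 7 and 8 only nudging the pointer first for the two stores that write relative to the end. A model of the arithmetic:

/*
 * Because x2 tracks one-past-the-end throughout, "bytes not cleared"
 * on a fault is simply end - current_pointer.
 */
#include <stdio.h>

static unsigned long fixup_9(unsigned long end, unsigned long cur)
{
	return end - cur;	/* bytes still uncleared */
}

int main(void)
{
	unsigned long buf = 0x1000, len = 100;
	unsigned long end = buf + len;

	/* fault after clearing 64 bytes: pointer has advanced to buf+64 */
	printf("%lu\n", fixup_9(end, buf + 64));	/* 36 left */
	return 0;
}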
diff --git a/arch/arm64/lib/copy_page.S b/arch/arm64/lib/copy_page.S
index e7a793961408..29144f4cd449 100644
--- a/arch/arm64/lib/copy_page.S
+++ b/arch/arm64/lib/copy_page.S
@@ -17,7 +17,7 @@
* x0 - dest
* x1 - src
*/
-SYM_FUNC_START(copy_page)
+SYM_FUNC_START_PI(copy_page)
alternative_if ARM64_HAS_NO_HW_PREFETCH
// Prefetch three cache lines ahead.
prfm pldl1strm, [x1, #128]
@@ -75,5 +75,5 @@ alternative_else_nop_endif
stnp x16, x17, [x0, #112 - 256]
ret
-SYM_FUNC_END(copy_page)
+SYM_FUNC_END_PI(copy_page)
EXPORT_SYMBOL(copy_page)
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/lib/insn.c
index 6c0de2f60ea9..b506a4b1e38c 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/lib/insn.c
@@ -7,21 +7,14 @@
*/
#include <linux/bitops.h>
#include <linux/bug.h>
-#include <linux/compiler.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/stop_machine.h>
+#include <linux/printk.h>
+#include <linux/sizes.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
-#include <asm/fixmap.h>
+#include <asm/errno.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
-#include <asm/sections.h>
#define AARCH64_INSN_SF_BIT BIT(31)
#define AARCH64_INSN_N_BIT BIT(22)
@@ -30,7 +23,7 @@
static const int aarch64_insn_encoding_class[] = {
AARCH64_INSN_CLS_UNKNOWN,
AARCH64_INSN_CLS_UNKNOWN,
- AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_SVE,
AARCH64_INSN_CLS_UNKNOWN,
AARCH64_INSN_CLS_LDST,
AARCH64_INSN_CLS_DP_REG,
@@ -83,81 +76,6 @@ bool aarch64_insn_is_branch_imm(u32 insn)
aarch64_insn_is_bcond(insn));
}
-static DEFINE_RAW_SPINLOCK(patch_lock);
-
-static bool is_exit_text(unsigned long addr)
-{
- /* discarded with init text/data */
- return system_state < SYSTEM_RUNNING &&
- addr >= (unsigned long)__exittext_begin &&
- addr < (unsigned long)__exittext_end;
-}
-
-static bool is_image_text(unsigned long addr)
-{
- return core_kernel_text(addr) || is_exit_text(addr);
-}
-
-static void __kprobes *patch_map(void *addr, int fixmap)
-{
- unsigned long uintaddr = (uintptr_t) addr;
- bool image = is_image_text(uintaddr);
- struct page *page;
-
- if (image)
- page = phys_to_page(__pa_symbol(addr));
- else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
- page = vmalloc_to_page(addr);
- else
- return addr;
-
- BUG_ON(!page);
- return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
- (uintaddr & ~PAGE_MASK));
-}
-
-static void __kprobes patch_unmap(int fixmap)
-{
- clear_fixmap(fixmap);
-}
-/*
- * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
- * little-endian.
- */
-int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
-{
- int ret;
- __le32 val;
-
- ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
- if (!ret)
- *insnp = le32_to_cpu(val);
-
- return ret;
-}
-
-static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
-{
- void *waddr = addr;
- unsigned long flags = 0;
- int ret;
-
- raw_spin_lock_irqsave(&patch_lock, flags);
- waddr = patch_map(addr, FIX_TEXT_POKE0);
-
- ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);
-
- patch_unmap(FIX_TEXT_POKE0);
- raw_spin_unlock_irqrestore(&patch_lock, flags);
-
- return ret;
-}
-
-int __kprobes aarch64_insn_write(void *addr, u32 insn)
-{
- return __aarch64_insn_write(addr, cpu_to_le32(insn));
-}
-
bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
/* ldr/ldrsw (literal), prfm */
@@ -187,67 +105,6 @@ bool __kprobes aarch64_insn_is_branch(u32 insn)
aarch64_insn_is_bcond(insn);
}
-int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
-{
- u32 *tp = addr;
- int ret;
-
- /* A64 instructions must be word aligned */
- if ((uintptr_t)tp & 0x3)
- return -EINVAL;
-
- ret = aarch64_insn_write(tp, insn);
- if (ret == 0)
- __flush_icache_range((uintptr_t)tp,
- (uintptr_t)tp + AARCH64_INSN_SIZE);
-
- return ret;
-}
-
-struct aarch64_insn_patch {
- void **text_addrs;
- u32 *new_insns;
- int insn_cnt;
- atomic_t cpu_count;
-};
-
-static int __kprobes aarch64_insn_patch_text_cb(void *arg)
-{
- int i, ret = 0;
- struct aarch64_insn_patch *pp = arg;
-
- /* The first CPU becomes master */
- if (atomic_inc_return(&pp->cpu_count) == 1) {
- for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
- ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
- pp->new_insns[i]);
- /* Notify other processors with an additional increment. */
- atomic_inc(&pp->cpu_count);
- } else {
- while (atomic_read(&pp->cpu_count) <= num_online_cpus())
- cpu_relax();
- isb();
- }
-
- return ret;
-}
-
-int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
-{
- struct aarch64_insn_patch patch = {
- .text_addrs = addrs,
- .new_insns = insns,
- .insn_cnt = cnt,
- .cpu_count = ATOMIC_INIT(0),
- };
-
- if (cnt <= 0)
- return -EINVAL;
-
- return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
- cpu_online_mask);
-}
-
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
u32 *maskp, int *shiftp)
{
@@ -1432,104 +1289,6 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn)
return insn & CRM_MASK;
}
-static bool __kprobes __check_eq(unsigned long pstate)
-{
- return (pstate & PSR_Z_BIT) != 0;
-}
-
-static bool __kprobes __check_ne(unsigned long pstate)
-{
- return (pstate & PSR_Z_BIT) == 0;
-}
-
-static bool __kprobes __check_cs(unsigned long pstate)
-{
- return (pstate & PSR_C_BIT) != 0;
-}
-
-static bool __kprobes __check_cc(unsigned long pstate)
-{
- return (pstate & PSR_C_BIT) == 0;
-}
-
-static bool __kprobes __check_mi(unsigned long pstate)
-{
- return (pstate & PSR_N_BIT) != 0;
-}
-
-static bool __kprobes __check_pl(unsigned long pstate)
-{
- return (pstate & PSR_N_BIT) == 0;
-}
-
-static bool __kprobes __check_vs(unsigned long pstate)
-{
- return (pstate & PSR_V_BIT) != 0;
-}
-
-static bool __kprobes __check_vc(unsigned long pstate)
-{
- return (pstate & PSR_V_BIT) == 0;
-}
-
-static bool __kprobes __check_hi(unsigned long pstate)
-{
- pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
- return (pstate & PSR_C_BIT) != 0;
-}
-
-static bool __kprobes __check_ls(unsigned long pstate)
-{
- pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
- return (pstate & PSR_C_BIT) == 0;
-}
-
-static bool __kprobes __check_ge(unsigned long pstate)
-{
- pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
- return (pstate & PSR_N_BIT) == 0;
-}
-
-static bool __kprobes __check_lt(unsigned long pstate)
-{
- pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */
- return (pstate & PSR_N_BIT) != 0;
-}
-
-static bool __kprobes __check_gt(unsigned long pstate)
-{
- /*PSR_N_BIT ^= PSR_V_BIT */
- unsigned long temp = pstate ^ (pstate << 3);
-
- temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
- return (temp & PSR_N_BIT) == 0;
-}
-
-static bool __kprobes __check_le(unsigned long pstate)
-{
- /*PSR_N_BIT ^= PSR_V_BIT */
- unsigned long temp = pstate ^ (pstate << 3);
-
- temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */
- return (temp & PSR_N_BIT) != 0;
-}
-
-static bool __kprobes __check_al(unsigned long pstate)
-{
- return true;
-}
-
-/*
- * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
- * it behaves identically to 0b1110 ("al").
- */
-pstate_check_t * const aarch32_opcode_cond_checks[16] = {
- __check_eq, __check_ne, __check_cs, __check_cc,
- __check_mi, __check_pl, __check_vs, __check_vc,
- __check_hi, __check_ls, __check_ge, __check_lt,
- __check_gt, __check_le, __check_al, __check_al
-};
-
static bool range_of_ones(u64 val)
{
/* Doesn't handle full ones or full zeroes */
diff --git a/arch/arm64/lib/kasan_sw_tags.S b/arch/arm64/lib/kasan_sw_tags.S
new file mode 100644
index 000000000000..5b04464c045e
--- /dev/null
+++ b/arch/arm64/lib/kasan_sw_tags.S
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google LLC
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * Report a tag mismatch detected by tag-based KASAN.
+ *
+ * A compiler-generated thunk calls this with a non-AAPCS calling
+ * convention. Upon entry to this function, registers are as follows:
+ *
+ * x0: fault address (see below for restore)
+ * x1: fault description (see below for restore)
+ * x2 to x15: callee-saved
+ * x16 to x17: safe to clobber
+ * x18 to x30: callee-saved
+ * sp: pre-decremented by 256 bytes (see below for restore)
+ *
+ * The caller has decremented the SP by 256 bytes, and created a
+ * structure on the stack as follows:
+ *
+ * sp + 0..15: x0 and x1 to be restored
+ * sp + 16..231: free for use
+ * sp + 232..247: x29 and x30 (same as in GPRs)
+ * sp + 248..255: free for use
+ *
+ * Note that this is not a struct pt_regs.
+ *
+ * To call a regular AAPCS function we must save x2 to x15 (which we can
+ * store in the gaps), and create a frame record (for which we can use
+ * x29 and x30 spilled by the caller as those match the GPRs).
+ *
+ * The caller expects x0 and x1 to be restored from the structure, and
+ * for the structure to be removed from the stack (i.e. the SP must be
+ * incremented by 256 prior to return).
+ */
+SYM_CODE_START(__hwasan_tag_mismatch)
+#ifdef BTI_C
+ BTI_C
+#endif
+ add x29, sp, #232
+ stp x2, x3, [sp, #8 * 2]
+ stp x4, x5, [sp, #8 * 4]
+ stp x6, x7, [sp, #8 * 6]
+ stp x8, x9, [sp, #8 * 8]
+ stp x10, x11, [sp, #8 * 10]
+ stp x12, x13, [sp, #8 * 12]
+ stp x14, x15, [sp, #8 * 14]
+#ifndef CONFIG_SHADOW_CALL_STACK
+ str x18, [sp, #8 * 18]
+#endif
+
+ mov x2, x30
+ bl kasan_tag_mismatch
+
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #8 * 2]
+ ldp x4, x5, [sp, #8 * 4]
+ ldp x6, x7, [sp, #8 * 6]
+ ldp x8, x9, [sp, #8 * 8]
+ ldp x10, x11, [sp, #8 * 10]
+ ldp x12, x13, [sp, #8 * 12]
+ ldp x14, x15, [sp, #8 * 14]
+#ifndef CONFIG_SHADOW_CALL_STACK
+ ldr x18, [sp, #8 * 18]
+#endif
+ ldp x29, x30, [sp, #8 * 29]
+
+ /* remove the structure from the stack */
+ add sp, sp, #256
+ ret
+SYM_CODE_END(__hwasan_tag_mismatch)
+EXPORT_SYMBOL(__hwasan_tag_mismatch)
diff --git a/arch/arm64/lib/memchr.S b/arch/arm64/lib/memchr.S
index edf6b970a277..7c2276fdab54 100644
--- a/arch/arm64/lib/memchr.S
+++ b/arch/arm64/lib/memchr.S
@@ -1,9 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Based on arch/arm/lib/memchr.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2021 Arm Ltd.
*/
#include <linux/linkage.h>
@@ -19,16 +16,60 @@
* Returns:
* x0 - address of first occurrence of 'c' or 0
*/
+
+#define L(label) .L ## label
+
+#define REP8_01 0x0101010101010101
+#define REP8_7f 0x7f7f7f7f7f7f7f7f
+
+#define srcin x0
+#define chrin w1
+#define cntin x2
+
+#define result x0
+
+#define wordcnt x3
+#define rep01 x4
+#define repchr x5
+#define cur_word x6
+#define cur_byte w6
+#define tmp x7
+#define tmp2 x8
+
+ .p2align 4
+ nop
SYM_FUNC_START_WEAK_PI(memchr)
- and w1, w1, #0xff
-1: subs x2, x2, #1
- b.mi 2f
- ldrb w3, [x0], #1
- cmp w3, w1
- b.ne 1b
- sub x0, x0, #1
+ and chrin, chrin, #0xff
+ lsr wordcnt, cntin, #3
+ cbz wordcnt, L(byte_loop)
+ mov rep01, #REP8_01
+ mul repchr, x1, rep01
+ and cntin, cntin, #7
+L(word_loop):
+ ldr cur_word, [srcin], #8
+ sub wordcnt, wordcnt, #1
+ eor cur_word, cur_word, repchr
+ sub tmp, cur_word, rep01
+ orr tmp2, cur_word, #REP8_7f
+ bics tmp, tmp, tmp2
+ b.ne L(found_word)
+ cbnz wordcnt, L(word_loop)
+L(byte_loop):
+ cbz cntin, L(not_found)
+ ldrb cur_byte, [srcin], #1
+ sub cntin, cntin, #1
+ cmp cur_byte, chrin
+ b.ne L(byte_loop)
+ sub srcin, srcin, #1
+ ret
+L(found_word):
+CPU_LE( rev tmp, tmp)
+ clz tmp, tmp
+ sub tmp, tmp, #64
+ add result, srcin, tmp, asr #3
ret
-2: mov x0, #0
+L(not_found):
+ mov result, #0
ret
SYM_FUNC_END_PI(memchr)
EXPORT_SYMBOL_NOKASAN(memchr)
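The word loop finds the byte without examining bytes one at a time: XOR the loaded word with the target byte replicated eight times, then detect a resulting zero byte with the classic (x - 0x01..01) & ~x & 0x80..80 test, which the asm phrases as bics tmp, x - REP8_01, x | REP8_7f. A C model of the trick (little-endian, as in the CPU_LE path above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REP8_01 0x0101010101010101ULL
#define REP8_7f 0x7f7f7f7f7f7f7f7fULL

static int first_match_byte(uint64_t word, unsigned char c)
{
	uint64_t x = word ^ (REP8_01 * c);	/* matching bytes become 0 */
	uint64_t t = (x - REP8_01) & ~(x | REP8_7f);

	if (!t)
		return -1;
	/* little-endian: the lowest set 0x80 marks the first match */
	return __builtin_ctzll(t) / 8;
}

int main(void)
{
	uint64_t w;

	memcpy(&w, "abcdefgh", 8);
	printf("%d\n", first_match_byte(w, 'd'));	/* 3 */
	printf("%d\n", first_match_byte(w, 'z'));	/* -1 */
	return 0;
}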
diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
index c0671e793ea9..7d956384222f 100644
--- a/arch/arm64/lib/memcmp.S
+++ b/arch/arm64/lib/memcmp.S
@@ -1,247 +1,139 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/e823e3abf5f89ecb/string/aarch64/memcmp.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-/*
-* compare memory areas(when two memory areas' offset are different,
-* alignment handled by the hardware)
-*
-* Parameters:
-* x0 - const memory area 1 pointer
-* x1 - const memory area 2 pointer
-* x2 - the maximal compare byte length
-* Returns:
-* x0 - a compare result, maybe less than, equal to, or greater than ZERO
-*/
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, unaligned accesses.
+ */
+
+#define L(label) .L ## label
/* Parameters and result. */
-src1 .req x0
-src2 .req x1
-limit .req x2
-result .req x0
+#define src1 x0
+#define src2 x1
+#define limit x2
+#define result w0
/* Internal variables. */
-data1 .req x3
-data1w .req w3
-data2 .req x4
-data2w .req w4
-has_nul .req x5
-diff .req x6
-endloop .req x7
-tmp1 .req x8
-tmp2 .req x9
-tmp3 .req x10
-pos .req x11
-limit_wd .req x12
-mask .req x13
+#define data1 x3
+#define data1w w3
+#define data1h x4
+#define data2 x5
+#define data2w w5
+#define data2h x6
+#define tmp1 x7
+#define tmp2 x8
SYM_FUNC_START_WEAK_PI(memcmp)
- cbz limit, .Lret0
- eor tmp1, src1, src2
- tst tmp1, #7
- b.ne .Lmisaligned8
- ands tmp1, src1, #7
- b.ne .Lmutual_align
- sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
- lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
- /*
- * The input source addresses are at alignment boundary.
- * Directly compare eight bytes each time.
- */
-.Lloop_aligned:
- ldr data1, [src1], #8
- ldr data2, [src2], #8
-.Lstart_realigned:
- subs limit_wd, limit_wd, #1
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, cs /* Last Dword or differences. */
- cbz endloop, .Lloop_aligned
-
- /* Not reached the limit, must have found a diff. */
- tbz limit_wd, #63, .Lnot_limit
-
- /* Limit % 8 == 0 => the diff is in the last 8 bytes. */
- ands limit, limit, #7
- b.eq .Lnot_limit
- /*
- * The remained bytes less than 8. It is needed to extract valid data
- * from last eight bytes of the intended memory range.
- */
- lsl limit, limit, #3 /* bytes-> bits. */
- mov mask, #~0
-CPU_BE( lsr mask, mask, limit )
-CPU_LE( lsl mask, mask, limit )
- bic data1, data1, mask
- bic data2, data2, mask
-
- orr diff, diff, mask
- b .Lnot_limit
-
-.Lmutual_align:
- /*
- * Sources are mutually aligned, but are not currently at an
- * alignment boundary. Round down the addresses and then mask off
- * the bytes that precede the start point.
- */
- bic src1, src1, #7
- bic src2, src2, #7
- ldr data1, [src1], #8
- ldr data2, [src2], #8
- /*
- * We can not add limit with alignment offset(tmp1) here. Since the
- * addition probably make the limit overflown.
- */
- sub limit_wd, limit, #1/*limit != 0, so no underflow.*/
- and tmp3, limit_wd, #7
- lsr limit_wd, limit_wd, #3
- add tmp3, tmp3, tmp1
- add limit_wd, limit_wd, tmp3, lsr #3
- add limit, limit, tmp1/* Adjust the limit for the extra. */
-
- lsl tmp1, tmp1, #3/* Bytes beyond alignment -> bits.*/
- neg tmp1, tmp1/* Bits to alignment -64. */
- mov tmp2, #~0
- /*mask off the non-intended bytes before the start address.*/
-CPU_BE( lsl tmp2, tmp2, tmp1 )/*Big-endian.Early bytes are at MSB*/
- /* Little-endian. Early bytes are at LSB. */
-CPU_LE( lsr tmp2, tmp2, tmp1 )
-
- orr data1, data1, tmp2
- orr data2, data2, tmp2
- b .Lstart_realigned
-
- /*src1 and src2 have different alignment offset.*/
-.Lmisaligned8:
- cmp limit, #8
- b.lo .Ltiny8proc /*limit < 8: compare byte by byte*/
-
- and tmp1, src1, #7
- neg tmp1, tmp1
- add tmp1, tmp1, #8/*valid length in the first 8 bytes of src1*/
- and tmp2, src2, #7
- neg tmp2, tmp2
- add tmp2, tmp2, #8/*valid length in the first 8 bytes of src2*/
- subs tmp3, tmp1, tmp2
- csel pos, tmp1, tmp2, hi /*Choose the maximum.*/
-
- sub limit, limit, pos
- /*compare the proceeding bytes in the first 8 byte segment.*/
-.Ltinycmp:
- ldrb data1w, [src1], #1
- ldrb data2w, [src2], #1
- subs pos, pos, #1
- ccmp data1w, data2w, #0, ne /* NZCV = 0b0000. */
- b.eq .Ltinycmp
- cbnz pos, 1f /*diff occurred before the last byte.*/
- cmp data1w, data2w
- b.eq .Lstart_align
-1:
- sub result, data1, data2
+ subs limit, limit, 8
+ b.lo L(less8)
+
+ ldr data1, [src1], 8
+ ldr data2, [src2], 8
+ cmp data1, data2
+ b.ne L(return)
+
+ subs limit, limit, 8
+ b.gt L(more16)
+
+ ldr data1, [src1, limit]
+ ldr data2, [src2, limit]
+ b L(return)
+
+L(more16):
+ ldr data1, [src1], 8
+ ldr data2, [src2], 8
+ cmp data1, data2
+ bne L(return)
+
+ /* Jump directly to comparing the last 16 bytes for 32 byte (or less)
+ strings. */
+	/* We overlap loads of up to 32 bytes at either side of SRC1 when we
+ b.ls L(last_bytes)
+
+ /* We overlap loads between 0-32 bytes at either side of SRC1 when we
+ try to align, so limit it only to strings larger than 128 bytes. */
+ cmp limit, 96
+ b.ls L(loop16)
+
+ /* Align src1 and adjust src2 with bytes not yet done. */
+ and tmp1, src1, 15
+ add limit, limit, tmp1
+ sub src1, src1, tmp1
+ sub src2, src2, tmp1
+
+ /* Loop performing 16 bytes per iteration using aligned src1.
+ Limit is pre-decremented by 16 and must be larger than zero.
+ Exit if <= 16 bytes left to do or if the data is not equal. */
+ .p2align 4
+L(loop16):
+ ldp data1, data1h, [src1], 16
+ ldp data2, data2h, [src2], 16
+ subs limit, limit, 16
+ ccmp data1, data2, 0, hi
+ ccmp data1h, data2h, 0, eq
+ b.eq L(loop16)
+
+ cmp data1, data2
+ bne L(return)
+ mov data1, data1h
+ mov data2, data2h
+ cmp data1, data2
+ bne L(return)
+
+ /* Compare last 1-16 bytes using unaligned access. */
+L(last_bytes):
+ add src1, src1, limit
+ add src2, src2, limit
+ ldp data1, data1h, [src1]
+ ldp data2, data2h, [src2]
+ cmp data1, data2
+ bne L(return)
+ mov data1, data1h
+ mov data2, data2h
+ cmp data1, data2
+
+ /* Compare data bytes and set return value to 0, -1 or 1. */
+L(return):
+#ifndef __AARCH64EB__
+ rev data1, data1
+ rev data2, data2
+#endif
+ cmp data1, data2
+L(ret_eq):
+ cset result, ne
+ cneg result, result, lo
ret
-.Lstart_align:
- lsr limit_wd, limit, #3
- cbz limit_wd, .Lremain8
-
- ands xzr, src1, #7
- b.eq .Lrecal_offset
- /*process more leading bytes to make src1 aligned...*/
- add src1, src1, tmp3 /*backwards src1 to alignment boundary*/
- add src2, src2, tmp3
- sub limit, limit, tmp3
- lsr limit_wd, limit, #3
- cbz limit_wd, .Lremain8
- /*load 8 bytes from aligned SRC1..*/
- ldr data1, [src1], #8
- ldr data2, [src2], #8
-
- subs limit_wd, limit_wd, #1
- eor diff, data1, data2 /*Non-zero if differences found.*/
- csinv endloop, diff, xzr, ne
- cbnz endloop, .Lunequal_proc
- /*How far is the current SRC2 from the alignment boundary...*/
- and tmp3, tmp3, #7
-
-.Lrecal_offset:/*src1 is aligned now..*/
- neg pos, tmp3
-.Lloopcmp_proc:
- /*
- * Divide the eight bytes into two parts. First,backwards the src2
- * to an alignment boundary,load eight bytes and compare from
- * the SRC2 alignment boundary. If all 8 bytes are equal,then start
- * the second part's comparison. Otherwise finish the comparison.
- * This special handle can garantee all the accesses are in the
- * thread/task space in avoid to overrange access.
- */
- ldr data1, [src1,pos]
- ldr data2, [src2,pos]
- eor diff, data1, data2 /* Non-zero if differences found. */
- cbnz diff, .Lnot_limit
-
- /*The second part process*/
- ldr data1, [src1], #8
- ldr data2, [src2], #8
- eor diff, data1, data2 /* Non-zero if differences found. */
- subs limit_wd, limit_wd, #1
- csinv endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
- cbz endloop, .Lloopcmp_proc
-.Lunequal_proc:
- cbz diff, .Lremain8
-
-/* There is difference occurred in the latest comparison. */
-.Lnot_limit:
-/*
-* For little endian,reverse the low significant equal bits into MSB,then
-* following CLZ can find how many equal bits exist.
-*/
-CPU_LE( rev diff, diff )
-CPU_LE( rev data1, data1 )
-CPU_LE( rev data2, data2 )
-
- /*
- * The MS-non-zero bit of DIFF marks either the first bit
- * that is different, or the end of the significant data.
- * Shifting left now will bring the critical information into the
- * top bits.
- */
- clz pos, diff
- lsl data1, data1, pos
- lsl data2, data2, pos
- /*
- * We need to zero-extend (char is unsigned) the value and then
- * perform a signed subtraction.
- */
- lsr data1, data1, #56
- sub result, data1, data2, lsr #56
+ .p2align 4
+ /* Compare up to 8 bytes. Limit is [-8..-1]. */
+L(less8):
+ adds limit, limit, 4
+ b.lo L(less4)
+ ldr data1w, [src1], 4
+ ldr data2w, [src2], 4
+ cmp data1w, data2w
+ b.ne L(return)
+ sub limit, limit, 4
+L(less4):
+ adds limit, limit, 4
+ beq L(ret_eq)
+L(byte_loop):
+ ldrb data1w, [src1], 1
+ ldrb data2w, [src2], 1
+ subs limit, limit, 1
+ ccmp data1w, data2w, 0, ne /* NZCV = 0b0000. */
+ b.eq L(byte_loop)
+ sub result, data1w, data2w
ret
-.Lremain8:
- /* Limit % 8 == 0 =>. all data are equal.*/
- ands limit, limit, #7
- b.eq .Lret0
-
-.Ltiny8proc:
- ldrb data1w, [src1], #1
- ldrb data2w, [src2], #1
- subs limit, limit, #1
-
- ccmp data1w, data2w, #0, ne /* NZCV = 0b0000. */
- b.eq .Ltiny8proc
- sub result, data1, data2
- ret
-.Lret0:
- mov result, #0
- ret
SYM_FUNC_END_PI(memcmp)
EXPORT_SYMBOL_NOKASAN(memcmp)
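Two ideas above are worth spelling out: the last 1-16 bytes are compared by re-reading 16 bytes that end exactly at the buffer end (the overlap only re-reads bytes already known equal, so it cannot flip the result), and on little-endian the rev/cset/cneg sequence in L(return) byte-reverses both words so a single unsigned compare decides lexicographic order. A C model of the tail compare:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int cmp_last16(const unsigned char *s1, const unsigned char *s2,
		      unsigned long n)	/* n >= 16; earlier bytes equal */
{
	uint64_t a, b;

	for (int off = 16; off >= 8; off -= 8) {
		memcpy(&a, s1 + n - off, 8);
		memcpy(&b, s2 + n - off, 8);
		if (a != b) {
			a = __builtin_bswap64(a);	/* memory order -> */
			b = __builtin_bswap64(b);	/* numeric order   */
			return a < b ? -1 : 1;
		}
	}
	return 0;
}

int main(void)
{
	unsigned char x[] = "aaaaaaaaaaaaaaaaaaa";	/* 19 x 'a' */
	unsigned char y[] = "aaaaaaaaaaaaaaaaaab";	/* differs in byte 18 */

	printf("%d\n", cmp_last16(x, y, 19));	/* -1 */
	return 0;
}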
diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
index dc8d2a216a6e..b82fd64ee1e1 100644
--- a/arch/arm64/lib/memcpy.S
+++ b/arch/arm64/lib/memcpy.S
@@ -1,66 +1,252 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2012-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/afd6244a1f8d9229/string/aarch64/memcpy.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-#include <asm/cache.h>
-/*
- * Copy a buffer from src to dest (alignment handled by the hardware)
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, unaligned accesses.
*
- * Parameters:
- * x0 - dest
- * x1 - src
- * x2 - n
- * Returns:
- * x0 - dest
*/
- .macro ldrb1 reg, ptr, val
- ldrb \reg, [\ptr], \val
- .endm
-
- .macro strb1 reg, ptr, val
- strb \reg, [\ptr], \val
- .endm
- .macro ldrh1 reg, ptr, val
- ldrh \reg, [\ptr], \val
- .endm
+#define L(label) .L ## label
- .macro strh1 reg, ptr, val
- strh \reg, [\ptr], \val
- .endm
+#define dstin x0
+#define src x1
+#define count x2
+#define dst x3
+#define srcend x4
+#define dstend x5
+#define A_l x6
+#define A_lw w6
+#define A_h x7
+#define B_l x8
+#define B_lw w8
+#define B_h x9
+#define C_l x10
+#define C_lw w10
+#define C_h x11
+#define D_l x12
+#define D_h x13
+#define E_l x14
+#define E_h x15
+#define F_l x16
+#define F_h x17
+#define G_l count
+#define G_h dst
+#define H_l src
+#define H_h srcend
+#define tmp1 x14
- .macro ldr1 reg, ptr, val
- ldr \reg, [\ptr], \val
- .endm
+/* This implementation handles overlaps and supports both memcpy and memmove
+   from a single entry point. It uses unaligned accesses and branchless
+   sequences to keep the code small and simple, and to improve performance.
- .macro str1 reg, ptr, val
- str \reg, [\ptr], \val
- .endm
+ Copies are split into 3 main cases: small copies of up to 32 bytes, medium
+ copies of up to 128 bytes, and large copies. The overhead of the overlap
+ check is negligible since it is only required for large copies.
- .macro ldp1 reg1, reg2, ptr, val
- ldp \reg1, \reg2, [\ptr], \val
- .endm
-
- .macro stp1 reg1, reg2, ptr, val
- stp \reg1, \reg2, [\ptr], \val
- .endm
+ Large copies use a software pipelined loop processing 64 bytes per iteration.
+ The destination pointer is 16-byte aligned to minimize unaligned accesses.
+ The loop tail is handled by always copying 64 bytes from the end.
+*/
+SYM_FUNC_START_ALIAS(__memmove)
+SYM_FUNC_START_WEAK_ALIAS_PI(memmove)
SYM_FUNC_START_ALIAS(__memcpy)
SYM_FUNC_START_WEAK_PI(memcpy)
-#include "copy_template.S"
+ add srcend, src, count
+ add dstend, dstin, count
+ cmp count, 128
+ b.hi L(copy_long)
+ cmp count, 32
+ b.hi L(copy32_128)
+
+ /* Small copies: 0..32 bytes. */
+ cmp count, 16
+ b.lo L(copy16)
+ ldp A_l, A_h, [src]
+ ldp D_l, D_h, [srcend, -16]
+ stp A_l, A_h, [dstin]
+ stp D_l, D_h, [dstend, -16]
+ ret
+
+ /* Copy 8-15 bytes. */
+L(copy16):
+ tbz count, 3, L(copy8)
+ ldr A_l, [src]
+ ldr A_h, [srcend, -8]
+ str A_l, [dstin]
+ str A_h, [dstend, -8]
+ ret
+
+ .p2align 3
+ /* Copy 4-7 bytes. */
+L(copy8):
+ tbz count, 2, L(copy4)
+ ldr A_lw, [src]
+ ldr B_lw, [srcend, -4]
+ str A_lw, [dstin]
+ str B_lw, [dstend, -4]
+ ret
+
+ /* Copy 0..3 bytes using a branchless sequence. */
+L(copy4):
+ cbz count, L(copy0)
+ lsr tmp1, count, 1
+ ldrb A_lw, [src]
+ ldrb C_lw, [srcend, -1]
+ ldrb B_lw, [src, tmp1]
+ strb A_lw, [dstin]
+ strb B_lw, [dstin, tmp1]
+ strb C_lw, [dstend, -1]
+L(copy0):
+ ret
+
+ .p2align 4
+ /* Medium copies: 33..128 bytes. */
+L(copy32_128):
+ ldp A_l, A_h, [src]
+ ldp B_l, B_h, [src, 16]
+ ldp C_l, C_h, [srcend, -32]
+ ldp D_l, D_h, [srcend, -16]
+ cmp count, 64
+ b.hi L(copy128)
+ stp A_l, A_h, [dstin]
+ stp B_l, B_h, [dstin, 16]
+ stp C_l, C_h, [dstend, -32]
+ stp D_l, D_h, [dstend, -16]
ret
+
+ .p2align 4
+ /* Copy 65..128 bytes. */
+L(copy128):
+ ldp E_l, E_h, [src, 32]
+ ldp F_l, F_h, [src, 48]
+ cmp count, 96
+ b.ls L(copy96)
+ ldp G_l, G_h, [srcend, -64]
+ ldp H_l, H_h, [srcend, -48]
+ stp G_l, G_h, [dstend, -64]
+ stp H_l, H_h, [dstend, -48]
+L(copy96):
+ stp A_l, A_h, [dstin]
+ stp B_l, B_h, [dstin, 16]
+ stp E_l, E_h, [dstin, 32]
+ stp F_l, F_h, [dstin, 48]
+ stp C_l, C_h, [dstend, -32]
+ stp D_l, D_h, [dstend, -16]
+ ret
+
+ .p2align 4
+ /* Copy more than 128 bytes. */
+L(copy_long):
+ /* Use backwards copy if there is an overlap. */
+ sub tmp1, dstin, src
+ cbz tmp1, L(copy0)
+ cmp tmp1, count
+ b.lo L(copy_long_backwards)
+
+ /* Copy 16 bytes and then align dst to 16-byte alignment. */
+
+ ldp D_l, D_h, [src]
+ and tmp1, dstin, 15
+ bic dst, dstin, 15
+ sub src, src, tmp1
+ add count, count, tmp1 /* Count is now 16 too large. */
+ ldp A_l, A_h, [src, 16]
+ stp D_l, D_h, [dstin]
+ ldp B_l, B_h, [src, 32]
+ ldp C_l, C_h, [src, 48]
+ ldp D_l, D_h, [src, 64]!
+ subs count, count, 128 + 16 /* Test and readjust count. */
+ b.ls L(copy64_from_end)
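
In C terms the prologue above does roughly the following (a sketch; copy16() is a hypothetical stand-in for the ldp/stp pair):

	#include <stdint.h>
	#include <stddef.h>

	extern void copy16(uintptr_t dst, uintptr_t src);	/* hypothetical */

	/* Sketch of the alignment prologue above. */
	static void align_dst(uintptr_t *dst, uintptr_t *src, size_t *count)
	{
		size_t skew = *dst & 15;	/* bytes past 16-byte alignment */

		copy16(*dst, *src);		/* first 16 bytes, maybe unaligned */
		*dst &= ~(uintptr_t)15;		/* round dst down to alignment */
		*src -= skew;			/* keep src in step with dst */
		*count += skew;			/* count is now 16 too large */
	}

Rewinding src by the same skew keeps the src/dst offset constant, so the loop can use matching immediate offsets on both pointers.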
+
+L(loop64):
+ stp A_l, A_h, [dst, 16]
+ ldp A_l, A_h, [src, 16]
+ stp B_l, B_h, [dst, 32]
+ ldp B_l, B_h, [src, 32]
+ stp C_l, C_h, [dst, 48]
+ ldp C_l, C_h, [src, 48]
+ stp D_l, D_h, [dst, 64]!
+ ldp D_l, D_h, [src, 64]!
+ subs count, count, 64
+ b.hi L(loop64)
+
+ /* Write the last iteration and copy 64 bytes from the end. */
+L(copy64_from_end):
+ ldp E_l, E_h, [srcend, -64]
+ stp A_l, A_h, [dst, 16]
+ ldp A_l, A_h, [srcend, -48]
+ stp B_l, B_h, [dst, 32]
+ ldp B_l, B_h, [srcend, -32]
+ stp C_l, C_h, [dst, 48]
+ ldp C_l, C_h, [srcend, -16]
+ stp D_l, D_h, [dst, 64]
+ stp E_l, E_h, [dstend, -64]
+ stp A_l, A_h, [dstend, -48]
+ stp B_l, B_h, [dstend, -32]
+ stp C_l, C_h, [dstend, -16]
+ ret
+
+ .p2align 4
+
+ /* Large backwards copy for overlapping copies.
+ Copy 16 bytes and then align dst to 16-byte alignment. */
+L(copy_long_backwards):
+ ldp D_l, D_h, [srcend, -16]
+ and tmp1, dstend, 15
+ sub srcend, srcend, tmp1
+ sub count, count, tmp1
+ ldp A_l, A_h, [srcend, -16]
+ stp D_l, D_h, [dstend, -16]
+ ldp B_l, B_h, [srcend, -32]
+ ldp C_l, C_h, [srcend, -48]
+ ldp D_l, D_h, [srcend, -64]!
+ sub dstend, dstend, tmp1
+ subs count, count, 128
+ b.ls L(copy64_from_start)
+
+L(loop64_backwards):
+ stp A_l, A_h, [dstend, -16]
+ ldp A_l, A_h, [srcend, -16]
+ stp B_l, B_h, [dstend, -32]
+ ldp B_l, B_h, [srcend, -32]
+ stp C_l, C_h, [dstend, -48]
+ ldp C_l, C_h, [srcend, -48]
+ stp D_l, D_h, [dstend, -64]!
+ ldp D_l, D_h, [srcend, -64]!
+ subs count, count, 64
+ b.hi L(loop64_backwards)
+
+ /* Write the last iteration and copy 64 bytes from the start. */
+L(copy64_from_start):
+ ldp G_l, G_h, [src, 48]
+ stp A_l, A_h, [dstend, -16]
+ ldp A_l, A_h, [src, 32]
+ stp B_l, B_h, [dstend, -32]
+ ldp B_l, B_h, [src, 16]
+ stp C_l, C_h, [dstend, -48]
+ ldp C_l, C_h, [src]
+ stp D_l, D_h, [dstend, -64]
+ stp G_l, G_h, [dstin, 48]
+ stp A_l, A_h, [dstin, 32]
+ stp B_l, B_h, [dstin, 16]
+ stp C_l, C_h, [dstin]
+ ret
+
SYM_FUNC_END_PI(memcpy)
EXPORT_SYMBOL(memcpy)
SYM_FUNC_END_ALIAS(__memcpy)
EXPORT_SYMBOL(__memcpy)
+SYM_FUNC_END_ALIAS_PI(memmove)
+EXPORT_SYMBOL(memmove)
+SYM_FUNC_END_ALIAS(__memmove)
+EXPORT_SYMBOL(__memmove)
diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
deleted file mode 100644
index 1035dce4bdaf..000000000000
--- a/arch/arm64/lib/memmove.S
+++ /dev/null
@@ -1,189 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
- *
- * This code is based on glibc cortex strings work originally authored by Linaro
- * and re-licensed under GPLv2 for the Linux kernel. The original code can
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/cache.h>
-
-/*
- * Move a buffer from src to dest (alignment handled by the hardware).
- * If dest <= src, call memcpy, otherwise copy in reverse order.
- *
- * Parameters:
- * x0 - dest
- * x1 - src
- * x2 - n
- * Returns:
- * x0 - dest
- */
-dstin .req x0
-src .req x1
-count .req x2
-tmp1 .req x3
-tmp1w .req w3
-tmp2 .req x4
-tmp2w .req w4
-tmp3 .req x5
-tmp3w .req w5
-dst .req x6
-
-A_l .req x7
-A_h .req x8
-B_l .req x9
-B_h .req x10
-C_l .req x11
-C_h .req x12
-D_l .req x13
-D_h .req x14
-
-SYM_FUNC_START_ALIAS(__memmove)
-SYM_FUNC_START_WEAK_PI(memmove)
- cmp dstin, src
- b.lo __memcpy
- add tmp1, src, count
- cmp dstin, tmp1
- b.hs __memcpy /* No overlap. */
-
- add dst, dstin, count
- add src, src, count
- cmp count, #16
- b.lo .Ltail15 /*probably non-alignment accesses.*/
-
- ands tmp2, src, #15 /* Bytes to reach alignment. */
- b.eq .LSrcAligned
- sub count, count, tmp2
- /*
- * process the aligned offset length to make the src aligned firstly.
- * those extra instructions' cost is acceptable. It also make the
- * coming accesses are based on aligned address.
- */
- tbz tmp2, #0, 1f
- ldrb tmp1w, [src, #-1]!
- strb tmp1w, [dst, #-1]!
-1:
- tbz tmp2, #1, 2f
- ldrh tmp1w, [src, #-2]!
- strh tmp1w, [dst, #-2]!
-2:
- tbz tmp2, #2, 3f
- ldr tmp1w, [src, #-4]!
- str tmp1w, [dst, #-4]!
-3:
- tbz tmp2, #3, .LSrcAligned
- ldr tmp1, [src, #-8]!
- str tmp1, [dst, #-8]!
-
-.LSrcAligned:
- cmp count, #64
- b.ge .Lcpy_over64
-
- /*
- * Deal with small copies quickly by dropping straight into the
- * exit block.
- */
-.Ltail63:
- /*
- * Copy up to 48 bytes of data. At this point we only need the
- * bottom 6 bits of count to be accurate.
- */
- ands tmp1, count, #0x30
- b.eq .Ltail15
- cmp tmp1w, #0x20
- b.eq 1f
- b.lt 2f
- ldp A_l, A_h, [src, #-16]!
- stp A_l, A_h, [dst, #-16]!
-1:
- ldp A_l, A_h, [src, #-16]!
- stp A_l, A_h, [dst, #-16]!
-2:
- ldp A_l, A_h, [src, #-16]!
- stp A_l, A_h, [dst, #-16]!
-
-.Ltail15:
- tbz count, #3, 1f
- ldr tmp1, [src, #-8]!
- str tmp1, [dst, #-8]!
-1:
- tbz count, #2, 2f
- ldr tmp1w, [src, #-4]!
- str tmp1w, [dst, #-4]!
-2:
- tbz count, #1, 3f
- ldrh tmp1w, [src, #-2]!
- strh tmp1w, [dst, #-2]!
-3:
- tbz count, #0, .Lexitfunc
- ldrb tmp1w, [src, #-1]
- strb tmp1w, [dst, #-1]
-
-.Lexitfunc:
- ret
-
-.Lcpy_over64:
- subs count, count, #128
- b.ge .Lcpy_body_large
- /*
- * Less than 128 bytes to copy, so handle 64 bytes here and then jump
- * to the tail.
- */
- ldp A_l, A_h, [src, #-16]
- stp A_l, A_h, [dst, #-16]
- ldp B_l, B_h, [src, #-32]
- ldp C_l, C_h, [src, #-48]
- stp B_l, B_h, [dst, #-32]
- stp C_l, C_h, [dst, #-48]
- ldp D_l, D_h, [src, #-64]!
- stp D_l, D_h, [dst, #-64]!
-
- tst count, #0x3f
- b.ne .Ltail63
- ret
-
- /*
- * Critical loop. Start at a new cache line boundary. Assuming
- * 64 bytes per line this ensures the entire loop is in one line.
- */
- .p2align L1_CACHE_SHIFT
-.Lcpy_body_large:
- /* pre-load 64 bytes data. */
- ldp A_l, A_h, [src, #-16]
- ldp B_l, B_h, [src, #-32]
- ldp C_l, C_h, [src, #-48]
- ldp D_l, D_h, [src, #-64]!
-1:
- /*
- * interlace the load of next 64 bytes data block with store of the last
- * loaded 64 bytes data.
- */
- stp A_l, A_h, [dst, #-16]
- ldp A_l, A_h, [src, #-16]
- stp B_l, B_h, [dst, #-32]
- ldp B_l, B_h, [src, #-32]
- stp C_l, C_h, [dst, #-48]
- ldp C_l, C_h, [src, #-48]
- stp D_l, D_h, [dst, #-64]!
- ldp D_l, D_h, [src, #-64]!
- subs count, count, #64
- b.ge 1b
- stp A_l, A_h, [dst, #-16]
- stp B_l, B_h, [dst, #-32]
- stp C_l, C_h, [dst, #-48]
- stp D_l, D_h, [dst, #-64]!
-
- tst count, #0x3f
- b.ne .Ltail63
- ret
-SYM_FUNC_END_PI(memmove)
-EXPORT_SYMBOL(memmove)
-SYM_FUNC_END_ALIAS(__memmove)
-EXPORT_SYMBOL(__memmove)
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 351537c12f36..e83643b3995f 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -37,6 +37,26 @@ SYM_FUNC_START(mte_clear_page_tags)
SYM_FUNC_END(mte_clear_page_tags)
/*
+ * Zero the page and tags at the same time
+ *
+ * Parameters:
+ * x0 - address to the beginning of the page
+ */
+SYM_FUNC_START(mte_zero_clear_page_tags)
+ mrs x1, dczid_el0
+ and w1, w1, #0xf
+ mov x2, #4
+ lsl x1, x2, x1
+ and x0, x0, #(1 << MTE_TAG_SHIFT) - 1 // clear the tag
+
+1: dc gzva, x0
+ add x0, x0, x1
+ tst x0, #(PAGE_SIZE - 1)
+ b.ne 1b
+ ret
+SYM_FUNC_END(mte_zero_clear_page_tags)
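
A hedged C model of the new routine (read_dczid_el0() and dc_gzva() are hypothetical stand-ins for the mrs and dc gzva instructions; 4K pages assumed for the sketch):

	#include <stdint.h>

	#define MTE_TAG_SHIFT		56
	#define SKETCH_PAGE_SIZE	4096UL	/* assumed for the sketch */

	extern uint64_t read_dczid_el0(void);	/* hypothetical accessor */
	extern void dc_gzva(uintptr_t addr);	/* hypothetical: zero data+tags */

	static void mte_zero_clear_page_tags_model(uintptr_t page)
	{
		/* DCZID_EL0[3:0] encodes the zeroing block size as log2(words) */
		uintptr_t block = 4UL << (read_dczid_el0() & 0xf);
		uintptr_t addr = page & ((1UL << MTE_TAG_SHIFT) - 1); /* clear tag */

		do {
			dc_gzva(addr);		/* zero one block of data and tags */
			addr += block;
		} while (addr & (SKETCH_PAGE_SIZE - 1));
	}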
+
+/*
* Copy the tags from the source page to the destination one
* x0 - address of the destination page
* x1 - address of the source page
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index 4e79566726c8..d7bee210a798 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -1,84 +1,123 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2012-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * and re-licensed under GPLv2 for the Linux kernel. The original code can
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/afd6244a1f8d9229/string/aarch64/strcmp.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-/*
- * compare two strings
+/* Assumptions:
*
- * Parameters:
- * x0 - const string 1 pointer
- * x1 - const string 2 pointer
- * Returns:
- * x0 - an integer less than, equal to, or greater than zero
- * if s1 is found, respectively, to be less than, to match,
- * or be greater than s2.
+ * ARMv8-a, AArch64
*/
+#define L(label) .L ## label
+
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
/* Parameters and result. */
-src1 .req x0
-src2 .req x1
-result .req x0
+#define src1 x0
+#define src2 x1
+#define result x0
/* Internal variables. */
-data1 .req x2
-data1w .req w2
-data2 .req x3
-data2w .req w3
-has_nul .req x4
-diff .req x5
-syndrome .req x6
-tmp1 .req x7
-tmp2 .req x8
-tmp3 .req x9
-zeroones .req x10
-pos .req x11
-
+#define data1 x2
+#define data1w w2
+#define data2 x3
+#define data2w w3
+#define has_nul x4
+#define diff x5
+#define syndrome x6
+#define tmp1 x7
+#define tmp2 x8
+#define tmp3 x9
+#define zeroones x10
+#define pos x11
+
+ /* Start of performance-critical section -- one 64B cache line. */
+ .align 6
SYM_FUNC_START_WEAK_PI(strcmp)
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7
- b.ne .Lmisaligned8
+ b.ne L(misaligned8)
ands tmp1, src1, #7
- b.ne .Lmutual_align
-
- /*
- * NUL detection works on the principle that (X - 1) & (~X) & 0x80
- * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
- * can be done in parallel across the entire word.
- */
-.Lloop_aligned:
+ b.ne L(mutual_align)
+ /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+ (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+ can be done in parallel across the entire word. */
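
The same test in C, for reference (an illustration only; constants as defined in this file):

	#include <stdint.h>

	#define HAS_NUL_REP8_01 0x0101010101010101ULL
	#define HAS_NUL_REP8_7f 0x7f7f7f7f7f7f7f7fULL

	/* Non-zero iff some byte of x is zero: subtracting 1 borrows
	 * through a zero byte, and ~(x | 0x7f) keeps bit 7 only of bytes
	 * whose top bit was clear. */
	static uint64_t has_nul_byte(uint64_t x)
	{
		return (x - HAS_NUL_REP8_01) & ~(x | HAS_NUL_REP8_7f);
	}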
+L(loop_aligned):
ldr data1, [src1], #8
ldr data2, [src2], #8
-.Lstart_realigned:
+L(start_realigned):
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
eor diff, data1, data2 /* Non-zero if differences found. */
bic has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
orr syndrome, diff, has_nul
- cbz syndrome, .Lloop_aligned
- b .Lcal_cmpresult
+ cbz syndrome, L(loop_aligned)
+ /* End of performance-critical section -- one 64B cache line. */
+
+L(end):
+#ifndef __AARCH64EB__
+ rev syndrome, syndrome
+ rev data1, data1
+ /* The MS-non-zero bit of the syndrome marks either the first bit
+ that is different, or the top bit of the first zero byte.
+ Shifting left now will bring the critical information into the
+ top bits. */
+ clz pos, syndrome
+ rev data2, data2
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /* But we need to zero-extend (char is unsigned) the value and then
+ perform a signed 32-bit subtraction. */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+#else
+ /* For big-endian we cannot use the trick with the syndrome value
+ as carry-propagation can corrupt the upper bits if the trailing
+ bytes in the string contain 0x01. */
+ /* However, if there is no NUL byte in the dword, we can generate
+ the result directly. We can't just subtract the bytes as the
+ MSB might be significant. */
+ cbnz has_nul, 1f
+ cmp data1, data2
+ cset result, ne
+ cneg result, result, lo
+ ret
+1:
+ /* Re-compute the NUL-byte detection, using a byte-reversed value. */
+ rev tmp3, data1
+ sub tmp1, tmp3, zeroones
+ orr tmp2, tmp3, #REP8_7f
+ bic has_nul, tmp1, tmp2
+ rev has_nul, has_nul
+ orr syndrome, diff, has_nul
+ clz pos, syndrome
+ /* The MS-non-zero bit of the syndrome marks either the first bit
+ that is different, or the top bit of the first zero byte.
+ Shifting left now will bring the critical information into the
+ top bits. */
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /* But we need to zero-extend (char is unsigned) the value and then
+ perform a signed 32-bit subtraction. */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+#endif
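
On little-endian the tail above amounts to the following C (a sketch using GCC/Clang builtins; syndrome is known non-zero here, so the clz is well defined):

	#include <stdint.h>

	static int strcmp_tail_le(uint64_t data1, uint64_t data2,
				  uint64_t syndrome)
	{
		/* Byte-reverse so clz counts the bits before the first
		 * difference or NUL, then shift the deciding byte to the
		 * top of each word. */
		int pos = __builtin_clzll(__builtin_bswap64(syndrome));
		uint64_t a = __builtin_bswap64(data1) << pos;
		uint64_t b = __builtin_bswap64(data2) << pos;

		/* Zero-extend the deciding bytes, then subtract. */
		return (int)(a >> 56) - (int)(b >> 56);
	}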
-.Lmutual_align:
- /*
- * Sources are mutually aligned, but are not currently at an
- * alignment boundary. Round down the addresses and then mask off
- * the bytes that preceed the start point.
- */
+L(mutual_align):
+ /* Sources are mutually aligned, but are not currently at an
+ alignment boundary. Round down the addresses and then mask off
+ the bytes that precede the start point. */
bic src1, src1, #7
bic src2, src2, #7
lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
@@ -86,138 +125,52 @@ SYM_FUNC_START_WEAK_PI(strcmp)
neg tmp1, tmp1 /* Bits to alignment -64. */
ldr data2, [src2], #8
mov tmp2, #~0
+#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
-CPU_BE( lsl tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
+ lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
+#else
/* Little-endian. Early bytes are at LSB. */
-CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
-
+ lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
+#endif
orr data1, data1, tmp2
orr data2, data2, tmp2
- b .Lstart_realigned
-
-.Lmisaligned8:
- /*
- * Get the align offset length to compare per byte first.
- * After this process, one string's address will be aligned.
- */
- and tmp1, src1, #7
- neg tmp1, tmp1
- add tmp1, tmp1, #8
- and tmp2, src2, #7
- neg tmp2, tmp2
- add tmp2, tmp2, #8
- subs tmp3, tmp1, tmp2
- csel pos, tmp1, tmp2, hi /*Choose the maximum. */
-.Ltinycmp:
+ b L(start_realigned)
+
+L(misaligned8):
+ /* Align SRC1 to 8 bytes and then compare 8 bytes at a time, always
+ checking to make sure that we don't access beyond the page
+ boundary in SRC2. */
+ tst src1, #7
+ b.eq L(loop_misaligned)
+L(do_misaligned):
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
- subs pos, pos, #1
- ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */
- ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
- b.eq .Ltinycmp
- cbnz pos, 1f /*find the null or unequal...*/
cmp data1w, #1
- ccmp data1w, data2w, #0, cs
- b.eq .Lstart_align /*the last bytes are equal....*/
-1:
- sub result, data1, data2
- ret
-
-.Lstart_align:
- ands xzr, src1, #7
- b.eq .Lrecal_offset
- /*process more leading bytes to make str1 aligned...*/
- add src1, src1, tmp3
- add src2, src2, tmp3
- /*load 8 bytes from aligned str1 and non-aligned str2..*/
+ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
+ b.ne L(done)
+ tst src1, #7
+ b.ne L(do_misaligned)
+
+L(loop_misaligned):
+ /* Test if we are within the last dword of a 4K page. If so, jump
+ back to the misaligned loop to copy a byte at a time. */
+ and tmp1, src2, #0xff8
+ eor tmp1, tmp1, #0xff8
+ cbz tmp1, L(do_misaligned)
ldr data1, [src1], #8
ldr data2, [src2], #8
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
- bic has_nul, tmp1, tmp2
- eor diff, data1, data2 /* Non-zero if differences found. */
- orr syndrome, diff, has_nul
- cbnz syndrome, .Lcal_cmpresult
- /*How far is the current str2 from the alignment boundary...*/
- and tmp3, tmp3, #7
-.Lrecal_offset:
- neg pos, tmp3
-.Lloopcmp_proc:
- /*
- * Divide the eight bytes into two parts. First,backwards the src2
- * to an alignment boundary,load eight bytes from the SRC2 alignment
- * boundary,then compare with the relative bytes from SRC1.
- * If all 8 bytes are equal,then start the second part's comparison.
- * Otherwise finish the comparison.
- * This special handle can garantee all the accesses are in the
- * thread/task space in avoid to overrange access.
- */
- ldr data1, [src1,pos]
- ldr data2, [src2,pos]
- sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
- bic has_nul, tmp1, tmp2
- eor diff, data1, data2 /* Non-zero if differences found. */
- orr syndrome, diff, has_nul
- cbnz syndrome, .Lcal_cmpresult
-
- /*The second part process*/
- ldr data1, [src1], #8
- ldr data2, [src2], #8
- sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
- bic has_nul, tmp1, tmp2
- eor diff, data1, data2 /* Non-zero if differences found. */
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ bic has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
orr syndrome, diff, has_nul
- cbz syndrome, .Lloopcmp_proc
+ cbz syndrome, L(loop_misaligned)
+ b L(end)
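
The page-proximity test above is equivalent to this C predicate (illustrative only):

	#include <stdint.h>

	/* True when src2 is in the last dword of a 4K page, i.e. an
	 * 8-byte load at src2 could cross into the next (possibly
	 * unmapped) page. */
	static int near_4k_page_end(uintptr_t src2)
	{
		return (src2 & 0xff8) == 0xff8;
	}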
-.Lcal_cmpresult:
- /*
- * reversed the byte-order as big-endian,then CLZ can find the most
- * significant zero bits.
- */
-CPU_LE( rev syndrome, syndrome )
-CPU_LE( rev data1, data1 )
-CPU_LE( rev data2, data2 )
-
- /*
- * For big-endian we cannot use the trick with the syndrome value
- * as carry-propagation can corrupt the upper bits if the trailing
- * bytes in the string contain 0x01.
- * However, if there is no NUL byte in the dword, we can generate
- * the result directly. We cannot just subtract the bytes as the
- * MSB might be significant.
- */
-CPU_BE( cbnz has_nul, 1f )
-CPU_BE( cmp data1, data2 )
-CPU_BE( cset result, ne )
-CPU_BE( cneg result, result, lo )
-CPU_BE( ret )
-CPU_BE( 1: )
- /*Re-compute the NUL-byte detection, using a byte-reversed value. */
-CPU_BE( rev tmp3, data1 )
-CPU_BE( sub tmp1, tmp3, zeroones )
-CPU_BE( orr tmp2, tmp3, #REP8_7f )
-CPU_BE( bic has_nul, tmp1, tmp2 )
-CPU_BE( rev has_nul, has_nul )
-CPU_BE( orr syndrome, diff, has_nul )
-
- clz pos, syndrome
- /*
- * The MS-non-zero bit of the syndrome marks either the first bit
- * that is different, or the top bit of the first zero byte.
- * Shifting left now will bring the critical information into the
- * top bits.
- */
- lsl data1, data1, pos
- lsl data2, data2, pos
- /*
- * But we need to zero-extend (char is unsigned) the value and then
- * perform a signed 32-bit subtraction.
- */
- lsr data1, data1, #56
- sub result, data1, data2, lsr #56
+L(done):
+ sub result, data1, data2
ret
+
SYM_FUNC_END_PI(strcmp)
EXPORT_SYMBOL_NOKASAN(strcmp)
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
index ee3ed882dd79..35fbdb7d6e1a 100644
--- a/arch/arm64/lib/strlen.S
+++ b/arch/arm64/lib/strlen.S
@@ -1,115 +1,203 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * and re-licensed under GPLv2 for the Linux kernel. The original code can
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/98e4d6a5c13c8e54/string/aarch64/strlen.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-/*
- * calculate the length of a string
+/* Assumptions:
*
- * Parameters:
- * x0 - const string pointer
- * Returns:
- * x0 - the return length of specific string
+ * ARMv8-a, AArch64, unaligned accesses, min page size 4k.
*/
+#define L(label) .L ## label
+
/* Arguments and results. */
-srcin .req x0
-len .req x0
+#define srcin x0
+#define len x0
/* Locals and temporaries. */
-src .req x1
-data1 .req x2
-data2 .req x3
-data2a .req x4
-has_nul1 .req x5
-has_nul2 .req x6
-tmp1 .req x7
-tmp2 .req x8
-tmp3 .req x9
-tmp4 .req x10
-zeroones .req x11
-pos .req x12
+#define src x1
+#define data1 x2
+#define data2 x3
+#define has_nul1 x4
+#define has_nul2 x5
+#define tmp1 x4
+#define tmp2 x5
+#define tmp3 x6
+#define tmp4 x7
+#define zeroones x8
+
+ /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+ (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+ can be done in parallel across the entire word. A faster check
+ (X - 1) & 0x80 is zero for non-NUL ASCII characters, but gives
+ false hits for characters 129..255. */
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
+#define MIN_PAGE_SIZE 4096
+
+ /* Since strings are short on average, we check the first 16 bytes
+ of the string for a NUL character. In order to do an unaligned ldp
+ safely we have to do a page cross check first. If there is a NUL
+ byte we calculate the length from the 2 8-byte words using
+ conditional select to reduce branch mispredictions (it is unlikely
+ strlen will be repeatedly called on strings with the same length).
+
+ If the string is longer than 16 bytes, we align src so we don't
+ need further page cross checks, and process 32 bytes per iteration
+ using the fast NUL check. If we encounter non-ASCII characters,
+ we fall back to a second loop using the full NUL check.
+
+ If the page cross check fails, we read 16 bytes from an aligned
+ address, remove any characters before the string, and continue
+ in the main loop using aligned loads. Since strings crossing a
+ page in the first 16 bytes are rare (probability of
+ 16/MIN_PAGE_SIZE ~= 0.4%), this case does not need to be optimized.
+
+ AArch64 systems have a minimum page size of 4k. We don't bother
+ checking for larger page sizes - the cost of setting up the correct
+ page size is just not worth the extra gain from a small reduction in
+ the cases taking the slow path. Note that we only care about
+ whether the first fetch, which may be misaligned, crosses a page
+ boundary. */
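
A C rendering of the two checks described above (a sketch; constants as in this file):

	#include <stdint.h>

	#define REP8_01 0x0101010101010101ULL
	#define REP8_7f 0x7f7f7f7f7f7f7f7fULL
	#define REP8_80 0x8080808080808080ULL

	/* Fast check: zero only when every byte of both words is non-NUL
	 * ASCII; bytes 0x81..0xff can false-hit, which the accurate
	 * check then filters out. */
	static uint64_t maybe_nul_fast(uint64_t x, uint64_t y)
	{
		return ((x - REP8_01) | (y - REP8_01)) & REP8_80;
	}

	/* Accurate check for one word, as used in the tail paths. */
	static uint64_t has_nul_accurate(uint64_t x)
	{
		return (x - REP8_01) & ~(x | REP8_7f);
	}

The fast form saves one orr and one bic per word in the hot loop, at the cost of occasionally taking the accurate path for high-bit bytes.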
+
SYM_FUNC_START_WEAK_PI(strlen)
- mov zeroones, #REP8_01
- bic src, srcin, #15
- ands tmp1, srcin, #15
- b.ne .Lmisaligned
- /*
- * NUL detection works on the principle that (X - 1) & (~X) & 0x80
- * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
- * can be done in parallel across the entire word.
- */
- /*
- * The inner loop deals with two Dwords at a time. This has a
- * slightly higher start-up cost, but we should win quite quickly,
- * especially on cores with a high number of issue slots per
- * cycle, as we get much better parallelism out of the operations.
- */
-.Lloop:
- ldp data1, data2, [src], #16
-.Lrealigned:
+ and tmp1, srcin, MIN_PAGE_SIZE - 1
+ mov zeroones, REP8_01
+ cmp tmp1, MIN_PAGE_SIZE - 16
+ b.gt L(page_cross)
+ ldp data1, data2, [srcin]
+#ifdef __AARCH64EB__
+ /* For big-endian, carry propagation (if the final byte in the
+ string is 0x01) means we cannot use has_nul1/2 directly.
+ Since we expect strings to be small and early-exit,
+ byte-swap the data now so has_nul1/2 will be correct. */
+ rev data1, data1
+ rev data2, data2
+#endif
sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
+ orr tmp2, data1, REP8_7f
sub tmp3, data2, zeroones
- orr tmp4, data2, #REP8_7f
- bic has_nul1, tmp1, tmp2
- bics has_nul2, tmp3, tmp4
- ccmp has_nul1, #0, #0, eq /* NZCV = 0000 */
- b.eq .Lloop
+ orr tmp4, data2, REP8_7f
+ bics has_nul1, tmp1, tmp2
+ bic has_nul2, tmp3, tmp4
+ ccmp has_nul2, 0, 0, eq
+ beq L(main_loop_entry)
+
+ /* Enter with C = has_nul1 == 0. */
+ csel has_nul1, has_nul1, has_nul2, cc
+ mov len, 8
+ rev has_nul1, has_nul1
+ clz tmp1, has_nul1
+ csel len, xzr, len, cc
+ add len, len, tmp1, lsr 3
+ ret
+ /* The inner loop processes 32 bytes per iteration and uses the fast
+ NUL check. If we encounter non-ASCII characters, use a second
+ loop with the accurate NUL check. */
+ .p2align 4
+L(main_loop_entry):
+ bic src, srcin, 15
+ sub src, src, 16
+L(main_loop):
+ ldp data1, data2, [src, 32]!
+L(page_cross_entry):
+ sub tmp1, data1, zeroones
+ sub tmp3, data2, zeroones
+ orr tmp2, tmp1, tmp3
+ tst tmp2, zeroones, lsl 7
+ bne 1f
+ ldp data1, data2, [src, 16]
+ sub tmp1, data1, zeroones
+ sub tmp3, data2, zeroones
+ orr tmp2, tmp1, tmp3
+ tst tmp2, zeroones, lsl 7
+ beq L(main_loop)
+ add src, src, 16
+1:
+ /* The fast check failed, so do the slower, accurate NUL check. */
+ orr tmp2, data1, REP8_7f
+ orr tmp4, data2, REP8_7f
+ bics has_nul1, tmp1, tmp2
+ bic has_nul2, tmp3, tmp4
+ ccmp has_nul2, 0, 0, eq
+ beq L(nonascii_loop)
+
+ /* Enter with C = has_nul1 == 0. */
+L(tail):
+#ifdef __AARCH64EB__
+ /* For big-endian, carry propagation (if the final byte in the
+ string is 0x01) means we cannot use has_nul1/2 directly. The
+ easiest way to get the correct byte is to byte-swap the data
+ and calculate the syndrome a second time. */
+ csel data1, data1, data2, cc
+ rev data1, data1
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, REP8_7f
+ bic has_nul1, tmp1, tmp2
+#else
+ csel has_nul1, has_nul1, has_nul2, cc
+#endif
sub len, src, srcin
- cbz has_nul1, .Lnul_in_data2
-CPU_BE( mov data2, data1 ) /*prepare data to re-calculate the syndrome*/
- sub len, len, #8
- mov has_nul2, has_nul1
-.Lnul_in_data2:
- /*
- * For big-endian, carry propagation (if the final byte in the
- * string is 0x01) means we cannot use has_nul directly. The
- * easiest way to get the correct byte is to byte-swap the data
- * and calculate the syndrome a second time.
- */
-CPU_BE( rev data2, data2 )
-CPU_BE( sub tmp1, data2, zeroones )
-CPU_BE( orr tmp2, data2, #REP8_7f )
-CPU_BE( bic has_nul2, tmp1, tmp2 )
-
- sub len, len, #8
- rev has_nul2, has_nul2
- clz pos, has_nul2
- add len, len, pos, lsr #3 /* Bits to bytes. */
+ rev has_nul1, has_nul1
+ add tmp2, len, 8
+ clz tmp1, has_nul1
+ csel len, len, tmp2, cc
+ add len, len, tmp1, lsr 3
ret
-.Lmisaligned:
- cmp tmp1, #8
- neg tmp1, tmp1
- ldp data1, data2, [src], #16
- lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
- mov tmp2, #~0
- /* Big-endian. Early bytes are at MSB. */
-CPU_BE( lsl tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
+L(nonascii_loop):
+ ldp data1, data2, [src, 16]!
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, REP8_7f
+ sub tmp3, data2, zeroones
+ orr tmp4, data2, REP8_7f
+ bics has_nul1, tmp1, tmp2
+ bic has_nul2, tmp3, tmp4
+ ccmp has_nul2, 0, 0, eq
+ bne L(tail)
+ ldp data1, data2, [src, 16]!
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, REP8_7f
+ sub tmp3, data2, zeroones
+ orr tmp4, data2, REP8_7f
+ bics has_nul1, tmp1, tmp2
+ bic has_nul2, tmp3, tmp4
+ ccmp has_nul2, 0, 0, eq
+ beq L(nonascii_loop)
+ b L(tail)
+
+ /* Load 16 bytes from [srcin & ~15] and force the bytes that precede
+ srcin to 0x7f, so we ignore any NUL bytes before the string.
+ Then continue in the aligned loop. */
+L(page_cross):
+ bic src, srcin, 15
+ ldp data1, data2, [src]
+ lsl tmp1, srcin, 3
+ mov tmp4, -1
+#ifdef __AARCH64EB__
+ /* Big-endian. Early bytes are at MSB. */
+ lsr tmp1, tmp4, tmp1 /* Shift (tmp1 & 63). */
+#else
/* Little-endian. Early bytes are at LSB. */
-CPU_LE( lsr tmp2, tmp2, tmp1 ) /* Shift (tmp1 & 63). */
+ lsl tmp1, tmp4, tmp1 /* Shift (tmp1 & 63). */
+#endif
+ orr tmp1, tmp1, REP8_80
+ orn data1, data1, tmp1
+ orn tmp2, data2, tmp1
+ tst srcin, 8
+ csel data1, data1, tmp4, eq
+ csel data2, data2, tmp2, eq
+ b L(page_cross_entry)
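
In C, the little-endian arm of this fixup looks roughly like the following (mask_before_start is a hypothetical name; the data words were loaded from srcin & ~15):

	#include <stdint.h>

	#define REP8_80 0x8080808080808080ULL

	static void mask_before_start(uintptr_t srcin, uint64_t *d1,
				      uint64_t *d2)
	{
		/* Bytes below srcin & 7 get 0x7f or'ed in, so the NUL
		 * check can never fire on them. */
		uint64_t mask = (~0ULL << ((srcin & 7) * 8)) | REP8_80;

		if (srcin & 8) {
			*d1 = ~0ULL;	/* string starts in the second dword */
			*d2 |= ~mask;
		} else {
			*d1 |= ~mask;	/* second dword is fully in-string */
		}
	}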
- orr data1, data1, tmp2
- orr data2a, data2, tmp2
- csinv data1, data1, xzr, le
- csel data2, data2, data2a, le
- b .Lrealigned
SYM_FUNC_END_PI(strlen)
EXPORT_SYMBOL_NOKASAN(strlen)
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index 2a7ee949ed47..48d44f7fddb1 100644
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -1,299 +1,261 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2013 ARM Ltd.
- * Copyright (C) 2013 Linaro.
+ * Copyright (c) 2013-2021, Arm Limited.
*
- * This code is based on glibc cortex strings work originally authored by Linaro
- * and re-licensed under GPLv2 for the Linux kernel. The original code can
- * be found @
- *
- * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
- * files/head:/src/aarch64/
+ * Adapted from the original at:
+ * https://github.com/ARM-software/optimized-routines/blob/e823e3abf5f89ecb/string/aarch64/strncmp.S
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
-/*
- * compare two strings
+/* Assumptions:
*
- * Parameters:
- * x0 - const string 1 pointer
- * x1 - const string 2 pointer
- * x2 - the maximal length to be compared
- * Returns:
- * x0 - an integer less than, equal to, or greater than zero if s1 is found,
- * respectively, to be less than, to match, or be greater than s2.
+ * ARMv8-a, AArch64
*/
+#define L(label) .L ## label
+
#define REP8_01 0x0101010101010101
#define REP8_7f 0x7f7f7f7f7f7f7f7f
#define REP8_80 0x8080808080808080
/* Parameters and result. */
-src1 .req x0
-src2 .req x1
-limit .req x2
-result .req x0
+#define src1 x0
+#define src2 x1
+#define limit x2
+#define result x0
/* Internal variables. */
-data1 .req x3
-data1w .req w3
-data2 .req x4
-data2w .req w4
-has_nul .req x5
-diff .req x6
-syndrome .req x7
-tmp1 .req x8
-tmp2 .req x9
-tmp3 .req x10
-zeroones .req x11
-pos .req x12
-limit_wd .req x13
-mask .req x14
-endloop .req x15
+#define data1 x3
+#define data1w w3
+#define data2 x4
+#define data2w w4
+#define has_nul x5
+#define diff x6
+#define syndrome x7
+#define tmp1 x8
+#define tmp2 x9
+#define tmp3 x10
+#define zeroones x11
+#define pos x12
+#define limit_wd x13
+#define mask x14
+#define endloop x15
+#define count mask
SYM_FUNC_START_WEAK_PI(strncmp)
- cbz limit, .Lret0
+ cbz limit, L(ret0)
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7
- b.ne .Lmisaligned8
- ands tmp1, src1, #7
- b.ne .Lmutual_align
+ and count, src1, #7
+ b.ne L(misaligned8)
+ cbnz count, L(mutual_align)
/* Calculate the number of full and partial words -1. */
- /*
- * when limit is mulitply of 8, if not sub 1,
- * the judgement of last dword will wrong.
- */
- sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
- lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
+ sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
+ lsr limit_wd, limit_wd, #3 /* Convert to Dwords. */
- /*
- * NUL detection works on the principle that (X - 1) & (~X) & 0x80
- * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
- * can be done in parallel across the entire word.
- */
-.Lloop_aligned:
+ /* NUL detection works on the principle that (X - 1) & (~X) & 0x80
+ (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+ can be done in parallel across the entire word. */
+ .p2align 4
+L(loop_aligned):
ldr data1, [src1], #8
ldr data2, [src2], #8
-.Lstart_realigned:
+L(start_realigned):
subs limit_wd, limit_wd, #1
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, pl /* Last Dword or differences.*/
- bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ csinv endloop, diff, xzr, pl /* Last Dword or differences. */
+ bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
ccmp endloop, #0, #0, eq
- b.eq .Lloop_aligned
+ b.eq L(loop_aligned)
+ /* End of main loop */
- /*Not reached the limit, must have found the end or a diff. */
- tbz limit_wd, #63, .Lnot_limit
+ /* Not reached the limit, must have found the end or a diff. */
+ tbz limit_wd, #63, L(not_limit)
/* Limit % 8 == 0 => all bytes significant. */
ands limit, limit, #7
- b.eq .Lnot_limit
+ b.eq L(not_limit)
- lsl limit, limit, #3 /* Bits -> bytes. */
+ lsl limit, limit, #3 /* Bits -> bytes. */
mov mask, #~0
-CPU_BE( lsr mask, mask, limit )
-CPU_LE( lsl mask, mask, limit )
+#ifdef __AARCH64EB__
+ lsr mask, mask, limit
+#else
+ lsl mask, mask, limit
+#endif
bic data1, data1, mask
bic data2, data2, mask
/* Make sure that the NUL byte is marked in the syndrome. */
orr has_nul, has_nul, mask
-.Lnot_limit:
+L(not_limit):
orr syndrome, diff, has_nul
- b .Lcal_cmpresult
-.Lmutual_align:
- /*
- * Sources are mutually aligned, but are not currently at an
- * alignment boundary. Round down the addresses and then mask off
- * the bytes that precede the start point.
- * We also need to adjust the limit calculations, but without
- * overflowing if the limit is near ULONG_MAX.
- */
+#ifndef __AARCH64EB__
+ rev syndrome, syndrome
+ rev data1, data1
+ /* The MS-non-zero bit of the syndrome marks either the first bit
+ that is different, or the top bit of the first zero byte.
+ Shifting left now will bring the critical information into the
+ top bits. */
+ clz pos, syndrome
+ rev data2, data2
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /* But we need to zero-extend (char is unsigned) the value and then
+ perform a signed 32-bit subtraction. */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+#else
+ /* For big-endian we cannot use the trick with the syndrome value
+ as carry-propagation can corrupt the upper bits if the trailing
+ bytes in the string contain 0x01. */
+ /* However, if there is no NUL byte in the dword, we can generate
+ the result directly. We can't just subtract the bytes as the
+ MSB might be significant. */
+ cbnz has_nul, 1f
+ cmp data1, data2
+ cset result, ne
+ cneg result, result, lo
+ ret
+1:
+ /* Re-compute the NUL-byte detection, using a byte-reversed value. */
+ rev tmp3, data1
+ sub tmp1, tmp3, zeroones
+ orr tmp2, tmp3, #REP8_7f
+ bic has_nul, tmp1, tmp2
+ rev has_nul, has_nul
+ orr syndrome, diff, has_nul
+ clz pos, syndrome
+ /* The MS-non-zero bit of the syndrome marks either the first bit
+ that is different, or the top bit of the first zero byte.
+ Shifting left now will bring the critical information into the
+ top bits. */
+ lsl data1, data1, pos
+ lsl data2, data2, pos
+ /* But we need to zero-extend (char is unsigned) the value and then
+ perform a signed 32-bit subtraction. */
+ lsr data1, data1, #56
+ sub result, data1, data2, lsr #56
+ ret
+#endif
+
+L(mutual_align):
+ /* Sources are mutually aligned, but are not currently at an
+ alignment boundary. Round down the addresses and then mask off
+ the bytes that precede the start point.
+ We also need to adjust the limit calculations, but without
+ overflowing if the limit is near ULONG_MAX. */
bic src1, src1, #7
bic src2, src2, #7
ldr data1, [src1], #8
- neg tmp3, tmp1, lsl #3 /* 64 - bits(bytes beyond align). */
+ neg tmp3, count, lsl #3 /* 64 - bits(bytes beyond align). */
ldr data2, [src2], #8
mov tmp2, #~0
- sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
+ sub limit_wd, limit, #1 /* limit != 0, so no underflow. */
+#ifdef __AARCH64EB__
/* Big-endian. Early bytes are at MSB. */
-CPU_BE( lsl tmp2, tmp2, tmp3 ) /* Shift (tmp1 & 63). */
+ lsl tmp2, tmp2, tmp3 /* Shift (count & 63). */
+#else
/* Little-endian. Early bytes are at LSB. */
-CPU_LE( lsr tmp2, tmp2, tmp3 ) /* Shift (tmp1 & 63). */
-
+ lsr tmp2, tmp2, tmp3 /* Shift (count & 63). */
+#endif
and tmp3, limit_wd, #7
lsr limit_wd, limit_wd, #3
- /* Adjust the limit. Only low 3 bits used, so overflow irrelevant.*/
- add limit, limit, tmp1
- add tmp3, tmp3, tmp1
+ /* Adjust the limit. Only low 3 bits used, so overflow irrelevant. */
+ add limit, limit, count
+ add tmp3, tmp3, count
orr data1, data1, tmp2
orr data2, data2, tmp2
add limit_wd, limit_wd, tmp3, lsr #3
- b .Lstart_realigned
+ b L(start_realigned)
+
+ .p2align 4
+ /* Don't bother with dwords for up to 16 bytes. */
+L(misaligned8):
+ cmp limit, #16
+ b.hs L(try_misaligned_words)
-/*when src1 offset is not equal to src2 offset...*/
-.Lmisaligned8:
- cmp limit, #8
- b.lo .Ltiny8proc /*limit < 8... */
- /*
- * Get the align offset length to compare per byte first.
- * After this process, one string's address will be aligned.*/
- and tmp1, src1, #7
- neg tmp1, tmp1
- add tmp1, tmp1, #8
- and tmp2, src2, #7
- neg tmp2, tmp2
- add tmp2, tmp2, #8
- subs tmp3, tmp1, tmp2
- csel pos, tmp1, tmp2, hi /*Choose the maximum. */
- /*
- * Here, limit is not less than 8, so directly run .Ltinycmp
- * without checking the limit.*/
- sub limit, limit, pos
-.Ltinycmp:
+L(byte_loop):
+ /* Perhaps we can do better than this. */
ldrb data1w, [src1], #1
ldrb data2w, [src2], #1
- subs pos, pos, #1
- ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */
- ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
- b.eq .Ltinycmp
- cbnz pos, 1f /*find the null or unequal...*/
- cmp data1w, #1
- ccmp data1w, data2w, #0, cs
- b.eq .Lstart_align /*the last bytes are equal....*/
-1:
+ subs limit, limit, #1
+ ccmp data1w, #1, #0, hi /* NZCV = 0b0000. */
+ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
+ b.eq L(byte_loop)
+L(done):
sub result, data1, data2
ret
-
-.Lstart_align:
+ /* Align the SRC1 to a dword by doing a bytewise compare and then do
+ the dword loop. */
+L(try_misaligned_words):
lsr limit_wd, limit, #3
- cbz limit_wd, .Lremain8
- /*process more leading bytes to make str1 aligned...*/
- ands xzr, src1, #7
- b.eq .Lrecal_offset
- add src1, src1, tmp3 /*tmp3 is positive in this branch.*/
- add src2, src2, tmp3
- ldr data1, [src1], #8
- ldr data2, [src2], #8
+ cbz count, L(do_misaligned)
- sub limit, limit, tmp3
+ neg count, count
+ and count, count, #7
+ sub limit, limit, count
lsr limit_wd, limit, #3
- subs limit_wd, limit_wd, #1
- sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
- bics has_nul, tmp1, tmp2
- ccmp endloop, #0, #0, eq /*has_null is ZERO: no null byte*/
- b.ne .Lunequal_proc
- /*How far is the current str2 from the alignment boundary...*/
- and tmp3, tmp3, #7
-.Lrecal_offset:
- neg pos, tmp3
-.Lloopcmp_proc:
- /*
- * Divide the eight bytes into two parts. First,backwards the src2
- * to an alignment boundary,load eight bytes from the SRC2 alignment
- * boundary,then compare with the relative bytes from SRC1.
- * If all 8 bytes are equal,then start the second part's comparison.
- * Otherwise finish the comparison.
- * This special handle can garantee all the accesses are in the
- * thread/task space in avoid to overrange access.
- */
- ldr data1, [src1,pos]
- ldr data2, [src2,pos]
- sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
- bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, eq
- cbnz endloop, .Lunequal_proc
+L(page_end_loop):
+ ldrb data1w, [src1], #1
+ ldrb data2w, [src2], #1
+ cmp data1w, #1
+ ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
+ b.ne L(done)
+ subs count, count, #1
+ b.hi L(page_end_loop)
+
+L(do_misaligned):
+ /* Prepare ourselves for the next page crossing. Unlike the aligned
+ loop, we fetch 1 less dword because we risk crossing bounds on
+ SRC2. */
+ mov count, #8
+ subs limit_wd, limit_wd, #1
+ b.lo L(done_loop)
+L(loop_misaligned):
+ and tmp2, src2, #0xff8
+ eor tmp2, tmp2, #0xff8
+ cbz tmp2, L(page_end_loop)
- /*The second part process*/
ldr data1, [src1], #8
ldr data2, [src2], #8
- subs limit_wd, limit_wd, #1
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
- eor diff, data1, data2 /* Non-zero if differences found. */
- csinv endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
- bics has_nul, tmp1, tmp2
- ccmp endloop, #0, #0, eq /*has_null is ZERO: no null byte*/
- b.eq .Lloopcmp_proc
-
-.Lunequal_proc:
- orr syndrome, diff, has_nul
- cbz syndrome, .Lremain8
-.Lcal_cmpresult:
- /*
- * reversed the byte-order as big-endian,then CLZ can find the most
- * significant zero bits.
- */
-CPU_LE( rev syndrome, syndrome )
-CPU_LE( rev data1, data1 )
-CPU_LE( rev data2, data2 )
- /*
- * For big-endian we cannot use the trick with the syndrome value
- * as carry-propagation can corrupt the upper bits if the trailing
- * bytes in the string contain 0x01.
- * However, if there is no NUL byte in the dword, we can generate
- * the result directly. We can't just subtract the bytes as the
- * MSB might be significant.
- */
-CPU_BE( cbnz has_nul, 1f )
-CPU_BE( cmp data1, data2 )
-CPU_BE( cset result, ne )
-CPU_BE( cneg result, result, lo )
-CPU_BE( ret )
-CPU_BE( 1: )
- /* Re-compute the NUL-byte detection, using a byte-reversed value.*/
-CPU_BE( rev tmp3, data1 )
-CPU_BE( sub tmp1, tmp3, zeroones )
-CPU_BE( orr tmp2, tmp3, #REP8_7f )
-CPU_BE( bic has_nul, tmp1, tmp2 )
-CPU_BE( rev has_nul, has_nul )
-CPU_BE( orr syndrome, diff, has_nul )
- /*
- * The MS-non-zero bit of the syndrome marks either the first bit
- * that is different, or the top bit of the first zero byte.
- * Shifting left now will bring the critical information into the
- * top bits.
- */
- clz pos, syndrome
- lsl data1, data1, pos
- lsl data2, data2, pos
- /*
- * But we need to zero-extend (char is unsigned) the value and then
- * perform a signed 32-bit subtraction.
- */
- lsr data1, data1, #56
- sub result, data1, data2, lsr #56
- ret
-
-.Lremain8:
- /* Limit % 8 == 0 => all bytes significant. */
- ands limit, limit, #7
- b.eq .Lret0
-.Ltiny8proc:
- ldrb data1w, [src1], #1
- ldrb data2w, [src2], #1
- subs limit, limit, #1
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
+ ccmp diff, #0, #0, eq
+ b.ne L(not_limit)
+ subs limit_wd, limit_wd, #1
+ b.pl L(loop_misaligned)
- ccmp data1w, #1, #0, ne /* NZCV = 0b0000. */
- ccmp data1w, data2w, #0, cs /* NZCV = 0b0000. */
- b.eq .Ltiny8proc
- sub result, data1, data2
- ret
+L(done_loop):
+ /* We found a difference or a NUL before the limit was reached. */
+ and limit, limit, #7
+ cbz limit, L(not_limit)
+ /* Read the last word. */
+ sub src1, src1, 8
+ sub src2, src2, 8
+ ldr data1, [src1, limit]
+ ldr data2, [src2, limit]
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, #REP8_7f
+ eor diff, data1, data2 /* Non-zero if differences found. */
+ bics has_nul, tmp1, tmp2 /* Non-zero if NUL terminator. */
+ ccmp diff, #0, #0, eq
+ b.ne L(not_limit)
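
The overlapping re-read above is the standard tail trick; in C it would look something like this (a sketch; load64() is a hypothetical unaligned 8-byte load, and s1/s2 point just past the last full dword compared):

	#include <stdint.h>
	#include <string.h>

	/* Hypothetical unaligned 64-bit load. */
	static uint64_t load64(const void *p)
	{
		uint64_t v;
		memcpy(&v, p, sizeof(v));
		return v;
	}

	/* Re-read the final 8 bytes so they end exactly at the limit,
	 * overlapping the last full dword rather than reading past the
	 * buffers. */
	static void read_last_word(const char *s1, const char *s2,
				   size_t limit, uint64_t *d1, uint64_t *d2)
	{
		size_t rem = limit & 7;	/* 0 means nothing left to check */

		*d1 = load64(s1 - 8 + rem);
		*d2 = load64(s2 - 8 + rem);
	}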
-.Lret0:
+L(ret0):
mov result, #0
ret
+
SYM_FUNC_END_PI(strncmp)
EXPORT_SYMBOL_NOKASAN(strncmp)
diff --git a/arch/arm64/lib/uaccess_flushcache.c b/arch/arm64/lib/uaccess_flushcache.c
index c83bb5a4aad2..baee22961bdb 100644
--- a/arch/arm64/lib/uaccess_flushcache.c
+++ b/arch/arm64/lib/uaccess_flushcache.c
@@ -15,7 +15,7 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt)
* barrier to order the cache maintenance against the memcpy.
*/
memcpy(dst, src, cnt);
- __clean_dcache_area_pop(dst, cnt);
+ dcache_clean_pop((unsigned long)dst, (unsigned long)dst + cnt);
}
EXPORT_SYMBOL_GPL(memcpy_flushcache);
@@ -33,6 +33,6 @@ unsigned long __copy_user_flushcache(void *to, const void __user *from,
rc = raw_copy_from_user(to, from, n);
/* See above */
- __clean_dcache_area_pop(to, n - rc);
+ dcache_clean_pop((unsigned long)to, (unsigned long)to + n - rc);
return rc;
}
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 2d881f34dd9d..5051b3c1a4f1 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -15,7 +15,7 @@
#include <asm/asm-uaccess.h>
/*
- * flush_icache_range(start,end)
+ * caches_clean_inval_pou_macro(start,end) [fixup]
*
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
@@ -23,12 +23,27 @@
*
* - start - virtual start address of region
* - end - virtual end address of region
+ * - fixup - optional label to branch to on user fault
*/
-SYM_FUNC_START(__flush_icache_range)
- /* FALLTHROUGH */
+.macro caches_clean_inval_pou_macro, fixup
+alternative_if ARM64_HAS_CACHE_IDC
+ dsb ishst
+ b .Ldc_skip_\@
+alternative_else_nop_endif
+ mov x2, x0
+ mov x3, x1
+ dcache_by_line_op cvau, ish, x2, x3, x4, x5, \fixup
+.Ldc_skip_\@:
+alternative_if ARM64_HAS_CACHE_DIC
+ isb
+ b .Lic_skip_\@
+alternative_else_nop_endif
+ invalidate_icache_by_line x0, x1, x2, x3, \fixup
+.Lic_skip_\@:
+.endm
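
A rough C model of the macro (all helpers hypothetical; cpu_has_idc()/cpu_has_dic() stand in for the ARM64_HAS_CACHE_IDC/DIC alternatives):

	extern int cpu_has_idc(void);	/* hypothetical feature checks */
	extern int cpu_has_dic(void);
	extern void dsb_ishst(void), isb(void);
	extern void dcache_clean_pou_range(unsigned long s, unsigned long e);
	extern void icache_inval_range(unsigned long s, unsigned long e);

	static void caches_clean_inval_pou_model(unsigned long start,
						 unsigned long end)
	{
		if (cpu_has_idc())
			dsb_ishst();	/* D side not needed for I/D coherence */
		else
			dcache_clean_pou_range(start, end); /* dc cvau per line */

		if (cpu_has_dic())
			isb();		/* no explicit I-cache invalidate needed */
		else
			icache_inval_range(start, end);	    /* ic ivau per line */
	}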
/*
- * __flush_cache_user_range(start,end)
+ * caches_clean_inval_pou(start,end)
*
* Ensure that the I and D caches are coherent within specified region.
* This is typically used when code has been written to a memory region,
@@ -37,117 +52,103 @@ SYM_FUNC_START(__flush_icache_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-SYM_FUNC_START(__flush_cache_user_range)
+SYM_FUNC_START(caches_clean_inval_pou)
+ caches_clean_inval_pou_macro
+ ret
+SYM_FUNC_END(caches_clean_inval_pou)
+
+/*
+ * caches_clean_inval_user_pou(start,end)
+ *
+ * Ensure that the I and D caches are coherent within specified region.
+ * This is typically used when code has been written to a memory region,
+ * and will be executed.
+ *
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+SYM_FUNC_START(caches_clean_inval_user_pou)
uaccess_ttbr0_enable x2, x3, x4
-alternative_if ARM64_HAS_CACHE_IDC
- dsb ishst
- b 7f
-alternative_else_nop_endif
- dcache_line_size x2, x3
- sub x3, x2, #1
- bic x4, x0, x3
-1:
-user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
- add x4, x4, x2
- cmp x4, x1
- b.lo 1b
- dsb ish
-7:
-alternative_if ARM64_HAS_CACHE_DIC
- isb
- b 8f
-alternative_else_nop_endif
- invalidate_icache_by_line x0, x1, x2, x3, 9f
-8: mov x0, #0
+ caches_clean_inval_pou_macro 2f
+ mov x0, xzr
1:
uaccess_ttbr0_disable x1, x2
ret
-9:
+2:
mov x0, #-EFAULT
b 1b
-SYM_FUNC_END(__flush_icache_range)
-SYM_FUNC_END(__flush_cache_user_range)
+SYM_FUNC_END(caches_clean_inval_user_pou)
/*
- * invalidate_icache_range(start,end)
+ * icache_inval_pou(start,end)
*
* Ensure that the I cache is invalid within specified region.
*
* - start - virtual start address of region
* - end - virtual end address of region
*/
-SYM_FUNC_START(invalidate_icache_range)
+SYM_FUNC_START(icache_inval_pou)
alternative_if ARM64_HAS_CACHE_DIC
- mov x0, xzr
isb
ret
alternative_else_nop_endif
- uaccess_ttbr0_enable x2, x3, x4
-
- invalidate_icache_by_line x0, x1, x2, x3, 2f
- mov x0, xzr
-1:
- uaccess_ttbr0_disable x1, x2
+ invalidate_icache_by_line x0, x1, x2, x3
ret
-2:
- mov x0, #-EFAULT
- b 1b
-SYM_FUNC_END(invalidate_icache_range)
+SYM_FUNC_END(icache_inval_pou)
/*
- * __flush_dcache_area(kaddr, size)
+ * dcache_clean_inval_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned and invalidated to the PoC.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-SYM_FUNC_START_PI(__flush_dcache_area)
+SYM_FUNC_START_PI(dcache_clean_inval_poc)
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
-SYM_FUNC_END_PI(__flush_dcache_area)
+SYM_FUNC_END_PI(dcache_clean_inval_poc)
/*
- * __clean_dcache_area_pou(kaddr, size)
+ * dcache_clean_pou(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoU.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-SYM_FUNC_START(__clean_dcache_area_pou)
+SYM_FUNC_START(dcache_clean_pou)
alternative_if ARM64_HAS_CACHE_IDC
dsb ishst
ret
alternative_else_nop_endif
dcache_by_line_op cvau, ish, x0, x1, x2, x3
ret
-SYM_FUNC_END(__clean_dcache_area_pou)
+SYM_FUNC_END(dcache_clean_pou)
/*
- * __inval_dcache_area(kaddr, size)
+ * dcache_inval_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are invalidated. Any partial lines at the ends of the interval are
* also cleaned to PoC to prevent data loss.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - kernel start address of region
+ * - end - kernel end address of region
*/
SYM_FUNC_START_LOCAL(__dma_inv_area)
-SYM_FUNC_START_PI(__inval_dcache_area)
+SYM_FUNC_START_PI(dcache_inval_poc)
/* FALLTHROUGH */
/*
- * __dma_inv_area(start, size)
+ * __dma_inv_area(start, end)
* - start - virtual start address of region
- * - size - size in question
+ * - end - virtual end address of region
*/
- add x1, x1, x0
dcache_line_size x2, x3
sub x3, x2, #1
tst x1, x3 // end cache line aligned?
@@ -165,48 +166,48 @@ SYM_FUNC_START_PI(__inval_dcache_area)
b.lo 2b
dsb sy
ret
-SYM_FUNC_END_PI(__inval_dcache_area)
+SYM_FUNC_END_PI(dcache_inval_poc)
SYM_FUNC_END(__dma_inv_area)
/*
- * __clean_dcache_area_poc(kaddr, size)
+ * dcache_clean_poc(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoC.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
SYM_FUNC_START_LOCAL(__dma_clean_area)
-SYM_FUNC_START_PI(__clean_dcache_area_poc)
+SYM_FUNC_START_PI(dcache_clean_poc)
/* FALLTHROUGH */
/*
- * __dma_clean_area(start, size)
+ * __dma_clean_area(start, end)
* - start - virtual start address of region
- * - size - size in question
+ * - end - virtual end address of region
*/
dcache_by_line_op cvac, sy, x0, x1, x2, x3
ret
-SYM_FUNC_END_PI(__clean_dcache_area_poc)
+SYM_FUNC_END_PI(dcache_clean_poc)
SYM_FUNC_END(__dma_clean_area)
/*
- * __clean_dcache_area_pop(kaddr, size)
+ * dcache_clean_pop(start, end)
*
- * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ * Ensure that any D-cache lines for the interval [start, end)
* are cleaned to the PoP.
*
- * - kaddr - kernel address
- * - size - size in question
+ * - start - virtual start address of region
+ * - end - virtual end address of region
*/
-SYM_FUNC_START_PI(__clean_dcache_area_pop)
+SYM_FUNC_START_PI(dcache_clean_pop)
alternative_if_not ARM64_HAS_DCPOP
- b __clean_dcache_area_poc
+ b dcache_clean_poc
alternative_else_nop_endif
dcache_by_line_op cvap, sy, x0, x1, x2, x3
ret
-SYM_FUNC_END_PI(__clean_dcache_area_pop)
+SYM_FUNC_END_PI(dcache_clean_pop)
/*
* __dma_flush_area(start, size)
@@ -217,6 +218,7 @@ SYM_FUNC_END_PI(__clean_dcache_area_pop)
* - size - size in question
*/
SYM_FUNC_START_PI(__dma_flush_area)
+ add x1, x0, x1
dcache_by_line_op civac, sy, x0, x1, x2, x3
ret
SYM_FUNC_END_PI(__dma_flush_area)
@@ -228,6 +230,7 @@ SYM_FUNC_END_PI(__dma_flush_area)
* - dir - DMA direction
*/
SYM_FUNC_START_PI(__dma_map_area)
+ add x1, x0, x1
cmp w2, #DMA_FROM_DEVICE
b.eq __dma_inv_area
b __dma_clean_area
@@ -240,6 +243,7 @@ SYM_FUNC_END_PI(__dma_map_area)
* - dir - DMA direction
*/
SYM_FUNC_START_PI(__dma_unmap_area)
+ add x1, x0, x1
cmp w2, #DMA_TO_DEVICE
b.ne __dma_inv_area
ret
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 001737a8f309..cd72576ae2b7 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -402,14 +402,12 @@ static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
- asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
- GFP_KERNEL);
+ asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
if (!asid_map)
panic("Failed to allocate bitmap for %lu ASIDs\n",
NUM_USER_ASIDS);
- pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
- sizeof(*pinned_asid_map), GFP_KERNEL);
+ pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
nr_pinned_asids = 0;
/*
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 93e87b287556..6719f9efea09 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -50,10 +50,10 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
dev->dma_coherent = coherent;
if (iommu)
- iommu_setup_dma_ops(dev, dma_base, size);
+ iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
#ifdef CONFIG_XEN
- if (xen_initial_domain())
+ if (xen_swiotlb_detect())
dev->dma_ops = &xen_swiotlb_dma_ops;
#endif
}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index f37d4e3830b7..349c488765ca 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -99,6 +99,8 @@ static void mem_abort_decode(unsigned int esr)
pr_alert(" EA = %lu, S1PTW = %lu\n",
(esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
(esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
+ pr_alert(" FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC),
+ esr_to_fault_info(esr)->name);
if (esr_is_data_abort(esr))
data_abort_decode(esr);
@@ -232,13 +234,17 @@ static bool is_el1_instruction_abort(unsigned int esr)
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}
+static bool is_el1_data_abort(unsigned int esr)
+{
+ return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR;
+}
+
static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
- if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
+ if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr))
return false;
if (fsc_type == ESR_ELx_FSC_PERM)
@@ -258,7 +264,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
unsigned long flags;
u64 par, dfsc;
- if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR ||
+ if (!is_el1_data_abort(esr) ||
(esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
return false;
@@ -346,10 +352,9 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr,
static bool is_el1_mte_sync_tag_check_fault(unsigned int esr)
{
- unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc = esr & ESR_ELx_FSC;
- if (ec != ESR_ELx_EC_DABT_CUR)
+ if (!is_el1_data_abort(esr))
return false;
if (fsc == ESR_ELx_FSC_MTE)
@@ -504,7 +509,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
*/
if (!(vma->vm_flags & vm_flags))
return VM_FAULT_BADACCESS;
- return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs);
+ return handle_mm_fault(vma, addr, mm_flags, regs);
}
static bool is_el0_instruction_abort(unsigned int esr)
@@ -527,7 +532,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
const struct fault_info *inf;
struct mm_struct *mm = current->mm;
vm_fault_t fault;
- unsigned long vm_flags = VM_ACCESS_FLAGS;
+ unsigned long vm_flags;
unsigned int mm_flags = FAULT_FLAG_DEFAULT;
unsigned long addr = untagged_addr(far);
@@ -544,12 +549,28 @@ static int __kprobes do_page_fault(unsigned long far, unsigned int esr,
if (user_mode(regs))
mm_flags |= FAULT_FLAG_USER;
+ /*
+ * vm_flags tells us what bits we must have in vma->vm_flags
+ * for the fault to be benign; __do_page_fault() checks
+ * vma->vm_flags & vm_flags and returns an error if the
+ * intersection is empty.
+ */
if (is_el0_instruction_abort(esr)) {
+ /* It was an exec fault */
vm_flags = VM_EXEC;
mm_flags |= FAULT_FLAG_INSTRUCTION;
} else if (is_write_abort(esr)) {
+ /* It was a write fault */
vm_flags = VM_WRITE;
mm_flags |= FAULT_FLAG_WRITE;
+ } else {
+ /* It was a read fault */
+ vm_flags = VM_READ;
+ /* Write implies read */
+ vm_flags |= VM_WRITE;
+ /* If EPAN is absent then exec implies read */
+ if (!cpus_have_const_cap(ARM64_HAS_EPAN))
+ vm_flags |= VM_EXEC;
}
if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
@@ -820,13 +841,6 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_mem_abort);
-void do_el0_irq_bp_hardening(void)
-{
- /* PC has already been checked in entry.S */
- arm64_apply_bp_hardening();
-}
-NOKPROBE_SYMBOL(do_el0_irq_bp_hardening);
-
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
@@ -905,3 +919,29 @@ void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);
+
+/*
+ * Used during anonymous page fault handling.
+ */
+struct page *alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;
+
+ /*
+ * If the page is mapped with PROT_MTE, initialise the tags at the
+ * point of allocation and page zeroing as this is usually faster than
+ * separate DC ZVA and STGM.
+ */
+ if (vma->vm_flags & VM_MTE)
+ flags |= __GFP_ZEROTAGS;
+
+ return alloc_page_vma(flags, vma, vaddr);
+}
+
+void tag_clear_highpage(struct page *page)
+{
+ mte_zero_clear_page_tags(page_address(page));
+ page_kasan_tag_reset(page);
+ set_bit(PG_mte_tagged, &page->flags);
+}
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index ac485163a4a7..2aaf950b906c 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -14,28 +14,25 @@
#include <asm/cache.h>
#include <asm/tlbflush.h>
-void sync_icache_aliases(void *kaddr, unsigned long len)
+void sync_icache_aliases(unsigned long start, unsigned long end)
{
- unsigned long addr = (unsigned long)kaddr;
-
if (icache_is_aliasing()) {
- __clean_dcache_area_pou(kaddr, len);
- __flush_icache_all();
+ dcache_clean_pou(start, end);
+ icache_inval_all_pou();
} else {
/*
* Don't issue kick_all_cpus_sync() after I-cache invalidation
* for user mappings.
*/
- __flush_icache_range(addr, addr + len);
+ caches_clean_inval_pou(start, end);
}
}
-static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- unsigned long uaddr, void *kaddr,
- unsigned long len)
+static void flush_ptrace_access(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
{
if (vma->vm_flags & VM_EXEC)
- sync_icache_aliases(kaddr, len);
+ sync_icache_aliases(start, end);
}
/*
@@ -48,15 +45,19 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long len)
{
memcpy(dst, src, len);
- flush_ptrace_access(vma, page, uaddr, dst, len);
+ flush_ptrace_access(vma, (unsigned long)dst, (unsigned long)dst + len);
}
void __sync_icache_dcache(pte_t pte)
{
struct page *page = pte_page(pte);
- if (!test_and_set_bit(PG_dcache_clean, &page->flags))
- sync_icache_aliases(page_address(page), page_size(page));
+ if (!test_bit(PG_dcache_clean, &page->flags)) {
+ sync_icache_aliases((unsigned long)page_address(page),
+ (unsigned long)page_address(page) +
+ page_size(page));
+ set_bit(PG_dcache_clean, &page->flags);
+ }
}
EXPORT_SYMBOL_GPL(__sync_icache_dcache);
@@ -75,20 +76,20 @@ EXPORT_SYMBOL(flush_dcache_page);
/*
* Additional functions defined in assembly.
*/
-EXPORT_SYMBOL(__flush_icache_range);
+EXPORT_SYMBOL(caches_clean_inval_pou);
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size)
{
/* Ensure order against any prior non-cacheable writes */
dmb(osh);
- __clean_dcache_area_pop(addr, size);
+ dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
void arch_invalidate_pmem(void *addr, size_t size)
{
- __inval_dcache_area(addr, size);
+ dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
#endif
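
A usage sketch for the range-based sync_icache_aliases() above; dst and len are hypothetical caller locals mirroring copy_to_user_page():

/* Callers now pass a [start, end) kernel-VA range instead of the
 * old (kaddr, len) pair. */
static void sync_new_insns(void *dst, size_t len)
{
        unsigned long start = (unsigned long)dst;

        sync_icache_aliases(start, start + len);
}
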
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 55ecf6de9ff7..23505fc35324 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -252,7 +252,7 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
set_pte(ptep, pte);
}
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
{
pgd_t *pgdp;
@@ -284,9 +284,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
*/
ptep = pte_alloc_map(mm, pmdp, addr);
} else if (sz == PMD_SIZE) {
- if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
- pud_none(READ_ONCE(*pudp)))
- ptep = huge_pmd_share(mm, addr, pudp);
+ if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
+ ptep = huge_pmd_share(mm, vma, addr, pudp);
else
ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
} else if (sz == (CONT_PMD_SIZE)) {
@@ -340,10 +339,9 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return NULL;
}
-pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
- size_t pagesize = huge_page_size(hstate_vma(vma));
+ size_t pagesize = 1UL << shift;
if (pagesize == CONT_PTE_SIZE) {
entry = pte_mkcont(entry);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 0ace5e68efba..8490ed2917ff 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -35,6 +35,7 @@
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
+#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
@@ -42,6 +43,7 @@
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>
+#include <asm/xen/swiotlb-xen.h>
/*
* We need to be able to catch inadvertent references to memstart_addr
@@ -217,23 +219,17 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
free_area_init(max_zone_pfns);
}
-int pfn_valid(unsigned long pfn)
+int pfn_is_map_memory(unsigned long pfn)
{
- phys_addr_t addr = pfn << PAGE_SHIFT;
+ phys_addr_t addr = PFN_PHYS(pfn);
- if ((addr >> PAGE_SHIFT) != pfn)
+ /* avoid false positives for bogus PFNs, see comment in pfn_valid() */
+ if (PHYS_PFN(addr) != pfn)
return 0;
-#ifdef CONFIG_SPARSEMEM
- if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
- return 0;
-
- if (!valid_section(__pfn_to_section(pfn)))
- return 0;
-#endif
return memblock_is_map_memory(addr);
}
-EXPORT_SYMBOL(pfn_valid);
+EXPORT_SYMBOL(pfn_is_map_memory);
static phys_addr_t memory_limit = PHYS_ADDR_MAX;
@@ -429,6 +425,8 @@ void __init bootmem_init(void)
dma_pernuma_cma_reserve();
+ kvm_hyp_reserve();
+
/*
* sparse_init() tries to allocate memory from memblock, so must be
* done after the fixed reservations
@@ -460,7 +458,7 @@ void __init mem_init(void)
if (swiotlb_force == SWIOTLB_FORCE ||
max_pfn > PFN_DOWN(arm64_dma_phys_limit))
swiotlb_init(1);
- else
+ else if (!xen_swiotlb_detect())
swiotlb_force = SWIOTLB_NO_FORCE;
set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
@@ -468,8 +466,6 @@ void __init mem_init(void)
/* this will put all unused low memory onto the freelists */
memblock_free_all();
- mem_init_print_info(NULL);
-
/*
* Check boundaries twice: Some fundamental inconsistencies can be
* detected at build time already.
@@ -478,6 +474,13 @@ void __init mem_init(void)
BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif
+ /*
+ * Selected page table levels should match when derived from
+ * scratch using the virtual address range and page size.
+ */
+ BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
+ CONFIG_PGTABLE_LEVELS);
+
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;
/*
@@ -498,7 +501,7 @@ void free_initmem(void)
* prevents the region from being reused for kernel modules, which
* is not supported by kallsyms.
*/
- unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
+ vunmap_range((u64)__init_begin, (u64)__init_end);
}
void dump_mem_limit(void)
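
A sketch of the bogus-PFN guard inside pfn_is_map_memory() above: the pfn -> phys -> pfn round-trip only survives when no bits are shifted out of phys_addr_t, so wild PFNs are rejected before the memblock lookup.

/* The PFN_PHYS()/PHYS_PFN() check, spelled out. */
static bool pfn_roundtrips(unsigned long pfn)
{
        phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;      /* PFN_PHYS */

        return (unsigned long)(addr >> PAGE_SHIFT) == pfn;      /* PHYS_PFN */
}
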
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index b5e83c46b23e..b7c81dacabf0 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -43,7 +43,7 @@ static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
/*
* Don't allow RAM to be mapped.
*/
- if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
+ if (WARN_ON(pfn_is_map_memory(__phys_to_pfn(phys_addr))))
return NULL;
area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -84,7 +84,7 @@ EXPORT_SYMBOL(iounmap);
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
/* For normal memory we already have a cacheable mapping. */
- if (pfn_valid(__phys_to_pfn(phys_addr)))
+ if (pfn_is_map_memory(__phys_to_pfn(phys_addr)))
return (void __iomem *)__phys_to_virt(phys_addr);
return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index d8e66c78440e..61b52a92b8b6 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -79,7 +79,7 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
phys_addr_t pmd_phys = early ?
__pa_symbol(kasan_early_shadow_pmd)
: kasan_alloc_zeroed_page(node);
- __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
+ __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
}
return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
@@ -92,7 +92,7 @@ static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
phys_addr_t pud_phys = early ?
__pa_symbol(kasan_early_shadow_pud)
: kasan_alloc_zeroed_page(node);
- __p4d_populate(p4dp, pud_phys, PMD_TYPE_TABLE);
+ __p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE);
}
return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
@@ -214,15 +214,18 @@ static void __init kasan_init_shadow(void)
{
u64 kimg_shadow_start, kimg_shadow_end;
u64 mod_shadow_start, mod_shadow_end;
+ u64 vmalloc_shadow_end;
phys_addr_t pa_start, pa_end;
u64 i;
- kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
- kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));
+ kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
+ kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));
mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
+ vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);
+
/*
* We are going to perform proper setup of shadow memory.
* At first we should unmap early shadow (clear_pgds() call below).
@@ -237,16 +240,22 @@ static void __init kasan_init_shadow(void)
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
- early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
+ early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));
kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
(void *)mod_shadow_start);
- kasan_populate_early_shadow((void *)kimg_shadow_end,
- (void *)KASAN_SHADOW_END);
- if (kimg_shadow_start > mod_shadow_end)
- kasan_populate_early_shadow((void *)mod_shadow_end,
- (void *)kimg_shadow_start);
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+ BUILD_BUG_ON(VMALLOC_START != MODULES_END);
+ kasan_populate_early_shadow((void *)vmalloc_shadow_end,
+ (void *)KASAN_SHADOW_END);
+ } else {
+ kasan_populate_early_shadow((void *)kimg_shadow_end,
+ (void *)KASAN_SHADOW_END);
+ if (kimg_shadow_start > mod_shadow_end)
+ kasan_populate_early_shadow((void *)mod_shadow_end,
+ (void *)kimg_shadow_start);
+ }
for_each_mem_range(i, &pa_start, &pa_end) {
void *start = (void *)__phys_to_virt(pa_start);
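
For reference, the mem-to-shadow mapping that the shadow start/end computations above rely on; this mirrors the generic kasan_mem_to_shadow():

/* Every 1 << KASAN_SHADOW_SCALE_SHIFT bytes of memory map to one
 * shadow byte. */
static void *mem_to_shadow(const void *addr)
{
        return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                        + KASAN_SHADOW_OFFSET);
}

With CONFIG_KASAN_VMALLOC the vmalloc area gets real shadow populated on demand, so only the region past vmalloc_shadow_end needs the zero early shadow; that is exactly what the new branch above encodes.
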
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 3802cfbdd20d..595fde9a47dd 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -39,8 +39,9 @@
#define NO_BLOCK_MAPPINGS BIT(0)
#define NO_CONT_MAPPINGS BIT(1)
+#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */
-u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
+u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
u64 __section(".mmuoff.data.write") vabits_actual;
@@ -81,7 +82,7 @@ void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
- if (!pfn_valid(pfn))
+ if (!pfn_is_map_memory(pfn))
return pgprot_noncached(vma_prot);
else if (file->f_flags & O_SYNC)
return pgprot_writecombine(vma_prot);
@@ -185,10 +186,14 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
BUG_ON(pmd_sect(pmd));
if (pmd_none(pmd)) {
+ pmdval_t pmdval = PMD_TYPE_TABLE | PMD_TABLE_UXN;
phys_addr_t pte_phys;
+
+ if (flags & NO_EXEC_MAPPINGS)
+ pmdval |= PMD_TABLE_PXN;
BUG_ON(!pgtable_alloc);
pte_phys = pgtable_alloc(PAGE_SHIFT);
- __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
+ __pmd_populate(pmdp, pte_phys, pmdval);
pmd = READ_ONCE(*pmdp);
}
BUG_ON(pmd_bad(pmd));
@@ -223,7 +228,7 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
next = pmd_addr_end(addr, end);
/* try section mapping first */
- if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+ if (((addr | next | phys) & ~PMD_MASK) == 0 &&
(flags & NO_BLOCK_MAPPINGS) == 0) {
pmd_set_huge(pmdp, phys, prot);
@@ -259,10 +264,14 @@ static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
*/
BUG_ON(pud_sect(pud));
if (pud_none(pud)) {
+ pudval_t pudval = PUD_TYPE_TABLE | PUD_TABLE_UXN;
phys_addr_t pmd_phys;
+
+ if (flags & NO_EXEC_MAPPINGS)
+ pudval |= PUD_TABLE_PXN;
BUG_ON(!pgtable_alloc);
pmd_phys = pgtable_alloc(PMD_SHIFT);
- __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
+ __pud_populate(pudp, pmd_phys, pudval);
pud = READ_ONCE(*pudp);
}
BUG_ON(pud_bad(pud));
@@ -306,10 +315,14 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
p4d_t p4d = READ_ONCE(*p4dp);
if (p4d_none(p4d)) {
+ p4dval_t p4dval = P4D_TYPE_TABLE | P4D_TABLE_UXN;
phys_addr_t pud_phys;
+
+ if (flags & NO_EXEC_MAPPINGS)
+ p4dval |= P4D_TABLE_PXN;
BUG_ON(!pgtable_alloc);
pud_phys = pgtable_alloc(PUD_SHIFT);
- __p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE);
+ __p4d_populate(p4dp, pud_phys, p4dval);
p4d = READ_ONCE(*p4dp);
}
BUG_ON(p4d_bad(p4d));
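
A condensed sketch of what NO_EXEC_MAPPINGS does across the three hunks above: every table descriptor created for such a mapping carries PXNTable (on top of the unconditional UXNTable), so the whole subtree is non-executable regardless of the leaf permissions. Shown once for the PMD level; the PUD and P4D cases are identical in shape:

static pmdval_t table_prot(int flags)
{
        pmdval_t val = PMD_TYPE_TABLE | PMD_TABLE_UXN;  /* never user-exec */

        if (flags & NO_EXEC_MAPPINGS)
                val |= PMD_TABLE_PXN;   /* hierarchical kernel-exec-never */
        return val;
}
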
@@ -486,14 +499,25 @@ early_param("crashkernel", enable_crash_mem_map);
static void __init map_mem(pgd_t *pgdp)
{
+ static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
phys_addr_t kernel_start = __pa_symbol(_stext);
phys_addr_t kernel_end = __pa_symbol(__init_begin);
phys_addr_t start, end;
- int flags = 0;
+ int flags = NO_EXEC_MAPPINGS;
u64 i;
- if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
- flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+ /*
+ * Setting hierarchical PXNTable attributes on table entries covering
+ * the linear region is only possible if it is guaranteed that no table
+ * entries at any level are being shared between the linear region and
+ * the vmalloc region. Check whether this is true for the PGD level, in
+ * which case it is guaranteed to be true for all other levels as well.
+ */
+ BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
+
+ if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
+ IS_ENABLED(CONFIG_KFENCE))
+ flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/*
* Take care not to create a writable alias for the
@@ -512,7 +536,8 @@ static void __init map_mem(pgd_t *pgdp)
* if MTE is present. Otherwise, it has the same attributes as
* PAGE_KERNEL.
*/
- __map_memblock(pgdp, start, end, PAGE_KERNEL_TAGGED, flags);
+ __map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL),
+ flags);
}
/*
@@ -1089,15 +1114,14 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
}
#endif
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-#if !ARM64_SWAPPER_USES_SECTION_MAPS
+#if !ARM64_KERNEL_USES_PMD_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
return vmemmap_populate_basepages(start, end, node, altmap);
}
-#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */
+#else /* !ARM64_KERNEL_USES_PMD_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
@@ -1142,18 +1166,18 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
return 0;
}
-#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */
+#endif /* !ARM64_KERNEL_USES_PMD_MAPS */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap)
{
-#ifdef CONFIG_MEMORY_HOTPLUG
WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
unmap_hotplug_range(start, end, true, altmap);
free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
-#endif
}
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+#endif /* CONFIG_MEMORY_HOTPLUG */
static inline pud_t *fixmap_pud(unsigned long addr)
{
@@ -1209,11 +1233,11 @@ void __init early_fixmap_init(void)
pudp = pud_offset_kimg(p4dp, addr);
} else {
if (p4d_none(p4d))
- __p4d_populate(p4dp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
+ __p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
pudp = fixmap_pud(addr);
}
if (pud_none(READ_ONCE(*pudp)))
- __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
+ __pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
pmdp = fixmap_pmd(addr);
__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
@@ -1315,27 +1339,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
return dt_virt;
}
-int __init arch_ioremap_p4d_supported(void)
-{
- return 0;
-}
-
-int __init arch_ioremap_pud_supported(void)
-{
- /*
- * Only 4k granule supports level 1 block mappings.
- * SW table walks can't handle removal of intermediate entries.
- */
- return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
- !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
-}
-
-int __init arch_ioremap_pmd_supported(void)
-{
- /* See arch_ioremap_pud_supported() */
- return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
-}
-
+#if CONFIG_PGTABLE_LEVELS > 3
int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));
@@ -1350,6 +1354,16 @@ int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
return 1;
}
+int pud_clear_huge(pud_t *pudp)
+{
+ if (!pud_sect(READ_ONCE(*pudp)))
+ return 0;
+ pud_clear(pudp);
+ return 1;
+}
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));
@@ -1364,14 +1378,6 @@ int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
return 1;
}
-int pud_clear_huge(pud_t *pudp)
-{
- if (!pud_sect(READ_ONCE(*pudp)))
- return 0;
- pud_clear(pudp);
- return 1;
-}
-
int pmd_clear_huge(pmd_t *pmdp)
{
if (!pmd_sect(READ_ONCE(*pmdp)))
@@ -1379,6 +1385,7 @@ int pmd_clear_huge(pmd_t *pmdp)
pmd_clear(pmdp);
return 1;
}
+#endif
int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
@@ -1427,11 +1434,6 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
return 1;
}
-int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
-{
- return 0; /* Don't attempt a block mapping */
-}
-
#ifdef CONFIG_MEMORY_HOTPLUG
static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
{
@@ -1447,6 +1449,22 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
struct range arch_get_mappable_range(void)
{
struct range mhp_range;
+ u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
+ u64 end_linear_pa = __pa(PAGE_END - 1);
+
+ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+ /*
+ * Check for a wrap: with a randomized linear mapping, the
+ * start physical address can end up bigger than the end
+ * physical address. In that case set start to zero, because
+ * the [0, end_linear_pa] range must still be able to cover
+ * all addressable physical addresses.
+ */
+ if (start_linear_pa > end_linear_pa)
+ start_linear_pa = 0;
+ }
+
+ WARN_ON(start_linear_pa > end_linear_pa);
/*
* Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
@@ -1454,15 +1472,16 @@ struct range arch_get_mappable_range(void)
* range which can be mapped inside this linear mapping range, must
* also be derived from its end points.
*/
- mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
- mhp_range.end = __pa(PAGE_END - 1);
+ mhp_range.start = start_linear_pa;
+ mhp_range.end = end_linear_pa;
+
return mhp_range;
}
int arch_add_memory(int nid, u64 start, u64 size,
struct mhp_params *params)
{
- int ret, flags = 0;
+ int ret, flags = NO_EXEC_MAPPINGS;
VM_BUG_ON(!mhp_range_allowed(start, size, true));
@@ -1472,7 +1491,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
*/
if (rodata_full || debug_pagealloc_enabled() ||
IS_ENABLED(CONFIG_KFENCE))
- flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+ flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
size, params->pgprot, __pgd_pgtable_alloc,
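
A self-contained sketch of the wrap handling added to arch_get_mappable_range() above, with hypothetical start_pa/end_pa inputs standing in for __pa(_PAGE_OFFSET(vabits_actual)) and __pa(PAGE_END - 1):

/* With KASLR the randomized linear map can place the start of the
 * linear region at a higher physical address than its end; clamping
 * start to 0 keeps [start, end] covering all addressable memory. */
static struct range mappable_range(u64 start_pa, u64 end_pa, bool kaslr)
{
        struct range r;

        if (kaslr && start_pa > end_pa)
                start_pa = 0;
        r.start = start_pa;
        r.end = end_pa;
        return r;
}
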
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index c967bfd30d2b..35936c5ae1ce 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -46,9 +46,13 @@
#endif
#ifdef CONFIG_KASAN_HW_TAGS
-#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
+#define TCR_MTE_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1 | TCR_TBID1
#else
-#define TCR_KASAN_HW_FLAGS 0
+/*
+ * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
+ * TBI being enabled at EL1.
+ */
+#define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
#endif
/*
@@ -58,10 +62,8 @@
#define MAIR_EL1_SET \
(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \
- MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \
- MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \
MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
#ifdef CONFIG_CPU_PM
@@ -83,11 +85,7 @@ SYM_FUNC_START(cpu_do_suspend)
mrs x9, mdscr_el1
mrs x10, oslsr_el1
mrs x11, sctlr_el1
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- mrs x12, tpidr_el1
-alternative_else
- mrs x12, tpidr_el2
-alternative_endif
+ get_this_cpu_offset x12
mrs x13, sp_el0
stp x2, x3, [x0]
stp x4, x5, [x0, #16]
@@ -145,11 +143,7 @@ SYM_FUNC_START(cpu_do_resume)
msr mdscr_el1, x10
msr sctlr_el1, x12
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- msr tpidr_el1, x13
-alternative_else
- msr tpidr_el2, x13
-alternative_endif
+ set_this_cpu_offset x13
msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1
@@ -419,14 +413,17 @@ SYM_FUNC_START(__cpu_setup)
reset_amuserenr_el0 x1 // Disable AMU access from EL0
/*
- * Memory region attributes
+ * Default values for VMSA control registers. These will be adjusted
+ * below depending on detected CPU features.
*/
- mov_q x5, MAIR_EL1_SET
-#ifdef CONFIG_ARM64_MTE
- mte_tcr .req x20
-
- mov mte_tcr, #0
+ mair .req x17
+ tcr .req x16
+ mov_q mair, MAIR_EL1_SET
+ mov_q tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+ TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
+ TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
+#ifdef CONFIG_ARM64_MTE
/*
* Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
* (ID_AA64PFR1_EL1[11:8] > 1).
@@ -438,48 +435,49 @@ SYM_FUNC_START(__cpu_setup)
/* Normal Tagged memory type at the corresponding MAIR index */
mov x10, #MAIR_ATTR_NORMAL_TAGGED
- bfi x5, x10, #(8 * MT_NORMAL_TAGGED), #8
+ bfi mair, x10, #(8 * MT_NORMAL_TAGGED), #8
/* initialize GCR_EL1: all non-zero tags excluded by default */
mov x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK)
msr_s SYS_GCR_EL1, x10
+ /*
+ * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
+ * RGSR_EL1.SEED must be non-zero for IRG to produce
+ * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
+ * must initialize it.
+ */
+ mrs x10, CNTVCT_EL0
+ ands x10, x10, #SYS_RGSR_EL1_SEED_MASK
+ csinc x10, x10, xzr, ne
+ lsl x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
+ msr_s SYS_RGSR_EL1, x10
+
/* clear any pending tag check faults in TFSR*_EL1 */
msr_s SYS_TFSR_EL1, xzr
msr_s SYS_TFSRE0_EL1, xzr
/* set the TCR_EL1 bits */
- mov_q mte_tcr, TCR_KASAN_HW_FLAGS
+ mov_q x10, TCR_MTE_FLAGS
+ orr tcr, tcr, x10
1:
#endif
- msr mair_el1, x5
- /*
- * Set/prepare TCR and TTBR. TCR_EL1.T1SZ gets further
- * adjusted if the kernel is compiled with 52bit VA support.
- */
- mov_q x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
- TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
- TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
-#ifdef CONFIG_ARM64_MTE
- orr x10, x10, mte_tcr
- .unreq mte_tcr
-#endif
- tcr_clear_errata_bits x10, x9, x5
+ tcr_clear_errata_bits tcr, x9, x5
#ifdef CONFIG_ARM64_VA_BITS_52
ldr_l x9, vabits_actual
sub x9, xzr, x9
add x9, x9, #64
- tcr_set_t1sz x10, x9
+ tcr_set_t1sz tcr, x9
#else
ldr_l x9, idmap_t0sz
#endif
- tcr_set_t0sz x10, x9
+ tcr_set_t0sz tcr, x9
/*
* Set the IPS bits in TCR_EL1.
*/
- tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6
+ tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6
#ifdef CONFIG_ARM64_HW_AFDBM
/*
* Enable hardware update of the Access Flags bit.
@@ -489,13 +487,17 @@ SYM_FUNC_START(__cpu_setup)
mrs x9, ID_AA64MMFR1_EL1
and x9, x9, #0xf
cbz x9, 1f
- orr x10, x10, #TCR_HA // hardware Access flag update
+ orr tcr, tcr, #TCR_HA // hardware Access flag update
1:
#endif /* CONFIG_ARM64_HW_AFDBM */
- msr tcr_el1, x10
+ msr mair_el1, mair
+ msr tcr_el1, tcr
/*
* Prepare SCTLR
*/
mov_q x0, INIT_SCTLR_EL1_MMU_ON
ret // return to head.S
+
+ .unreq mair
+ .unreq tcr
SYM_FUNC_END(__cpu_setup)
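
A C rendering of the RGSR_EL1.SEED initialisation added above (the asm reads CNTVCT_EL0, masks with ands, and uses csinc to force a non-zero seed); cntvct is a hypothetical input:

static u64 rgsr_seed(u64 cntvct)
{
        u64 seed = cntvct & SYS_RGSR_EL1_SEED_MASK;     /* ands */

        if (!seed)                                      /* csinc ..., ne */
                seed = 1;
        return seed << SYS_RGSR_EL1_SEED_SHIFT;         /* lsl */
}
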
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 0e050d76b83a..1c403536c9bb 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -51,10 +51,8 @@ static struct addr_marker address_markers[] = {
{ FIXADDR_TOP, "Fixmap end" },
{ PCI_IO_START, "PCI I/O start" },
{ PCI_IO_END, "PCI I/O end" },
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
{ VMEMMAP_START, "vmemmap start" },
{ VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
-#endif
{ -1, NULL },
};
@@ -161,10 +159,6 @@ static const struct prot_bits pte_bits[] = {
.set = "DEVICE/nGnRE",
}, {
.mask = PTE_ATTRINDX_MASK,
- .val = PTE_ATTRINDX(MT_DEVICE_GRE),
- .set = "DEVICE/GRE",
- }, {
- .mask = PTE_ATTRINDX_MASK,
.val = PTE_ATTRINDX(MT_NORMAL_NC),
.set = "MEM/NORMAL-NC",
}, {
@@ -337,7 +331,7 @@ void ptdump_walk(struct seq_file *s, struct ptdump_info *info)
ptdump_walk_pgd(&st.ptdump, info->mm, NULL);
}
-static void ptdump_initialize(void)
+static void __init ptdump_initialize(void)
{
unsigned i, j;
@@ -381,7 +375,7 @@ void ptdump_check_wx(void)
pr_info("Checked W+X mappings: passed, no W+X pages found\n");
}
-static int ptdump_init(void)
+static int __init ptdump_init(void)
{
address_markers[PAGE_END_NR].start_address = PAGE_END;
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c
index d29d722ec3ec..68bf1a125502 100644
--- a/arch/arm64/mm/ptdump_debugfs.c
+++ b/arch/arm64/mm/ptdump_debugfs.c
@@ -16,7 +16,7 @@ static int ptdump_show(struct seq_file *m, void *v)
}
DEFINE_SHOW_ATTRIBUTE(ptdump);
-void ptdump_debugfs_register(struct ptdump_info *info, const char *name)
+void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name)
{
debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
}
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index f7b194878a99..dccf98a37283 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -16,6 +16,7 @@
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
+#include <asm/insn.h>
#include <asm/set_memory.h>
#include "bpf_jit.h"
@@ -178,9 +179,6 @@ static bool is_addsub_imm(u32 imm)
return !(imm & ~0xfff) || !(imm & ~0xfff000);
}
-/* Stack must be multiples of 16B */
-#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
-
/* Tail call offset to jump into */
#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
#define PROLOGUE_OFFSET 8
@@ -255,7 +253,8 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit(A64_BTI_J, ctx);
}
- ctx->stack_size = STACK_ALIGN(prog->aux->stack_depth);
+ /* Stack size must be a multiple of 16 bytes */
+ ctx->stack_size = round_up(prog->aux->stack_depth, 16);
/* Set up function call stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
@@ -487,17 +486,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
break;
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
+ emit(A64_UDIV(is64, dst, dst, src), ctx);
+ break;
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
- switch (BPF_OP(code)) {
- case BPF_DIV:
- emit(A64_UDIV(is64, dst, dst, src), ctx);
- break;
- case BPF_MOD:
- emit(A64_UDIV(is64, tmp, dst, src), ctx);
- emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
- break;
- }
+ emit(A64_UDIV(is64, tmp, dst, src), ctx);
+ emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
break;
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_LSH | BPF_X:
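
The BPF_MOD hunk above lowers the modulo with UDIV plus MSUB, since AArch64 has no remainder instruction; a scalar sketch of the emitted sequence:

/* tmp = dst / src; dst = dst - tmp * src  (A64_UDIV + A64_MSUB) */
static u64 emulate_mod(u64 dst, u64 src)
{
        u64 tmp = dst / src;

        return dst - tmp * src;
}

The stack-size change in build_prologue() is similar in spirit: round_up(sz, 16) expands to the same ((sz) + 15) & ~15 as the removed STACK_ALIGN() macro for the power-of-two alignment 16.
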
diff --git a/arch/arm64/tools/Makefile b/arch/arm64/tools/Makefile
new file mode 100644
index 000000000000..932b4fe5c768
--- /dev/null
+++ b/arch/arm64/tools/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+
+gen := arch/$(ARCH)/include/generated
+kapi := $(gen)/asm
+
+kapi-hdrs-y := $(kapi)/cpucaps.h
+
+targets += $(addprefix ../../../,$(gen-y) $(kapi-hdrs-y))
+
+PHONY += kapi
+
+kapi: $(kapi-hdrs-y) $(gen-y)
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+quiet_cmd_gen_cpucaps = GEN $@
+ cmd_gen_cpucaps = mkdir -p $(dir $@) && \
+ $(AWK) -f $(filter-out $(PHONY),$^) > $@
+
+$(kapi)/cpucaps.h: $(src)/gen-cpucaps.awk $(src)/cpucaps FORCE
+ $(call if_changed,gen_cpucaps)
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
new file mode 100644
index 000000000000..49305c2e6dfd
--- /dev/null
+++ b/arch/arm64/tools/cpucaps
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Internal CPU capabilities constants, keep this list sorted
+
+BTI
+# Unreliable: use system_supports_32bit_el0() instead.
+HAS_32BIT_EL0_DO_NOT_USE
+HAS_32BIT_EL1
+HAS_ADDRESS_AUTH
+HAS_ADDRESS_AUTH_ARCH
+HAS_ADDRESS_AUTH_IMP_DEF
+HAS_AMU_EXTN
+HAS_ARMv8_4_TTL
+HAS_CACHE_DIC
+HAS_CACHE_IDC
+HAS_CNP
+HAS_CRC32
+HAS_DCPODP
+HAS_DCPOP
+HAS_E0PD
+HAS_EPAN
+HAS_GENERIC_AUTH
+HAS_GENERIC_AUTH_ARCH
+HAS_GENERIC_AUTH_IMP_DEF
+HAS_IRQ_PRIO_MASKING
+HAS_LDAPR
+HAS_LSE_ATOMICS
+HAS_NO_FPSIMD
+HAS_NO_HW_PREFETCH
+HAS_PAN
+HAS_RAS_EXTN
+HAS_RNG
+HAS_SB
+HAS_STAGE2_FWB
+HAS_SYSREG_GIC_CPUIF
+HAS_TLB_RANGE
+HAS_VIRT_HOST_EXTN
+HW_DBM
+KVM_PROTECTED_MODE
+MISMATCHED_CACHE_TYPE
+MTE
+SPECTRE_V2
+SPECTRE_V3A
+SPECTRE_V4
+SSBS
+SVE
+UNMAP_KERNEL_AT_EL0
+WORKAROUND_834220
+WORKAROUND_843419
+WORKAROUND_845719
+WORKAROUND_858921
+WORKAROUND_1418040
+WORKAROUND_1463225
+WORKAROUND_1508412
+WORKAROUND_1542419
+WORKAROUND_CAVIUM_23154
+WORKAROUND_CAVIUM_27456
+WORKAROUND_CAVIUM_30115
+WORKAROUND_CAVIUM_TX2_219_PRFM
+WORKAROUND_CAVIUM_TX2_219_TVM
+WORKAROUND_CLEAN_CACHE
+WORKAROUND_DEVICE_LOAD_ACQUIRE
+WORKAROUND_NVIDIA_CARMEL_CNP
+WORKAROUND_QCOM_FALKOR_E1003
+WORKAROUND_REPEAT_TLBI
+WORKAROUND_SPECULATIVE_AT
diff --git a/arch/arm64/tools/gen-cpucaps.awk b/arch/arm64/tools/gen-cpucaps.awk
new file mode 100755
index 000000000000..00c9e72a200a
--- /dev/null
+++ b/arch/arm64/tools/gen-cpucaps.awk
@@ -0,0 +1,40 @@
+#!/bin/awk -f
+# SPDX-License-Identifier: GPL-2.0
+# gen-cpucaps.awk: arm64 cpucaps header generator
+#
+# Usage: awk -f gen-cpucaps.awk cpucaps
+
+# Log an error and terminate
+function fatal(msg) {
+ print "Error at line " NR ": " msg > "/dev/stderr"
+ exit 1
+}
+
+# skip blank lines and comment lines
+/^$/ { next }
+/^#/ { next }
+
+BEGIN {
+ print "#ifndef __ASM_CPUCAPS_H"
+ print "#define __ASM_CPUCAPS_H"
+ print ""
+ print "/* Generated file - do not edit */"
+ cap_num = 0
+ print ""
+}
+
+/^[vA-Z0-9_]+$/ {
+ printf("#define ARM64_%-30s\t%d\n", $0, cap_num++)
+ next
+}
+
+END {
+ printf("#define ARM64_NCAPS\t\t\t\t%d\n", cap_num)
+ print ""
+ print "#endif /* __ASM_CPUCAPS_H */"
+}
+
+# Any lines not handled by previous rules are unexpected
+{
+ fatal("unhandled statement")
+}
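
For reference, a sketch of the header this generator emits for the list above (first entries abbreviated); the list contains 61 capability names, so ARM64_NCAPS comes out to 61:

#ifndef __ASM_CPUCAPS_H
#define __ASM_CPUCAPS_H

/* Generated file - do not edit */

#define ARM64_BTI                               0
#define ARM64_HAS_32BIT_EL0_DO_NOT_USE          1
#define ARM64_HAS_32BIT_EL1                     2
...
#define ARM64_NCAPS                             61

#endif /* __ASM_CPUCAPS_H */
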
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index 34e91224adc3..8de5b987edb9 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -314,7 +314,7 @@ config FORCE_MAX_ZONEORDER
int "Maximum zone order"
default "11"
-config RAM_BASE
+config DRAM_BASE
hex "DRAM start addr (the same with memory-section in dts)"
default 0x0
diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c
index 9f1fe80cc847..07ff17ea33de 100644
--- a/arch/csky/abiv1/cacheflush.c
+++ b/arch/csky/abiv1/cacheflush.c
@@ -4,6 +4,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/spinlock.h>
#include <asm/page.h>
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index cc24bb8e539f..904a18a818be 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += asm-offsets.h
+generic-y += extable.h
generic-y += gpio.h
generic-y += kvm_para.h
generic-y += qrwlock.h
diff --git a/arch/csky/include/asm/asid.h b/arch/csky/include/asm/asid.h
index ac08b0ffbe1f..6ff205a97a14 100644
--- a/arch/csky/include/asm/asid.h
+++ b/arch/csky/include/asm/asid.h
@@ -37,7 +37,7 @@ void asid_new_context(struct asid_info *info, atomic64_t *pasid,
* Check the ASID is still valid for the context. If not generate a new ASID.
*
* @pasid: Pointer to the current ASID batch
- * @cpu: current CPU ID. Must have been acquired throught get_cpu()
+ * @cpu: current CPU ID. Must have been acquired through get_cpu()
*/
static inline void asid_check_context(struct asid_info *info,
atomic64_t *pasid, unsigned int cpu,
diff --git a/arch/csky/include/asm/barrier.h b/arch/csky/include/asm/barrier.h
index 84fc600c8b45..f4045dd53e17 100644
--- a/arch/csky/include/asm/barrier.h
+++ b/arch/csky/include/asm/barrier.h
@@ -64,7 +64,7 @@
/*
* sync: completion barrier, all sync.xx instructions
- * guarantee the last response recieved by bus transaction
+ * guarantee the last response received by bus transaction
* made by ld/st instructions before sync.s
* sync.s: inherit from sync, but also shareable to other cores
* sync.i: inherit from sync, but also flush cpu pipeline
diff --git a/arch/csky/include/asm/cmpxchg.h b/arch/csky/include/asm/cmpxchg.h
index dabc8e46ce7b..d1bef11f8dc9 100644
--- a/arch/csky/include/asm/cmpxchg.h
+++ b/arch/csky/include/asm/cmpxchg.h
@@ -31,7 +31,7 @@ extern void __bad_xchg(void);
__ret; \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
(__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
#define __cmpxchg_relaxed(ptr, old, new, size) \
@@ -61,14 +61,14 @@ extern void __bad_xchg(void);
__ret; \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__smp_release_fence(); \
- __ret = cmpxchg_relaxed(ptr, o, n); \
+ __ret = arch_cmpxchg_relaxed(ptr, o, n); \
__smp_acquire_fence(); \
__ret; \
})
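
A usage sketch of the fully-ordered arch_cmpxchg() built above: the release fence orders earlier stores before the swap and the acquire fence orders the swap before later loads, so a hypothetical test-and-set lock is correctly bracketed:

static void lock(int *l)
{
        while (arch_cmpxchg(l, 0, 1) != 0)
                ;       /* spin until we swap 0 -> 1 */
}
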
diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h
index 3b91fc3cf36f..ed7451478b1b 100644
--- a/arch/csky/include/asm/page.h
+++ b/arch/csky/include/asm/page.h
@@ -28,7 +28,7 @@
#define SSEG_SIZE 0x20000000
#define LOWMEM_LIMIT (SSEG_SIZE * 2)
-#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1))
+#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1))
#ifndef __ASSEMBLY__
diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h
index cd211aabbefd..bbbd0698b397 100644
--- a/arch/csky/include/asm/pgalloc.h
+++ b/arch/csky/include/asm/pgalloc.h
@@ -22,8 +22,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
set_pmd(pmd, __pmd(__pa(page_address(pte))));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
extern void pgd_init(unsigned long *p);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index 0d60367b6bfa..151607ed5158 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -14,7 +14,6 @@
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define USER_PTRS_PER_PGD (PAGE_OFFSET/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
/*
* C-SKY is two-level paging structure:
diff --git a/arch/csky/include/asm/segment.h b/arch/csky/include/asm/segment.h
index 589e8321dc14..5bc1cc62b87f 100644
--- a/arch/csky/include/asm/segment.h
+++ b/arch/csky/include/asm/segment.h
@@ -7,11 +7,4 @@ typedef struct {
unsigned long seg;
} mm_segment_t;
-#define KERNEL_DS ((mm_segment_t) { 0xFFFFFFFF })
-
-#define USER_DS ((mm_segment_t) { PAGE_OFFSET })
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-
#endif /* __ASM_CSKY_SEGMENT_H */
diff --git a/arch/csky/include/asm/uaccess.h b/arch/csky/include/asm/uaccess.h
index 3dec272e1fa3..ac83823fc437 100644
--- a/arch/csky/include/asm/uaccess.h
+++ b/arch/csky/include/asm/uaccess.h
@@ -3,122 +3,26 @@
#ifndef __ASM_CSKY_UACCESS_H
#define __ASM_CSKY_UACCESS_H
-/*
- * User space memory access functions
- */
-#include <linux/compiler.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/version.h>
-#include <asm/segment.h>
+#define user_addr_max() \
+ (uaccess_kernel() ? KERNEL_DS.seg : get_fs().seg)
-static inline int access_ok(const void *addr, unsigned long size)
+static inline int __access_ok(unsigned long addr, unsigned long size)
{
unsigned long limit = current_thread_info()->addr_limit.seg;
- return (((unsigned long)addr < limit) &&
- ((unsigned long)(addr + size) < limit));
+ return ((addr < limit) && ((addr + size) < limit));
}
-
-#define __addr_ok(addr) (access_ok(addr, 0))
-
-extern int __put_user_bad(void);
+#define __access_ok __access_ok
/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
+ * __put_user_fn
*/
+extern int __put_user_bad(void);
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- *
- * This gets kind of ugly. We want to return _two_ values in "get_user()"
- * and yet we don't want to do any pointers, because that is too much
- * of a performance impact. Thus we have a few rather ugly macros here,
- * and hide all the ugliness from the user.
- *
- * The "__xxx" versions of the user access functions are versions that
- * do not verify the address space, that must have been done previously
- * with a separate "access_ok()" call (this is used when we do multiple
- * accesses to the same area of user memory).
- *
- * As we use the same address space for kernel and user data on
- * Ckcore, we can just do these as direct assignments. (Of course, the
- * exception handling means that it's no longer "just"...)
- */
-
-#define put_user(x, ptr) \
- __put_user_check((x), (ptr), sizeof(*(ptr)))
-
-#define __put_user(x, ptr) \
- __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
-
-#define __ptr(x) ((unsigned long *)(x))
-
-#define get_user(x, ptr) \
- __get_user_check((x), (ptr), sizeof(*(ptr)))
-
-#define __get_user(x, ptr) \
- __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
-
-#define __put_user_nocheck(x, ptr, size) \
-({ \
- long __pu_err = 0; \
- typeof(*(ptr)) *__pu_addr = (ptr); \
- typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \
- if (__pu_addr) \
- __put_user_size(__pu_val, (__pu_addr), (size), \
- __pu_err); \
- __pu_err; \
-})
-
-#define __put_user_check(x, ptr, size) \
-({ \
- long __pu_err = -EFAULT; \
- typeof(*(ptr)) *__pu_addr = (ptr); \
- typeof(*(ptr)) __pu_val = (typeof(*(ptr)))(x); \
- if (access_ok(__pu_addr, size) && __pu_addr) \
- __put_user_size(__pu_val, __pu_addr, (size), __pu_err); \
- __pu_err; \
-})
-
-#define __put_user_size(x, ptr, size, retval) \
-do { \
- retval = 0; \
- switch (size) { \
- case 1: \
- __put_user_asm_b(x, ptr, retval); \
- break; \
- case 2: \
- __put_user_asm_h(x, ptr, retval); \
- break; \
- case 4: \
- __put_user_asm_w(x, ptr, retval); \
- break; \
- case 8: \
- __put_user_asm_64(x, ptr, retval); \
- break; \
- default: \
- __put_user_bad(); \
- } \
-} while (0)
-
-/*
- * We don't tell gcc that we are accessing memory, but this is OK
- * because we do not write to any memory gcc knows about, so there
- * are no aliasing issues.
- *
- * Note that PC at a fault is the address *after* the faulting
- * instruction.
- */
#define __put_user_asm_b(x, ptr, err) \
do { \
int errcode; \
- asm volatile( \
+ __asm__ __volatile__( \
"1: stb %1, (%2,0) \n" \
" br 3f \n" \
"2: mov %0, %3 \n" \
@@ -136,7 +40,7 @@ do { \
#define __put_user_asm_h(x, ptr, err) \
do { \
int errcode; \
- asm volatile( \
+ __asm__ __volatile__( \
"1: sth %1, (%2,0) \n" \
" br 3f \n" \
"2: mov %0, %3 \n" \
@@ -154,7 +58,7 @@ do { \
#define __put_user_asm_w(x, ptr, err) \
do { \
int errcode; \
- asm volatile( \
+ __asm__ __volatile__( \
"1: stw %1, (%2,0) \n" \
" br 3f \n" \
"2: mov %0, %3 \n" \
@@ -169,241 +73,149 @@ do { \
: "memory"); \
} while (0)
-#define __put_user_asm_64(x, ptr, err) \
-do { \
- int tmp; \
- int errcode; \
- typeof(*(ptr))src = (typeof(*(ptr)))x; \
- typeof(*(ptr))*psrc = &src; \
- \
- asm volatile( \
- " ldw %3, (%1, 0) \n" \
- "1: stw %3, (%2, 0) \n" \
- " ldw %3, (%1, 4) \n" \
- "2: stw %3, (%2, 4) \n" \
- " br 4f \n" \
- "3: mov %0, %4 \n" \
- " br 4f \n" \
- ".section __ex_table, \"a\" \n" \
- ".align 2 \n" \
- ".long 1b, 3b \n" \
- ".long 2b, 3b \n" \
- ".previous \n" \
- "4: \n" \
- : "=r"(err), "=r"(psrc), "=r"(ptr), \
- "=r"(tmp), "=r"(errcode) \
- : "0"(err), "1"(psrc), "2"(ptr), "3"(0), "4"(-EFAULT) \
- : "memory"); \
-} while (0)
-
-#define __get_user_nocheck(x, ptr, size) \
-({ \
- long __gu_err; \
- __get_user_size(x, (ptr), (size), __gu_err); \
- __gu_err; \
-})
-
-#define __get_user_check(x, ptr, size) \
-({ \
- int __gu_err = -EFAULT; \
- const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
- if (access_ok(__gu_ptr, size) && __gu_ptr) \
- __get_user_size(x, __gu_ptr, size, __gu_err); \
- __gu_err; \
-})
-
-#define __get_user_size(x, ptr, size, retval) \
-do { \
- switch (size) { \
- case 1: \
- __get_user_asm_common((x), ptr, "ldb", retval); \
- break; \
- case 2: \
- __get_user_asm_common((x), ptr, "ldh", retval); \
- break; \
- case 4: \
- __get_user_asm_common((x), ptr, "ldw", retval); \
- break; \
- default: \
- x = 0; \
- (retval) = __get_user_bad(); \
- } \
+#define __put_user_asm_64(x, ptr, err) \
+do { \
+ int tmp; \
+ int errcode; \
+ \
+ __asm__ __volatile__( \
+ " ldw %3, (%1, 0) \n" \
+ "1: stw %3, (%2, 0) \n" \
+ " ldw %3, (%1, 4) \n" \
+ "2: stw %3, (%2, 4) \n" \
+ " br 4f \n" \
+ "3: mov %0, %4 \n" \
+ " br 4f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b, 3b \n" \
+ ".long 2b, 3b \n" \
+ ".previous \n" \
+ "4: \n" \
+ : "=r"(err), "=r"(x), "=r"(ptr), \
+ "=r"(tmp), "=r"(errcode) \
+ : "0"(err), "1"(x), "2"(ptr), "3"(0), \
+ "4"(-EFAULT) \
+ : "memory"); \
} while (0)
-#define __get_user_asm_common(x, ptr, ins, err) \
-do { \
- int errcode; \
- asm volatile( \
- "1: " ins " %1, (%4,0) \n" \
- " br 3f \n" \
- /* Fix up codes */ \
- "2: mov %0, %2 \n" \
- " movi %1, 0 \n" \
- " br 3f \n" \
- ".section __ex_table,\"a\" \n" \
- ".align 2 \n" \
- ".long 1b, 2b \n" \
- ".previous \n" \
- "3: \n" \
- : "=r"(err), "=r"(x), "=r"(errcode) \
- : "0"(0), "r"(ptr), "2"(-EFAULT) \
- : "memory"); \
-} while (0)
+static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
+{
+ int retval = 0;
+ u32 tmp;
+
+ switch (size) {
+ case 1:
+ tmp = *(u8 *)x;
+ __put_user_asm_b(tmp, ptr, retval);
+ break;
+ case 2:
+ tmp = *(u16 *)x;
+ __put_user_asm_h(tmp, ptr, retval);
+ break;
+ case 4:
+ tmp = *(u32 *)x;
+ __put_user_asm_w(tmp, ptr, retval);
+ break;
+ case 8:
+ __put_user_asm_64(x, (u64 *)ptr, retval);
+ break;
+ }
+
+ return retval;
+}
+#define __put_user_fn __put_user_fn
+/*
+ * __get_user_fn
+ */
extern int __get_user_bad(void);
-#define ___copy_to_user(to, from, n) \
+#define __get_user_asm_common(x, ptr, ins, err) \
do { \
- int w0, w1, w2, w3; \
- asm volatile( \
- "0: cmpnei %1, 0 \n" \
- " bf 8f \n" \
- " mov %3, %1 \n" \
- " or %3, %2 \n" \
- " andi %3, 3 \n" \
- " cmpnei %3, 0 \n" \
- " bf 1f \n" \
- " br 5f \n" \
- "1: cmplti %0, 16 \n" /* 4W */ \
- " bt 3f \n" \
- " ldw %3, (%2, 0) \n" \
- " ldw %4, (%2, 4) \n" \
- " ldw %5, (%2, 8) \n" \
- " ldw %6, (%2, 12) \n" \
- "2: stw %3, (%1, 0) \n" \
- "9: stw %4, (%1, 4) \n" \
- "10: stw %5, (%1, 8) \n" \
- "11: stw %6, (%1, 12) \n" \
- " addi %2, 16 \n" \
- " addi %1, 16 \n" \
- " subi %0, 16 \n" \
- " br 1b \n" \
- "3: cmplti %0, 4 \n" /* 1W */ \
- " bt 5f \n" \
- " ldw %3, (%2, 0) \n" \
- "4: stw %3, (%1, 0) \n" \
- " addi %2, 4 \n" \
- " addi %1, 4 \n" \
- " subi %0, 4 \n" \
- " br 3b \n" \
- "5: cmpnei %0, 0 \n" /* 1B */ \
- " bf 13f \n" \
- " ldb %3, (%2, 0) \n" \
- "6: stb %3, (%1, 0) \n" \
- " addi %2, 1 \n" \
- " addi %1, 1 \n" \
- " subi %0, 1 \n" \
- " br 5b \n" \
- "7: subi %0, 4 \n" \
- "8: subi %0, 4 \n" \
- "12: subi %0, 4 \n" \
- " br 13f \n" \
- ".section __ex_table, \"a\" \n" \
- ".align 2 \n" \
- ".long 2b, 13f \n" \
- ".long 4b, 13f \n" \
- ".long 6b, 13f \n" \
- ".long 9b, 12b \n" \
- ".long 10b, 8b \n" \
- ".long 11b, 7b \n" \
- ".previous \n" \
- "13: \n" \
- : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), \
- "=r"(w1), "=r"(w2), "=r"(w3) \
- : "0"(n), "1"(to), "2"(from) \
+ int errcode; \
+ __asm__ __volatile__( \
+ "1: " ins " %1, (%4, 0) \n" \
+ " br 3f \n" \
+ "2: mov %0, %2 \n" \
+ " movi %1, 0 \n" \
+ " br 3f \n" \
+ ".section __ex_table,\"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b, 2b \n" \
+ ".previous \n" \
+ "3: \n" \
+ : "=r"(err), "=r"(x), "=r"(errcode) \
+ : "0"(0), "r"(ptr), "2"(-EFAULT) \
: "memory"); \
} while (0)
-#define ___copy_from_user(to, from, n) \
+#define __get_user_asm_64(x, ptr, err) \
do { \
int tmp; \
- int nsave; \
- asm volatile( \
- "0: cmpnei %1, 0 \n" \
- " bf 7f \n" \
- " mov %3, %1 \n" \
- " or %3, %2 \n" \
- " andi %3, 3 \n" \
- " cmpnei %3, 0 \n" \
- " bf 1f \n" \
- " br 5f \n" \
- "1: cmplti %0, 16 \n" \
- " bt 3f \n" \
- "2: ldw %3, (%2, 0) \n" \
- "10: ldw %4, (%2, 4) \n" \
- " stw %3, (%1, 0) \n" \
- " stw %4, (%1, 4) \n" \
- "11: ldw %3, (%2, 8) \n" \
- "12: ldw %4, (%2, 12) \n" \
- " stw %3, (%1, 8) \n" \
- " stw %4, (%1, 12) \n" \
- " addi %2, 16 \n" \
- " addi %1, 16 \n" \
- " subi %0, 16 \n" \
- " br 1b \n" \
- "3: cmplti %0, 4 \n" \
- " bt 5f \n" \
- "4: ldw %3, (%2, 0) \n" \
- " stw %3, (%1, 0) \n" \
- " addi %2, 4 \n" \
- " addi %1, 4 \n" \
- " subi %0, 4 \n" \
- " br 3b \n" \
- "5: cmpnei %0, 0 \n" \
- " bf 7f \n" \
- "6: ldb %3, (%2, 0) \n" \
- " stb %3, (%1, 0) \n" \
- " addi %2, 1 \n" \
- " addi %1, 1 \n" \
- " subi %0, 1 \n" \
- " br 5b \n" \
- "8: stw %3, (%1, 0) \n" \
- " subi %0, 4 \n" \
- " bf 7f \n" \
- "9: subi %0, 8 \n" \
- " bf 7f \n" \
- "13: stw %3, (%1, 8) \n" \
- " subi %0, 12 \n" \
- " bf 7f \n" \
- ".section __ex_table, \"a\" \n" \
- ".align 2 \n" \
- ".long 2b, 7f \n" \
- ".long 4b, 7f \n" \
- ".long 6b, 7f \n" \
- ".long 10b, 8b \n" \
- ".long 11b, 9b \n" \
- ".long 12b,13b \n" \
- ".previous \n" \
- "7: \n" \
- : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), \
- "=r"(tmp) \
- : "0"(n), "1"(to), "2"(from) \
+ int errcode; \
+ \
+ __asm__ __volatile__( \
+ "1: ldw %3, (%2, 0) \n" \
+ " stw %3, (%1, 0) \n" \
+ "2: ldw %3, (%2, 4) \n" \
+ " stw %3, (%1, 4) \n" \
+ " br 4f \n" \
+ "3: mov %0, %4 \n" \
+ " br 4f \n" \
+ ".section __ex_table, \"a\" \n" \
+ ".align 2 \n" \
+ ".long 1b, 3b \n" \
+ ".long 2b, 3b \n" \
+ ".previous \n" \
+ "4: \n" \
+ : "=r"(err), "=r"(x), "=r"(ptr), \
+ "=r"(tmp), "=r"(errcode) \
+ : "0"(err), "1"(x), "2"(ptr), "3"(0), \
+ "4"(-EFAULT) \
: "memory"); \
} while (0)
+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+{
+ int retval;
+ u32 tmp;
+
+ switch (size) {
+ case 1:
+ __get_user_asm_common(tmp, ptr, "ldb", retval);
+ *(u8 *)x = (u8)tmp;
+ break;
+ case 2:
+ __get_user_asm_common(tmp, ptr, "ldh", retval);
+ *(u16 *)x = (u16)tmp;
+ break;
+ case 4:
+ __get_user_asm_common(tmp, ptr, "ldw", retval);
+ *(u32 *)x = (u32)tmp;
+ break;
+ case 8:
+ __get_user_asm_64(x, ptr, retval);
+ break;
+ }
+
+ return retval;
+}
+#define __get_user_fn __get_user_fn
+
unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n);
unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n);
-unsigned long clear_user(void *to, unsigned long n);
unsigned long __clear_user(void __user *to, unsigned long n);
+#define __clear_user __clear_user
-long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);
+#define __strncpy_from_user __strncpy_from_user
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-long strnlen_user(const char *src, long n);
-
-#define strlen_user(str) strnlen_user(str, 32767)
-
-struct exception_table_entry {
- unsigned long insn;
- unsigned long nextinsn;
-};
+long __strnlen_user(const char *s, long n);
+#define __strnlen_user __strnlen_user
-extern int fixup_exception(struct pt_regs *regs);
+#include <asm/segment.h>
+#include <asm-generic/uaccess.h>
#endif /* __ASM_CSKY_UACCESS_H */
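
With the rewrite above, csky supplies only __access_ok(), __get_user_fn(), __put_user_fn() and the string/clear helpers, letting asm-generic/uaccess.h build the public API on top. A hypothetical caller, unchanged by the conversion:

/* get_user() expands into an __access_ok() range check followed by
 * __get_user_fn(), which dispatches on size to the asm accessors. */
static long read_u32_from_user(const u32 __user *uptr, u32 *val)
{
        return get_user(*val, uptr);    /* 0 on success, -EFAULT */
}
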
diff --git a/arch/csky/include/asm/vdso.h b/arch/csky/include/asm/vdso.h
index eb5142f9c564..bdce581b5fcb 100644
--- a/arch/csky/include/asm/vdso.h
+++ b/arch/csky/include/asm/vdso.h
@@ -16,7 +16,7 @@ struct vdso_data {
* offset of 0, but since the linker must support setting weak undefined
* symbols to the absolute address 0 it also happens to support other low
* addresses even when the code model suggests those low addresses would not
- * otherwise be availiable.
+ * otherwise be available.
*/
#define VDSO_SYMBOL(base, name) \
({ \
diff --git a/arch/csky/kernel/asm-offsets.c b/arch/csky/kernel/asm-offsets.c
index 17479860d43d..1cbcba4b0dd1 100644
--- a/arch/csky/kernel/asm-offsets.c
+++ b/arch/csky/kernel/asm-offsets.c
@@ -9,7 +9,6 @@
int main(void)
{
/* offsets into the task struct */
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S
index c1bd7a6b4ab6..00e3c8ebf9b8 100644
--- a/arch/csky/kernel/entry.S
+++ b/arch/csky/kernel/entry.S
@@ -9,7 +9,6 @@
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
#include <linux/threads.h>
-#include <asm/setup.h>
#include <asm/page.h>
#include <asm/thread_info.h>
diff --git a/arch/csky/kernel/probes/ftrace.c b/arch/csky/kernel/probes/ftrace.c
index ae2b1c7b3b5c..ef2bb9bd9605 100644
--- a/arch/csky/kernel/probes/ftrace.c
+++ b/arch/csky/kernel/probes/ftrace.c
@@ -9,7 +9,7 @@ int arch_check_ftrace_location(struct kprobe *p)
return 0;
}
-/* Ftrace callback handler for kprobes -- called under preepmt disabed */
+/* Ftrace callback handler for kprobes -- called under preempt disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
diff --git a/arch/csky/kernel/probes/kprobes.c b/arch/csky/kernel/probes/kprobes.c
index 589f090f48b9..68b22b499aeb 100644
--- a/arch/csky/kernel/probes/kprobes.c
+++ b/arch/csky/kernel/probes/kprobes.c
@@ -295,23 +295,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index 0f9f5eef9338..e2993539af8e 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -281,7 +281,6 @@ void csky_start_secondary(void)
pr_info("CPU%u Online: %s...\n", cpu, __func__);
local_irq_enable();
- preempt_disable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/csky/kernel/stacktrace.c b/arch/csky/kernel/stacktrace.c
index 16ae20a0af34..1b280ef08004 100644
--- a/arch/csky/kernel/stacktrace.c
+++ b/arch/csky/kernel/stacktrace.c
@@ -115,7 +115,7 @@ unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
- if (likely(task && task != current && task->state != TASK_RUNNING))
+ if (likely(task && task != current && !task_is_running(task)))
walk_stackframe(task, NULL, save_wchan, &pc);
return pc;
}
diff --git a/arch/csky/lib/usercopy.c b/arch/csky/lib/usercopy.c
index 3c9bd645e643..c5d394a0ae78 100644
--- a/arch/csky/lib/usercopy.c
+++ b/arch/csky/lib/usercopy.c
@@ -7,7 +7,70 @@
unsigned long raw_copy_from_user(void *to, const void *from,
unsigned long n)
{
- ___copy_from_user(to, from, n);
+ int tmp, nsave;
+
+ __asm__ __volatile__(
+ "0: cmpnei %1, 0 \n"
+ " bf 7f \n"
+ " mov %3, %1 \n"
+ " or %3, %2 \n"
+ " andi %3, 3 \n"
+ " cmpnei %3, 0 \n"
+ " bf 1f \n"
+ " br 5f \n"
+ "1: cmplti %0, 16 \n"
+ " bt 3f \n"
+ "2: ldw %3, (%2, 0) \n"
+ "10: ldw %4, (%2, 4) \n"
+ " stw %3, (%1, 0) \n"
+ " stw %4, (%1, 4) \n"
+ "11: ldw %3, (%2, 8) \n"
+ "12: ldw %4, (%2, 12) \n"
+ " stw %3, (%1, 8) \n"
+ " stw %4, (%1, 12) \n"
+ " addi %2, 16 \n"
+ " addi %1, 16 \n"
+ " subi %0, 16 \n"
+ " br 1b \n"
+ "3: cmplti %0, 4 \n"
+ " bt 5f \n"
+ "4: ldw %3, (%2, 0) \n"
+ " stw %3, (%1, 0) \n"
+ " addi %2, 4 \n"
+ " addi %1, 4 \n"
+ " subi %0, 4 \n"
+ " br 3b \n"
+ "5: cmpnei %0, 0 \n"
+ " bf 7f \n"
+ "6: ldb %3, (%2, 0) \n"
+ " stb %3, (%1, 0) \n"
+ " addi %2, 1 \n"
+ " addi %1, 1 \n"
+ " subi %0, 1 \n"
+ " br 5b \n"
+ "8: stw %3, (%1, 0) \n"
+ " subi %0, 4 \n"
+ " bf 7f \n"
+ "9: subi %0, 8 \n"
+ " bf 7f \n"
+ "13: stw %3, (%1, 8) \n"
+ " subi %0, 12 \n"
+ " bf 7f \n"
+ ".section __ex_table, \"a\" \n"
+ ".align 2 \n"
+ ".long 2b, 7f \n"
+ ".long 4b, 7f \n"
+ ".long 6b, 7f \n"
+ ".long 10b, 8b \n"
+ ".long 11b, 9b \n"
+ ".long 12b,13b \n"
+ ".previous \n"
+ "7: \n"
+ : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave),
+ "=r"(tmp)
+ : "0"(n), "1"(to), "2"(from)
+ : "memory");
+
return n;
}
EXPORT_SYMBOL(raw_copy_from_user);
@@ -15,48 +78,70 @@ EXPORT_SYMBOL(raw_copy_from_user);
unsigned long raw_copy_to_user(void *to, const void *from,
unsigned long n)
{
- ___copy_to_user(to, from, n);
+ int w0, w1, w2, w3;
+
+ __asm__ __volatile__(
+ "0: cmpnei %1, 0 \n"
+ " bf 8f \n"
+ " mov %3, %1 \n"
+ " or %3, %2 \n"
+ " andi %3, 3 \n"
+ " cmpnei %3, 0 \n"
+ " bf 1f \n"
+ " br 5f \n"
+ "1: cmplti %0, 16 \n" /* 4W */
+ " bt 3f \n"
+ " ldw %3, (%2, 0) \n"
+ " ldw %4, (%2, 4) \n"
+ " ldw %5, (%2, 8) \n"
+ " ldw %6, (%2, 12) \n"
+ "2: stw %3, (%1, 0) \n"
+ "9: stw %4, (%1, 4) \n"
+ "10: stw %5, (%1, 8) \n"
+ "11: stw %6, (%1, 12) \n"
+ " addi %2, 16 \n"
+ " addi %1, 16 \n"
+ " subi %0, 16 \n"
+ " br 1b \n"
+ "3: cmplti %0, 4 \n" /* 1W */
+ " bt 5f \n"
+ " ldw %3, (%2, 0) \n"
+ "4: stw %3, (%1, 0) \n"
+ " addi %2, 4 \n"
+ " addi %1, 4 \n"
+ " subi %0, 4 \n"
+ " br 3b \n"
+ "5: cmpnei %0, 0 \n" /* 1B */
+ " bf 13f \n"
+ " ldb %3, (%2, 0) \n"
+ "6: stb %3, (%1, 0) \n"
+ " addi %2, 1 \n"
+ " addi %1, 1 \n"
+ " subi %0, 1 \n"
+ " br 5b \n"
+ "7: subi %0, 4 \n"
+ "8: subi %0, 4 \n"
+ "12: subi %0, 4 \n"
+ " br 13f \n"
+ ".section __ex_table, \"a\" \n"
+ ".align 2 \n"
+ ".long 2b, 13f \n"
+ ".long 4b, 13f \n"
+ ".long 6b, 13f \n"
+ ".long 9b, 12b \n"
+ ".long 10b, 8b \n"
+ ".long 11b, 7b \n"
+ ".previous \n"
+ "13: \n"
+ : "=r"(n), "=r"(to), "=r"(from), "=r"(w0),
+ "=r"(w1), "=r"(w2), "=r"(w3)
+ : "0"(n), "1"(to), "2"(from)
+ : "memory");
+
return n;
}
EXPORT_SYMBOL(raw_copy_to_user);
-
-/*
- * copy a null terminated string from userspace.
- */
-#define __do_strncpy_from_user(dst, src, count, res) \
-do { \
- int tmp; \
- long faultres; \
- asm volatile( \
- " cmpnei %3, 0 \n" \
- " bf 4f \n" \
- "1: cmpnei %1, 0 \n" \
- " bf 5f \n" \
- "2: ldb %4, (%3, 0) \n" \
- " stb %4, (%2, 0) \n" \
- " cmpnei %4, 0 \n" \
- " bf 3f \n" \
- " addi %3, 1 \n" \
- " addi %2, 1 \n" \
- " subi %1, 1 \n" \
- " br 1b \n" \
- "3: subu %0, %1 \n" \
- " br 5f \n" \
- "4: mov %0, %5 \n" \
- " br 5f \n" \
- ".section __ex_table, \"a\" \n" \
- ".align 2 \n" \
- ".long 2b, 4b \n" \
- ".previous \n" \
- "5: \n" \
- : "=r"(res), "=r"(count), "=r"(dst), \
- "=r"(src), "=r"(tmp), "=r"(faultres) \
- : "5"(-EFAULT), "0"(count), "1"(count), \
- "2"(dst), "3"(src) \
- : "memory", "cc"); \
-} while (0)
-
/*
* __strncpy_from_user: - Copy a NUL terminated string from userspace,
* with less checking.
@@ -80,43 +165,43 @@ do { \
*/
long __strncpy_from_user(char *dst, const char *src, long count)
{
- long res;
+ long res, faultres;
+ int tmp;
- __do_strncpy_from_user(dst, src, count, res);
- return res;
-}
-EXPORT_SYMBOL(__strncpy_from_user);
-
-/*
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst: Destination address, in kernel space. This buffer must be at
- * least @count bytes long.
- * @src: Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-long strncpy_from_user(char *dst, const char *src, long count)
-{
- long res = -EFAULT;
+ __asm__ __volatile__(
+ " cmpnei %3, 0 \n"
+ " bf 4f \n"
+ "1: cmpnei %1, 0 \n"
+ " bf 5f \n"
+ "2: ldb %4, (%3, 0) \n"
+ " stb %4, (%2, 0) \n"
+ " cmpnei %4, 0 \n"
+ " bf 3f \n"
+ " addi %3, 1 \n"
+ " addi %2, 1 \n"
+ " subi %1, 1 \n"
+ " br 1b \n"
+ "3: subu %0, %1 \n"
+ " br 5f \n"
+ "4: mov %0, %5 \n"
+ " br 5f \n"
+ ".section __ex_table, \"a\" \n"
+ ".align 2 \n"
+ ".long 2b, 4b \n"
+ ".previous \n"
+ "5: \n"
+ : "=r"(res), "=r"(count), "=r"(dst),
+ "=r"(src), "=r"(tmp), "=r"(faultres)
+ : "5"(-EFAULT), "0"(count), "1"(count),
+ "2"(dst), "3"(src)
+ : "memory");
- if (access_ok(src, 1))
- __do_strncpy_from_user(dst, src, count, res);
return res;
}
-EXPORT_SYMBOL(strncpy_from_user);
+EXPORT_SYMBOL(__strncpy_from_user);
/*
- * strlen_user: - Get the size of a string in user space.
+ * strnlen_user: - Get the size of a string in user space.
* @str: The string to measure.
* @n: The maximum valid length
*
@@ -126,14 +211,11 @@ EXPORT_SYMBOL(strncpy_from_user);
* On exception, returns 0.
* If the string is too long, returns a value greater than @n.
*/
-long strnlen_user(const char *s, long n)
+long __strnlen_user(const char *s, long n)
{
unsigned long res, tmp;
- if (s == NULL)
- return 0;
-
- asm volatile(
+ __asm__ __volatile__(
" cmpnei %1, 0 \n"
" bf 3f \n"
"1: cmpnei %0, 0 \n"
@@ -156,87 +238,11 @@ long strnlen_user(const char *s, long n)
"5: \n"
: "=r"(n), "=r"(s), "=r"(res), "=r"(tmp)
: "0"(n), "1"(s), "2"(n)
- : "memory", "cc");
+ : "memory");
return res;
}
-EXPORT_SYMBOL(strnlen_user);
-
-#define __do_clear_user(addr, size) \
-do { \
- int __d0, zvalue, tmp; \
- \
- asm volatile( \
- "0: cmpnei %1, 0 \n" \
- " bf 7f \n" \
- " mov %3, %1 \n" \
- " andi %3, 3 \n" \
- " cmpnei %3, 0 \n" \
- " bf 1f \n" \
- " br 5f \n" \
- "1: cmplti %0, 32 \n" /* 4W */ \
- " bt 3f \n" \
- "8: stw %2, (%1, 0) \n" \
- "10: stw %2, (%1, 4) \n" \
- "11: stw %2, (%1, 8) \n" \
- "12: stw %2, (%1, 12) \n" \
- "13: stw %2, (%1, 16) \n" \
- "14: stw %2, (%1, 20) \n" \
- "15: stw %2, (%1, 24) \n" \
- "16: stw %2, (%1, 28) \n" \
- " addi %1, 32 \n" \
- " subi %0, 32 \n" \
- " br 1b \n" \
- "3: cmplti %0, 4 \n" /* 1W */ \
- " bt 5f \n" \
- "4: stw %2, (%1, 0) \n" \
- " addi %1, 4 \n" \
- " subi %0, 4 \n" \
- " br 3b \n" \
- "5: cmpnei %0, 0 \n" /* 1B */ \
- "9: bf 7f \n" \
- "6: stb %2, (%1, 0) \n" \
- " addi %1, 1 \n" \
- " subi %0, 1 \n" \
- " br 5b \n" \
- ".section __ex_table,\"a\" \n" \
- ".align 2 \n" \
- ".long 8b, 9b \n" \
- ".long 10b, 9b \n" \
- ".long 11b, 9b \n" \
- ".long 12b, 9b \n" \
- ".long 13b, 9b \n" \
- ".long 14b, 9b \n" \
- ".long 15b, 9b \n" \
- ".long 16b, 9b \n" \
- ".long 4b, 9b \n" \
- ".long 6b, 9b \n" \
- ".previous \n" \
- "7: \n" \
- : "=r"(size), "=r" (__d0), \
- "=r"(zvalue), "=r"(tmp) \
- : "0"(size), "1"(addr), "2"(0) \
- : "memory", "cc"); \
-} while (0)
-
-/*
- * clear_user: - Zero a block of memory in user space.
- * @to: Destination address, in user space.
- * @n: Number of bytes to zero.
- *
- * Zero a block of memory in user space.
- *
- * Returns number of bytes that could not be cleared.
- * On success, this will be zero.
- */
-unsigned long
-clear_user(void __user *to, unsigned long n)
-{
- if (access_ok(to, n))
- __do_clear_user(to, n);
- return n;
-}
-EXPORT_SYMBOL(clear_user);
+EXPORT_SYMBOL(__strnlen_user);
/*
* __clear_user: - Zero a block of memory in user space, with less checking.
@@ -252,7 +258,59 @@ EXPORT_SYMBOL(clear_user);
unsigned long
__clear_user(void __user *to, unsigned long n)
{
- __do_clear_user(to, n);
+ int data, value, tmp;
+
+ __asm__ __volatile__(
+ "0: cmpnei %1, 0 \n"
+ " bf 7f \n"
+ " mov %3, %1 \n"
+ " andi %3, 3 \n"
+ " cmpnei %3, 0 \n"
+ " bf 1f \n"
+ " br 5f \n"
+ "1: cmplti %0, 32 \n" /* 4W */
+ " bt 3f \n"
+ "8: stw %2, (%1, 0) \n"
+ "10: stw %2, (%1, 4) \n"
+ "11: stw %2, (%1, 8) \n"
+ "12: stw %2, (%1, 12) \n"
+ "13: stw %2, (%1, 16) \n"
+ "14: stw %2, (%1, 20) \n"
+ "15: stw %2, (%1, 24) \n"
+ "16: stw %2, (%1, 28) \n"
+ " addi %1, 32 \n"
+ " subi %0, 32 \n"
+ " br 1b \n"
+ "3: cmplti %0, 4 \n" /* 1W */
+ " bt 5f \n"
+ "4: stw %2, (%1, 0) \n"
+ " addi %1, 4 \n"
+ " subi %0, 4 \n"
+ " br 3b \n"
+ "5: cmpnei %0, 0 \n" /* 1B */
+ "9: bf 7f \n"
+ "6: stb %2, (%1, 0) \n"
+ " addi %1, 1 \n"
+ " subi %0, 1 \n"
+ " br 5b \n"
+ ".section __ex_table,\"a\" \n"
+ ".align 2 \n"
+ ".long 8b, 9b \n"
+ ".long 10b, 9b \n"
+ ".long 11b, 9b \n"
+ ".long 12b, 9b \n"
+ ".long 13b, 9b \n"
+ ".long 14b, 9b \n"
+ ".long 15b, 9b \n"
+ ".long 16b, 9b \n"
+ ".long 4b, 9b \n"
+ ".long 6b, 9b \n"
+ ".previous \n"
+ "7: \n"
+ : "=r"(n), "=r" (data), "=r"(value), "=r"(tmp)
+ : "0"(n), "1"(to), "2"(0)
+ : "memory");
+
return n;
}
EXPORT_SYMBOL(__clear_user);
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index 1482de56f4f7..466ad949818a 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -12,7 +12,7 @@ int fixup_exception(struct pt_regs *regs)
fixup = search_exception_tables(instruction_pointer(regs));
if (fixup) {
- regs->pc = fixup->nextinsn;
+ regs->pc = fixup->fixup;
return 1;
}
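
The one-line fixup above is the consumer of every ".long insn, fixup" pair the csky usercopy routines earlier in this patch plant in __ex_table: when a user access faults, the trap handler looks the faulting PC up in that table and resumes execution at the recorded label (the 13f, 9b, ... targets). A minimal sketch of that flow, assuming the simple "fixup is a plain resume address" layout csky uses here; other architectures encode offsets or handler functions instead:

    #include <linux/extable.h>
    #include <linux/ptrace.h>

    /* Sketch of the exception-table fixup path for csky-style entries. */
    int demo_fixup_exception(struct pt_regs *regs)
    {
            const struct exception_table_entry *fixup;

            /* Find the entry whose insn field matches the faulting PC... */
            fixup = search_exception_tables(instruction_pointer(regs));
            if (fixup) {
                    /* ...and resume at the paired fixup label. */
                    regs->pc = fixup->fixup;
                    return 1;
            }
            return 0;       /* no fixup: treat as a genuine kernel fault */
    }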
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index 894050a8ce09..bf2004aa811a 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -107,7 +107,6 @@ void __init mem_init(void)
free_highmem_page(page);
}
#endif
- mem_init_print_info(NULL);
}
void free_initmem(void)
diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c
index ffade2f9a4c8..4e51d63850c4 100644
--- a/arch/csky/mm/syscache.c
+++ b/arch/csky/mm/syscache.c
@@ -17,6 +17,7 @@ SYSCALL_DEFINE3(cacheflush,
flush_icache_mm_range(current->mm,
(unsigned long)addr,
(unsigned long)addr + bytes);
+ fallthrough;
case DCACHE:
dcache_wb_range((unsigned long)addr,
(unsigned long)addr + bytes);
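
The added fallthrough; is the kernel's annotation (from <linux/compiler_attributes.h>, expanding to __attribute__((fallthrough)) where the compiler supports it) that the ICACHE case is deliberately meant to continue into the DCACHE write-back, which keeps -Wimplicit-fallthrough quiet. The pattern in isolation, with placeholder constants and helpers standing in for the real syscall above:

    /* Placeholder enum and helpers; mirrors the cacheflush syscall above. */
    enum { ICACHE = 1, DCACHE = 2 };

    static void demo_icache_flush(unsigned long start, unsigned long end);
    static void demo_dcache_wb(unsigned long start, unsigned long end);

    static void demo_cacheflush(int cache, unsigned long start, unsigned long end)
    {
            switch (cache) {
            case ICACHE:
                    demo_icache_flush(start, end);
                    fallthrough;    /* an icache flush implies the dcache write-back */
            case DCACHE:
                    demo_dcache_wb(start, end);
                    break;
            }
    }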
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild
index 60ee7f0d60a8..e23139c8fc0d 100644
--- a/arch/h8300/include/asm/Kbuild
+++ b/arch/h8300/include/asm/Kbuild
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += asm-offsets.h
+generic-y += cmpxchg.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
deleted file mode 100644
index a990d151f163..000000000000
--- a/arch/h8300/include/asm/atomic.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ARCH_H8300_ATOMIC__
-#define __ARCH_H8300_ATOMIC__
-
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <asm/cmpxchg.h>
-#include <asm/irqflags.h>
-
-/*
- * Atomic operations that C can't guarantee us. Useful for
- * resource counting etc..
- */
-
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-
-#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
-{ \
- h8300flags flags; \
- int ret; \
- \
- flags = arch_local_irq_save(); \
- ret = v->counter c_op i; \
- arch_local_irq_restore(flags); \
- return ret; \
-}
-
-#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
-{ \
- h8300flags flags; \
- int ret; \
- \
- flags = arch_local_irq_save(); \
- ret = v->counter; \
- v->counter c_op i; \
- arch_local_irq_restore(flags); \
- return ret; \
-}
-
-#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
-{ \
- h8300flags flags; \
- \
- flags = arch_local_irq_save(); \
- v->counter c_op i; \
- arch_local_irq_restore(flags); \
-}
-
-ATOMIC_OP_RETURN(add, +=)
-ATOMIC_OP_RETURN(sub, -=)
-
-#define ATOMIC_OPS(op, c_op) \
- ATOMIC_OP(op, c_op) \
- ATOMIC_FETCH_OP(op, c_op)
-
-ATOMIC_OPS(and, &=)
-ATOMIC_OPS(or, |=)
-ATOMIC_OPS(xor, ^=)
-ATOMIC_OPS(add, +=)
-ATOMIC_OPS(sub, -=)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- h8300flags flags;
-
- flags = arch_local_irq_save();
- ret = v->counter;
- if (likely(ret == old))
- v->counter = new;
- arch_local_irq_restore(flags);
- return ret;
-}
-
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
- int ret;
- h8300flags flags;
-
- flags = arch_local_irq_save();
- ret = v->counter;
- if (ret != u)
- v->counter += a;
- arch_local_irq_restore(flags);
- return ret;
-}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-
-#endif /* __ARCH_H8300_ATOMIC __ */
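
With its hand-rolled atomic.h gone, h8300 (a uniprocessor-only port) now picks up the asm-generic implementations, which on !SMP use exactly the recipe the deleted file spelled out by hand: disable interrupts around a plain read-modify-write. Roughly what the generic fallback boils down to (the real version in include/asm-generic/atomic.h is macro-generated, so this is a sketch of one op):

    static inline int demo_atomic_add_return(int i, atomic_t *v)
    {
            unsigned long flags;
            int ret;

            local_irq_save(flags);  /* nothing can interleave on UP */
            ret = (v->counter += i);
            local_irq_restore(flags);
            return ret;
    }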
diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h
index 7aa16c732aa9..c867a80cab5b 100644
--- a/arch/h8300/include/asm/bitops.h
+++ b/arch/h8300/include/asm/bitops.h
@@ -9,6 +9,10 @@
#include <linux/compiler.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+
#ifdef __KERNEL__
#ifndef _LINUX_BITOPS_H
@@ -173,8 +177,4 @@ static inline unsigned long __ffs(unsigned long word)
#endif /* __KERNEL__ */
-#include <asm-generic/bitops/fls.h>
-#include <asm-generic/bitops/__fls.h>
-#include <asm-generic/bitops/fls64.h>
-
#endif /* _H8300_BITOPS_H */
diff --git a/arch/h8300/include/asm/cmpxchg.h b/arch/h8300/include/asm/cmpxchg.h
deleted file mode 100644
index c64bb38ce242..000000000000
--- a/arch/h8300/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ARCH_H8300_CMPXCHG__
-#define __ARCH_H8300_CMPXCHG__
-
-#include <linux/irqflags.h>
-
-#define xchg(ptr, x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
- sizeof(*(ptr))))
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
-static inline unsigned long __xchg(unsigned long x,
- volatile void *ptr, int size)
-{
- unsigned long tmp, flags;
-
- local_irq_save(flags);
-
- switch (size) {
- case 1:
- __asm__ __volatile__
- ("mov.b %2,%0\n\t"
- "mov.b %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
- break;
- case 2:
- __asm__ __volatile__
- ("mov.w %2,%0\n\t"
- "mov.w %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
- break;
- case 4:
- __asm__ __volatile__
- ("mov.l %2,%0\n\t"
- "mov.l %1,%2"
- : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
- break;
- default:
- tmp = 0;
- }
- local_irq_restore(flags);
- return tmp;
-}
-
-#include <asm-generic/cmpxchg-local.h>
-
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \
- (unsigned long)(o), \
- (unsigned long)(n), \
- sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
-#ifndef CONFIG_SMP
-#include <asm-generic/cmpxchg.h>
-#endif
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#endif /* __ARCH_H8300_CMPXCHG__ */
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
index d4b53af657c8..65571ee15132 100644
--- a/arch/h8300/kernel/asm-offsets.c
+++ b/arch/h8300/kernel/asm-offsets.c
@@ -21,7 +21,6 @@
int main(void)
{
/* offsets into the task struct */
- OFFSET(TASK_STATE, task_struct, state);
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_PTRACE, task_struct, ptrace);
OFFSET(TASK_BLOCKED, task_struct, blocked);
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 46b1342ce515..2ac27e4248a4 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -134,7 +134,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)p;
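
This p->state == TASK_RUNNING to task_is_running(p) conversion (repeated in the hexagon hunk further down) comes from the series that renames task_struct::state to __state; the helper hides the renamed field behind a READ_ONCE(). Its definition is approximately:

    /* Approximate helper from <linux/sched.h> in the same rename series. */
    #define task_is_running(task)   \
            (READ_ONCE((task)->__state) == TASK_RUNNING)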
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index 0281f92eea3d..c3590b2e9592 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -69,8 +69,6 @@ void __init h8300_fdt_init(void *fdt, char *bootargs)
static void __init bootmem_init(void)
{
- struct memblock_region *region;
-
memory_end = memory_start = 0;
/* Find main memory where is the kernel */
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 1f3b345d68b9..f7bf4693e3b2 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -98,6 +98,4 @@ void __init mem_init(void)
/* this will put all low memory onto the freelists */
memblock_free_all();
-
- mem_init_print_info(NULL);
}
diff --git a/arch/hexagon/Makefile b/arch/hexagon/Makefile
index c168c6980d05..74b644ea8a00 100644
--- a/arch/hexagon/Makefile
+++ b/arch/hexagon/Makefile
@@ -10,6 +10,9 @@ LDFLAGS_vmlinux += -G0
# Do not use single-byte enums; these will overflow.
KBUILD_CFLAGS += -fno-short-enums
+# We must use long-calls:
+KBUILD_CFLAGS += -mlong-calls
+
# Modules must use either long-calls, or use pic/plt.
# Use long-calls for now, it's easier. And faster.
# KBUILD_CFLAGS_MODULE += -fPIC
@@ -30,9 +33,6 @@ TIR_NAME := r19
KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__
KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME)
-LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name 2>/dev/null)
-libs-y += $(LIBGCC)
-
head-y := arch/hexagon/kernel/head.o
core-y += arch/hexagon/kernel/ \
diff --git a/arch/hexagon/configs/comet_defconfig b/arch/hexagon/configs/comet_defconfig
index f19ae2ab0aaa..9b2b1cc0794a 100644
--- a/arch/hexagon/configs/comet_defconfig
+++ b/arch/hexagon/configs/comet_defconfig
@@ -34,7 +34,6 @@ CONFIG_NET_ETHERNET=y
# CONFIG_SERIO is not set
# CONFIG_CONSOLE_TRANSLATIONS is not set
CONFIG_LEGACY_PTY_COUNT=64
-# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
CONFIG_SPI=y
CONFIG_SPI_DEBUG=y
@@ -81,4 +80,3 @@ CONFIG_FRAME_WARN=0
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
-CONFIG_DEBUG_INFO=y
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 4ab895d7111f..6e94f8d04146 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -14,7 +14,7 @@
/* Normal writes in our arch don't clear lock reservations */
-static inline void atomic_set(atomic_t *v, int new)
+static inline void arch_atomic_set(atomic_t *v, int new)
{
asm volatile(
"1: r6 = memw_locked(%0);\n"
@@ -26,26 +26,26 @@ static inline void atomic_set(atomic_t *v, int new)
);
}
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
/**
- * atomic_read - reads a word, atomically
+ * arch_atomic_read - reads a word, atomically
* @v: pointer to atomic value
*
* Assumes all word reads on our architecture are atomic.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
/**
- * atomic_xchg - atomic
+ * arch_atomic_xchg - atomic
* @v: pointer to memory to change
* @new: new value (technically passed in a register -- see xchg)
*/
-#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
/**
- * atomic_cmpxchg - atomic compare-and-exchange values
+ * arch_atomic_cmpxchg - atomic compare-and-exchange values
* @v: pointer to value to change
* @old: desired old value to match
* @new: new value to put in
@@ -61,7 +61,7 @@ static inline void atomic_set(atomic_t *v, int new)
*
* "old" is "expected" old val, __oldval is actual old value
*/
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int __oldval;
@@ -81,7 +81,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
}
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int output; \
\
@@ -97,7 +97,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int output; \
\
@@ -114,7 +114,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int output, val; \
\
@@ -148,7 +148,7 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP
/**
- * atomic_fetch_add_unless - add unless the number is a given value
+ * arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer to value
* @a: amount to add
* @u: unless value is equal to u
@@ -157,7 +157,7 @@ ATOMIC_OPS(xor)
*
*/
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int __oldval;
register int tmp;
@@ -180,6 +180,6 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
);
return __oldval;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#endif
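
Renaming the hexagon primitives to arch_atomic_* lets the generic instrumented layer define the unprefixed atomic_* names on top of them, inserting KASAN/KCSAN checks before delegating to the architecture. The generated wrappers have roughly this shape (simplified from the atomic-instrumented header; not hexagon-specific):

    static __always_inline int atomic_read(const atomic_t *v)
    {
            instrument_atomic_read(v, sizeof(*v));  /* sanitizer hook */
            return arch_atomic_read(v);             /* real arch op */
    }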
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index 92b8a02e588a..cdb705e1496a 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -42,7 +42,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* Atomically swap the contents of a register with memory. Should be atomic
* between multiple CPU's and within interrupts on the same CPU.
*/
-#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
+#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
sizeof(*(ptr))))
/*
@@ -51,7 +51,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* variable casting.
*/
-#define cmpxchg(ptr, old, new) \
+#define arch_cmpxchg(ptr, old, new) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
index 6b9c554aee78..9fb00a0ae89f 100644
--- a/arch/hexagon/include/asm/futex.h
+++ b/arch/hexagon/include/asm/futex.h
@@ -21,7 +21,7 @@
"3:\n" \
".section .fixup,\"ax\"\n" \
"4: %1 = #%5;\n" \
- " jump 3b\n" \
+ " jump ##3b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
".long 1b,4b,2b,4b\n" \
@@ -90,7 +90,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
"3:\n"
".section .fixup,\"ax\"\n"
"4: %0 = #%6\n"
- " jump 3b\n"
+ " jump ##3b\n"
".previous\n"
".section __ex_table,\"a\"\n"
".long 1b,4b,2b,4b\n"
diff --git a/arch/hexagon/include/asm/io.h b/arch/hexagon/include/asm/io.h
index bda2a9c2df78..c33241425a5c 100644
--- a/arch/hexagon/include/asm/io.h
+++ b/arch/hexagon/include/asm/io.h
@@ -64,7 +64,6 @@ static inline void *phys_to_virt(unsigned long address)
* convert a physical pointer to a virtual kernel pointer for
* /dev/mem access.
*/
-#define xlate_dev_kmem_ptr(p) __va(p)
#define xlate_dev_mem_ptr(p) __va(p)
/*
diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h
index dbb22b80b8c4..18cd6ea9ab23 100644
--- a/arch/hexagon/include/asm/pgtable.h
+++ b/arch/hexagon/include/asm/pgtable.h
@@ -155,9 +155,6 @@ extern unsigned long _dflt_cache_att;
extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* located in head.S */
-/* Seems to be zero even in architectures where the zero page is firewalled? */
-#define FIRST_USER_ADDRESS 0UL
-
/* HUGETLB not working currently */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
@@ -242,7 +239,6 @@ static inline int pmd_bad(pmd_t pmd)
* pmd_page - converts a PMD entry to a page pointer
*/
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
-#define pmd_pgtable(pmd) pmd_page(pmd)
/**
* pte_none - check if pte is mapped
diff --git a/arch/hexagon/include/asm/timex.h b/arch/hexagon/include/asm/timex.h
index 78338d8ada83..8d4ec76fceb4 100644
--- a/arch/hexagon/include/asm/timex.h
+++ b/arch/hexagon/include/asm/timex.h
@@ -8,6 +8,7 @@
#include <asm-generic/timex.h>
#include <asm/timer-regs.h>
+#include <asm/hexagon_vm.h>
/* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. */
#define CLOCK_TICK_RATE TCX0_CLK_RATE
@@ -16,7 +17,7 @@
static inline int read_current_timer(unsigned long *timer_val)
{
- *timer_val = (unsigned long) __vmgettime();
+ *timer_val = __vmgettime();
return 0;
}
diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c
index 6fb1aaab1c29..35545a7386a0 100644
--- a/arch/hexagon/kernel/hexagon_ksyms.c
+++ b/arch/hexagon/kernel/hexagon_ksyms.c
@@ -35,8 +35,8 @@ EXPORT_SYMBOL(_dflt_cache_att);
DECLARE_EXPORT(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes);
/* Additional functions */
-DECLARE_EXPORT(__divsi3);
-DECLARE_EXPORT(__modsi3);
-DECLARE_EXPORT(__udivsi3);
-DECLARE_EXPORT(__umodsi3);
+DECLARE_EXPORT(__hexagon_divsi3);
+DECLARE_EXPORT(__hexagon_modsi3);
+DECLARE_EXPORT(__hexagon_udivsi3);
+DECLARE_EXPORT(__hexagon_umodsi3);
DECLARE_EXPORT(csum_tcpudp_magic);
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c
index c61165c99ae0..6a6835fb4242 100644
--- a/arch/hexagon/kernel/process.c
+++ b/arch/hexagon/kernel/process.c
@@ -135,7 +135,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)task_stack_page(p);
diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c
index a5a89e944257..8975f9b4cedf 100644
--- a/arch/hexagon/kernel/ptrace.c
+++ b/arch/hexagon/kernel/ptrace.c
@@ -35,7 +35,7 @@ void user_disable_single_step(struct task_struct *child)
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
- srtuct membuf to)
+ struct membuf to)
{
struct pt_regs *regs = task_pt_regs(target);
@@ -54,7 +54,7 @@ static int genregs_get(struct task_struct *target,
membuf_store(&to, regs->m0);
membuf_store(&to, regs->m1);
membuf_store(&to, regs->usr);
- membuf_store(&to, regs->p3_0);
+ membuf_store(&to, regs->preds);
membuf_store(&to, regs->gp);
membuf_store(&to, regs->ugp);
membuf_store(&to, pt_elr(regs)); // pc
diff --git a/arch/hexagon/lib/Makefile b/arch/hexagon/lib/Makefile
index 54be529d17a2..a64641e89d5f 100644
--- a/arch/hexagon/lib/Makefile
+++ b/arch/hexagon/lib/Makefile
@@ -2,4 +2,5 @@
#
# Makefile for hexagon-specific library files.
#
-obj-y = checksum.o io.o memcpy.o memset.o
+obj-y = checksum.o io.o memcpy.o memset.o memcpy_likely_aligned.o \
+ divsi3.o modsi3.o udivsi3.o umodsi3.o
diff --git a/arch/hexagon/lib/divsi3.S b/arch/hexagon/lib/divsi3.S
new file mode 100644
index 000000000000..783e09424c2c
--- /dev/null
+++ b/arch/hexagon/lib/divsi3.S
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/linkage.h>
+
+SYM_FUNC_START(__hexagon_divsi3)
+ {
+ p0 = cmp.gt(r0,#-1)
+ p1 = cmp.gt(r1,#-1)
+ r3:2 = vabsw(r1:0)
+ }
+ {
+ p3 = xor(p0,p1)
+ r4 = sub(r2,r3)
+ r6 = cl0(r2)
+ p0 = cmp.gtu(r3,r2)
+ }
+ {
+ r0 = mux(p3,#-1,#1)
+ r7 = cl0(r3)
+ p1 = cmp.gtu(r3,r4)
+ }
+ {
+ r0 = mux(p0,#0,r0)
+ p0 = or(p0,p1)
+ if (p0.new) jumpr:nt r31
+ r6 = sub(r7,r6)
+ }
+ {
+ r7 = r6
+ r5:4 = combine(#1,r3)
+ r6 = add(#1,lsr(r6,#1))
+ p0 = cmp.gtu(r6,#4)
+ }
+ {
+ r5:4 = vaslw(r5:4,r7)
+ if (!p0) r6 = #3
+ }
+ {
+ loop0(1f,r6)
+ r7:6 = vlsrw(r5:4,#1)
+ r1:0 = #0
+ }
+ .falign
+1:
+ {
+ r5:4 = vlsrw(r5:4,#2)
+ if (!p0.new) r0 = add(r0,r5)
+ if (!p0.new) r2 = sub(r2,r4)
+ p0 = cmp.gtu(r4,r2)
+ }
+ {
+ r7:6 = vlsrw(r7:6,#2)
+ if (!p0.new) r0 = add(r0,r7)
+ if (!p0.new) r2 = sub(r2,r6)
+ p0 = cmp.gtu(r6,r2)
+ }:endloop0
+ {
+ if (!p0) r0 = add(r0,r7)
+ }
+ {
+ if (p3) r0 = sub(r1,r0)
+ jumpr r31
+ }
+SYM_FUNC_END(__hexagon_divsi3)
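
The predicated loop above is shift-and-subtract division: shift the divisor up until it is aligned with the dividend's most significant bit, then walk it back down, subtracting wherever it fits and setting the corresponding quotient bit. A portable C sketch of the unsigned core, ignoring the sign handling and the two-quotient-bits-per-iteration unrolling the assembly adds:

    /* Portable sketch of the algorithm __hexagon_divsi3 implements;
     * the real code handles signs and retires two bits per loop pass. */
    static unsigned int demo_udiv(unsigned int n, unsigned int d)
    {
            unsigned int q = 0, bit = 1;

            if (d == 0 || d > n)            /* d == 0 is UB in the real libcall */
                    return 0;
            while (!(d & 0x80000000u) && d < n) {   /* align divisor with n */
                    d <<= 1;
                    bit <<= 1;
            }
            while (bit) {                   /* subtract wherever it fits */
                    if (n >= d) {
                            n -= d;
                            q |= bit;
                    }
                    d >>= 1;
                    bit >>= 1;
            }
            return q;
    }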
diff --git a/arch/hexagon/lib/memcpy_likely_aligned.S b/arch/hexagon/lib/memcpy_likely_aligned.S
new file mode 100644
index 000000000000..6a541fb90a54
--- /dev/null
+++ b/arch/hexagon/lib/memcpy_likely_aligned.S
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/linkage.h>
+
+SYM_FUNC_START(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes)
+ {
+ p0 = bitsclr(r1,#7)
+ p0 = bitsclr(r0,#7)
+ if (p0.new) r5:4 = memd(r1)
+ if (p0.new) r7:6 = memd(r1+#8)
+ }
+ {
+ if (!p0) jump:nt .Lmemcpy_call
+ if (p0) r9:8 = memd(r1+#16)
+ if (p0) r11:10 = memd(r1+#24)
+ p0 = cmp.gtu(r2,#64)
+ }
+ {
+ if (p0) jump:nt .Lmemcpy_call
+ if (!p0) memd(r0) = r5:4
+ if (!p0) memd(r0+#8) = r7:6
+ p0 = cmp.gtu(r2,#32)
+ }
+ {
+ p1 = cmp.gtu(r2,#40)
+ p2 = cmp.gtu(r2,#48)
+ if (p0) r13:12 = memd(r1+#32)
+ if (p1.new) r15:14 = memd(r1+#40)
+ }
+ {
+ memd(r0+#16) = r9:8
+ memd(r0+#24) = r11:10
+ }
+ {
+ if (p0) memd(r0+#32) = r13:12
+ if (p1) memd(r0+#40) = r15:14
+ if (!p2) jumpr:t r31
+ }
+ {
+ p0 = cmp.gtu(r2,#56)
+ r5:4 = memd(r1+#48)
+ if (p0.new) r7:6 = memd(r1+#56)
+ }
+ {
+ memd(r0+#48) = r5:4
+ if (p0) memd(r0+#56) = r7:6
+ jumpr r31
+ }
+
+.Lmemcpy_call:
+ jump memcpy
+
+SYM_FUNC_END(__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes)
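
The helper commits to the inline path only when source and destination are both 8-byte aligned and the length is at most 64 bytes; anything else tail-calls the ordinary memcpy. As a C outline (names hypothetical; the len >= 32, multiple-of-8 contract is encoded in the function's name):

    #include <stdint.h>
    #include <string.h>

    /* C outline of the dispatch in the assembly above; callers guarantee
     * len >= 32 and len a multiple of 8 on the fast path. */
    static void *demo_copy_aligned(void *dst, const void *src, size_t len)
    {
            const uint64_t *s = src;
            uint64_t *d = dst;
            size_t i;

            if ((((uintptr_t)dst | (uintptr_t)src) & 7) || len > 64)
                    return memcpy(dst, src, len);   /* slow path */

            for (i = 0; i < len / 8; i++)           /* 4..8 doubleword stores */
                    d[i] = s[i];
            return dst;
    }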
diff --git a/arch/hexagon/lib/modsi3.S b/arch/hexagon/lib/modsi3.S
new file mode 100644
index 000000000000..9ea1c86efac2
--- /dev/null
+++ b/arch/hexagon/lib/modsi3.S
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/linkage.h>
+
+SYM_FUNC_START(__hexagon_modsi3)
+ {
+ p2 = cmp.ge(r0,#0)
+ r2 = abs(r0)
+ r1 = abs(r1)
+ }
+ {
+ r3 = cl0(r2)
+ r4 = cl0(r1)
+ p0 = cmp.gtu(r1,r2)
+ }
+ {
+ r3 = sub(r4,r3)
+ if (p0) jumpr r31
+ }
+ {
+ p1 = cmp.eq(r3,#0)
+ loop0(1f,r3)
+ r0 = r2
+ r2 = lsl(r1,r3)
+ }
+ .falign
+1:
+ {
+ p0 = cmp.gtu(r2,r0)
+ if (!p0.new) r0 = sub(r0,r2)
+ r2 = lsr(r2,#1)
+ if (p1) r1 = #0
+ }:endloop0
+ {
+ p0 = cmp.gtu(r2,r0)
+ if (!p0.new) r0 = sub(r0,r1)
+ if (p2) jumpr r31
+ }
+ {
+ r0 = neg(r0)
+ jumpr r31
+ }
+SYM_FUNC_END(__hexagon_modsi3)
diff --git a/arch/hexagon/lib/udivsi3.S b/arch/hexagon/lib/udivsi3.S
new file mode 100644
index 000000000000..477f27b9311c
--- /dev/null
+++ b/arch/hexagon/lib/udivsi3.S
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/linkage.h>
+
+SYM_FUNC_START(__hexagon_udivsi3)
+ {
+ r2 = cl0(r0)
+ r3 = cl0(r1)
+ r5:4 = combine(#1,#0)
+ p0 = cmp.gtu(r1,r0)
+ }
+ {
+ r6 = sub(r3,r2)
+ r4 = r1
+ r1:0 = combine(r0,r4)
+ if (p0) jumpr r31
+ }
+ {
+ r3:2 = vlslw(r5:4,r6)
+ loop0(1f,r6)
+ }
+ .falign
+1:
+ {
+ p0 = cmp.gtu(r2,r1)
+ if (!p0.new) r1 = sub(r1,r2)
+ if (!p0.new) r0 = add(r0,r3)
+ r3:2 = vlsrw(r3:2,#1)
+ }:endloop0
+ {
+ p0 = cmp.gtu(r2,r1)
+ if (!p0.new) r0 = add(r0,r3)
+ jumpr r31
+ }
+SYM_FUNC_END(__hexagon_udivsi3)
diff --git a/arch/hexagon/lib/umodsi3.S b/arch/hexagon/lib/umodsi3.S
new file mode 100644
index 000000000000..280bf06a55e7
--- /dev/null
+++ b/arch/hexagon/lib/umodsi3.S
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/linkage.h>
+
+SYM_FUNC_START(__hexagon_umodsi3)
+ {
+ r2 = cl0(r0)
+ r3 = cl0(r1)
+ p0 = cmp.gtu(r1,r0)
+ }
+ {
+ r2 = sub(r3,r2)
+ if (p0) jumpr r31
+ }
+ {
+ loop0(1f,r2)
+ p1 = cmp.eq(r2,#0)
+ r2 = lsl(r1,r2)
+ }
+ .falign
+1:
+ {
+ p0 = cmp.gtu(r2,r0)
+ if (!p0.new) r0 = sub(r0,r2)
+ r2 = lsr(r2,#1)
+ if (p1) r1 = #0
+ }:endloop0
+ {
+ p0 = cmp.gtu(r2,r0)
+ if (!p0.new) r0 = sub(r0,r1)
+ jumpr r31
+ }
+SYM_FUNC_END(__hexagon_umodsi3)
diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c
index f2e6c868e477..f01e91e10d95 100644
--- a/arch/hexagon/mm/init.c
+++ b/arch/hexagon/mm/init.c
@@ -55,7 +55,6 @@ void __init mem_init(void)
{
/* No idea where this is actually declared. Seems to evade LXR. */
memblock_free_all();
- mem_init_print_info(NULL);
/*
* To-Do: someone somewhere should wipe out the bootmem map
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 2ad7a8d29fcc..cf425c2c63af 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -13,6 +13,8 @@ config IA64
select ARCH_MIGHT_HAVE_PC_SERIO
select ACPI
select ACPI_NUMA if NUMA
+ select ARCH_ENABLE_MEMORY_HOTPLUG
+ select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_SUPPORTS_ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
@@ -32,6 +34,7 @@ config IA64
select TTY
select HAVE_ARCH_TRACEHOOK
select HAVE_VIRT_CPU_ACCOUNTING
+ select HUGETLB_PAGE_SIZE_VARIABLE if HUGETLB_PAGE
select VIRT_TO_BUS
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
@@ -57,6 +60,7 @@ config IA64
select NUMA if !FLATMEM
select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
select SET_FS
+ select ZONE_DMA32
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
@@ -69,9 +73,6 @@ config 64BIT
select ATA_NONSTANDARD if ATA
default y
-config ZONE_DMA32
- def_bool y
-
config MMU
bool
default y
@@ -82,11 +83,6 @@ config STACKTRACE_SUPPORT
config GENERIC_LOCKBREAK
def_bool n
-config HUGETLB_PAGE_SIZE_VARIABLE
- bool
- depends on HUGETLB_PAGE
- default y
-
config GENERIC_CALIBRATE_DELAY
bool
default y
@@ -250,12 +246,6 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
-config ARCH_ENABLE_MEMORY_HOTPLUG
- def_bool y
-
-config ARCH_ENABLE_MEMORY_HOTREMOVE
- def_bool y
-
config SCHED_SMT
bool "SMT scheduler support"
depends on SMP
@@ -286,15 +276,6 @@ config FORCE_CPEI_RETARGET
config ARCH_SELECT_MEMORY_MODEL
def_bool y
-config ARCH_DISCONTIGMEM_ENABLE
- def_bool y
- depends on BROKEN
- help
- Say Y to support efficient handling of discontiguous physical memory,
- for architectures which are either NUMA (Non-Uniform Memory Access)
- or have huge holes in the physical address space for other reasons.
- See <file:Documentation/vm/numa.rst> for more.
-
config ARCH_FLATMEM_ENABLE
def_bool y
@@ -319,29 +300,12 @@ config NODES_SHIFT
int "Max num nodes shift(3-10)"
range 3 10
default "10"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
This option specifies the maximum number of nodes in your SSI system.
MAX_NUMNODES will be 2^(This value).
If in doubt, use the default.
-# VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent.
-# VIRTUAL_MEM_MAP has been retained for historical reasons.
-config VIRTUAL_MEM_MAP
- bool "Virtual mem map"
- depends on !SPARSEMEM && !FLATMEM
- default y
- help
- Say Y to compile the kernel with support for a virtual mem map.
- This code also only takes effect if a memory hole of greater than
- 1 Gb is found during boot. You must turn this option on if you
- require the DISCONTIGMEM option for your machine. If you are
- unsure, say Y.
-
-config HOLES_IN_ZONE
- bool
- default y if VIRTUAL_MEM_MAP
-
config HAVE_ARCH_NODEDATA_EXTENSION
def_bool y
depends on NUMA
diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig
index c409756b5396..0341a67cc1bf 100644
--- a/arch/ia64/configs/bigsur_defconfig
+++ b/arch/ia64/configs/bigsur_defconfig
@@ -9,7 +9,6 @@ CONFIG_SGI_PARTITION=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_PREEMPT=y
-# CONFIG_VIRTUAL_MEM_MAP is not set
CONFIG_IA64_PALINFO=y
CONFIG_EFI_VARS=y
CONFIG_BINFMT_MISC=m
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index ca0d596c800d..8916a2850c48 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -55,8 +55,6 @@ CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_QLOGIC_1280=y
-CONFIG_ATA=y
-CONFIG_ATA_PIIX=y
CONFIG_SATA_VITESSE=y
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index f267d956458f..266c429b9137 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,11 +21,11 @@
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
-#define atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op) \
static __inline__ int \
@@ -36,7 +36,7 @@ ia64_atomic_##op (int i, atomic_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic_read(v); \
+ old = arch_atomic_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
return new; \
@@ -51,7 +51,7 @@ ia64_atomic_fetch_##op (int i, atomic_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic_read(v); \
+ old = arch_atomic_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
return old; \
@@ -74,7 +74,7 @@ ATOMIC_OPS(sub, -)
#define __ia64_atomic_const(i) 0
#endif
-#define atomic_add_return(i,v) \
+#define arch_atomic_add_return(i,v) \
({ \
int __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -82,7 +82,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_add(__ia64_aar_i, v); \
})
-#define atomic_sub_return(i,v) \
+#define arch_atomic_sub_return(i,v) \
({ \
int __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -90,7 +90,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_sub(__ia64_asr_i, v); \
})
-#define atomic_fetch_add(i,v) \
+#define arch_atomic_fetch_add(i,v) \
({ \
int __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -98,7 +98,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_fetch_add(__ia64_aar_i, v); \
})
-#define atomic_fetch_sub(i,v) \
+#define arch_atomic_fetch_sub(i,v) \
({ \
int __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -110,13 +110,13 @@ ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)
-#define atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
-#define atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
-#define atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)
+#define arch_atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
+#define arch_atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
+#define arch_atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)
-#define atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v)
-#define atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v)
-#define atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v)
+#define arch_atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v)
+#define arch_atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v)
+#define arch_atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v)
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
@@ -131,7 +131,7 @@ ia64_atomic64_##op (s64 i, atomic64_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic64_read(v); \
+ old = arch_atomic64_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
return new; \
@@ -146,7 +146,7 @@ ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
\
do { \
CMPXCHG_BUGCHECK(v); \
- old = atomic64_read(v); \
+ old = arch_atomic64_read(v); \
new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
return old; \
@@ -159,7 +159,7 @@ ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)
-#define atomic64_add_return(i,v) \
+#define arch_atomic64_add_return(i,v) \
({ \
s64 __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -167,7 +167,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_add(__ia64_aar_i, v); \
})
-#define atomic64_sub_return(i,v) \
+#define arch_atomic64_sub_return(i,v) \
({ \
s64 __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -175,7 +175,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_sub(__ia64_asr_i, v); \
})
-#define atomic64_fetch_add(i,v) \
+#define arch_atomic64_fetch_add(i,v) \
({ \
s64 __ia64_aar_i = (i); \
__ia64_atomic_const(i) \
@@ -183,7 +183,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_fetch_add(__ia64_aar_i, v); \
})
-#define atomic64_fetch_sub(i,v) \
+#define arch_atomic64_fetch_sub(i,v) \
({ \
s64 __ia64_asr_i = (i); \
__ia64_atomic_const(i) \
@@ -195,29 +195,29 @@ ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)
-#define atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v)
-#define atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v)
-#define atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v)
+#define arch_atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v)
+#define arch_atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v)
+#define arch_atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v)
-#define atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
-#define atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v)
-#define atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)
+#define arch_atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
+#define arch_atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v)
+#define arch_atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP
-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-#define atomic64_cmpxchg(v, old, new) \
- (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, old, new) \
+ (arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-#define atomic_add(i,v) (void)atomic_add_return((i), (v))
-#define atomic_sub(i,v) (void)atomic_sub_return((i), (v))
+#define arch_atomic_add(i,v) (void)arch_atomic_add_return((i), (v))
+#define arch_atomic_sub(i,v) (void)arch_atomic_sub_return((i), (v))
-#define atomic64_add(i,v) (void)atomic64_add_return((i), (v))
-#define atomic64_sub(i,v) (void)atomic64_sub_return((i), (v))
+#define arch_atomic64_add(i,v) (void)arch_atomic64_add_return((i), (v))
+#define arch_atomic64_sub(i,v) (void)arch_atomic64_sub_return((i), (v))
#endif /* _ASM_IA64_ATOMIC_H */
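
Most of the renamed ia64 operations bottom out in the compare-and-exchange retry loop visible in the ATOMIC_OP context lines above: read the counter, compute the new value, and retry if another CPU raced in between. Written out for a single add (a sketch, not the literal macro expansion):

    /* The cmpxchg retry loop the ia64 ATOMIC_OP macros expand to,
     * spelled out for 'add'. */
    static inline int demo_atomic_add_return(int i, atomic_t *v)
    {
            int old, new;

            do {
                    old = arch_atomic_read(v);
                    new = old + i;
                    /* retry if *v changed between the read and the cmpxchg */
            } while (arch_cmpxchg(&v->counter, old, new) != old);
            return new;
    }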
diff --git a/arch/ia64/include/asm/cmpxchg.h b/arch/ia64/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..94ef84429843
--- /dev/null
+++ b/arch/ia64/include/asm/cmpxchg.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_IA64_CMPXCHG_H
+#define _ASM_IA64_CMPXCHG_H
+
+#include <uapi/asm/cmpxchg.h>
+
+#define arch_xchg(ptr, x) \
+({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+
+#define arch_cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+
+#define arch_cmpxchg_local arch_cmpxchg
+#define arch_cmpxchg64_local arch_cmpxchg64
+
+#endif /* _ASM_IA64_CMPXCHG_H */
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 3d666a11a2de..6d93b923b379 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -277,7 +277,6 @@ extern void memset_io(volatile void __iomem *s, int c, long n);
#define memcpy_fromio memcpy_fromio
#define memcpy_toio memcpy_toio
#define memset_io memset_io
-#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
#include <asm-generic/io.h>
#undef PCI_IOBASE
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index e789c0818edb..6c47a239fc26 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -58,15 +58,4 @@ extern int reserve_elfcorehdr(u64 *start, u64 *end);
extern int register_active_ranges(u64 start, u64 len, int nid);
-#ifdef CONFIG_VIRTUAL_MEM_MAP
- extern unsigned long VMALLOC_END;
- extern struct page *vmem_map;
- extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
- extern int vmemmap_find_next_valid_pfn(int, int);
-#else
-static inline int vmemmap_find_next_valid_pfn(int node, int i)
-{
- return i + 1;
-}
-#endif
#endif /* meminit_h */
diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h
index 5a29652e6def..7271b9c5fc76 100644
--- a/arch/ia64/include/asm/module.h
+++ b/arch/ia64/include/asm/module.h
@@ -14,16 +14,20 @@
struct elf64_shdr; /* forward declaration */
struct elf64_shdr; /* forward declaration */
struct mod_arch_specific {
+ /* Used only at module load time. */
struct elf64_shdr *core_plt; /* core PLT section */
struct elf64_shdr *init_plt; /* init PLT section */
struct elf64_shdr *got; /* global offset table */
struct elf64_shdr *opd; /* official procedure descriptors */
struct elf64_shdr *unwind; /* unwind-table section */
unsigned long gp; /* global-pointer for module */
+ unsigned int next_got_entry; /* index of next available got entry */
+ /* Used at module run and cleanup time. */
void *core_unw_table; /* core unwind-table cookie returned by unwinder */
void *init_unw_table; /* init unwind-table cookie returned by unwinder */
- unsigned int next_got_entry; /* index of next available got entry */
+ void *opd_addr; /* symbolize uses .opd to get to actual function */
+ unsigned long opd_size;
};
#define ARCH_SHF_SMALL SHF_IA_64_SHORT
diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h
index b69a5499d75b..1b990466d540 100644
--- a/arch/ia64/include/asm/page.h
+++ b/arch/ia64/include/asm/page.h
@@ -82,44 +82,23 @@ do { \
} while (0)
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
({ \
struct page *page = alloc_page_vma( \
- GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr); \
+ GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr); \
if (page) \
flush_dcache_page(page); \
page; \
})
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-extern int ia64_pfn_valid (unsigned long pfn);
-#else
-# define ia64_pfn_valid(pfn) 1
-#endif
-
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-extern struct page *vmem_map;
-#ifdef CONFIG_DISCONTIGMEM
-# define page_to_pfn(page) ((unsigned long) (page - vmem_map))
-# define pfn_to_page(pfn) (vmem_map + (pfn))
-# define __pfn_to_phys(pfn) PFN_PHYS(pfn)
-#else
-# include <asm-generic/memory_model.h>
-#endif
-#else
-# include <asm-generic/memory_model.h>
-#endif
+#include <asm-generic/memory_model.h>
#ifdef CONFIG_FLATMEM
-# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
-#elif defined(CONFIG_DISCONTIGMEM)
-extern unsigned long min_low_pfn;
-extern unsigned long max_low_pfn;
-# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
+# define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
diff --git a/arch/ia64/include/asm/pal.h b/arch/ia64/include/asm/pal.h
index b1d87955e8cc..e6b652f9e45e 100644
--- a/arch/ia64/include/asm/pal.h
+++ b/arch/ia64/include/asm/pal.h
@@ -99,6 +99,7 @@
#include <linux/types.h>
#include <asm/fpu.h>
+#include <asm/intrinsics.h>
/*
* Data types needed to pass information into PAL procedures and
@@ -1086,7 +1087,7 @@ static inline long ia64_pal_freq_base(unsigned long *platform_base_freq)
/*
* Get the ratios for processor frequency, bus frequency and interval timer to
- * to base frequency of the platform
+ * the base frequency of the platform
*/
static inline s64
ia64_pal_freq_ratios (struct pal_freq_ratio *proc_ratio, struct pal_freq_ratio *bus_ratio,
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index 9601cfe83c94..0fb2b6291d58 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -52,7 +52,6 @@ pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, pgtable_t pte)
{
pmd_val(*pmd_entry) = page_to_phys(pte);
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 9b4efe89e62d..3f5dbbd8b9d8 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -128,7 +128,6 @@
#define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT
#define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT)
#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
-#define FIRST_USER_ADDRESS 0UL
/*
* All the normal masks have the "page accessed" bits on, as any time
@@ -223,10 +222,6 @@ ia64_phys_addr_valid (unsigned long addr)
#define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL)
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-# define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
-extern unsigned long VMALLOC_END;
-#else
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
# define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
@@ -234,7 +229,6 @@ extern unsigned long VMALLOC_END;
#else
# define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
#endif
-#endif
/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
@@ -328,7 +322,7 @@ extern void __ia64_sync_icache_dcache(pte_t pteval);
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
/* page is present && page is user && page is executable
- * && (page swapin or new page or page migraton
+ * && (page swapin or new page or page migration
* || copy_on_write with page copying.)
*/
if (pte_present_exec_user(pteval) &&
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
index b3aa46090101..08179135905c 100644
--- a/arch/ia64/include/asm/ptrace.h
+++ b/arch/ia64/include/asm/ptrace.h
@@ -54,8 +54,7 @@
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
- /* FIXME: should this be bspstore + nr_dirty regs? */
- return regs->ar_bspstore;
+ return regs->r12;
}
static inline int is_syscall_success(struct pt_regs *regs)
@@ -79,11 +78,6 @@ static inline long regs_return_value(struct pt_regs *regs)
unsigned long __ip = instruction_pointer(regs); \
(__ip & ~3UL) + ((__ip & 3UL) << 2); \
})
-/*
- * Why not default? Because user_stack_pointer() on ia64 gives register
- * stack backing store instead...
- */
-#define current_user_stack_pointer() (current_pt_regs()->r12)
/* given a pointer to a task_struct, return the user's pt_regs */
# define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 5f620e66384e..864775970c50 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -26,7 +26,7 @@
* the queue, and the other indicating the current tail. The lock is acquired
* by atomically noting the tail and incrementing it by one (thus adding
* ourself to the queue and noting our position), then waiting until the head
- * becomes equal to the the initial value of the tail.
+ * becomes equal to the initial value of the tail.
* The pad bits in the middle are used to prevent the next_ticket number
* overflowing into the now_serving number.
*
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
index 6c6f16e409a8..0d23c0049301 100644
--- a/arch/ia64/include/asm/syscall.h
+++ b/arch/ia64/include/asm/syscall.h
@@ -32,7 +32,7 @@ static inline void syscall_rollback(struct task_struct *task,
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
- return regs->r10 == -1 ? regs->r8:0;
+ return regs->r10 == -1 ? -regs->r8:0;
}
static inline long syscall_get_return_value(struct task_struct *task,
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 179243c3dfc7..e19d2dcc0ced 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -272,22 +272,4 @@ xlate_dev_mem_ptr(phys_addr_t p)
return ptr;
}
-/*
- * Convert a virtual cached kernel memory pointer to an uncached pointer
- */
-static __inline__ void *
-xlate_dev_kmem_ptr(void *p)
-{
- struct page *page;
- void *ptr;
-
- page = virt_to_page((unsigned long)p);
- if (PageUncached(page))
- ptr = (void *)__pa(p) + __IA64_UNCACHED_OFFSET;
- else
- ptr = p;
-
- return ptr;
-}
-
#endif /* _ASM_IA64_UACCESS_H */
diff --git a/arch/ia64/include/asm/unaligned.h b/arch/ia64/include/asm/unaligned.h
deleted file mode 100644
index 328942e3cbce..000000000000
--- a/arch/ia64/include/asm/unaligned.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_IA64_UNALIGNED_H
-#define _ASM_IA64_UNALIGNED_H
-
-#include <linux/unaligned/le_struct.h>
-#include <linux/unaligned/be_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-
-#endif /* _ASM_IA64_UNALIGNED_H */
diff --git a/arch/ia64/include/asm/uv/uv_hub.h b/arch/ia64/include/asm/uv/uv_hub.h
index 2a88c7204e52..809ddb6896db 100644
--- a/arch/ia64/include/asm/uv/uv_hub.h
+++ b/arch/ia64/include/asm/uv/uv_hub.h
@@ -257,7 +257,7 @@ static inline int uv_numa_blade_id(void)
return 0;
}
-/* Convert a cpu number to the the UV blade number */
+/* Convert a cpu number to the UV blade number */
static inline int uv_cpu_to_blade_id(int cpu)
{
return 0;
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index 5d90307fd6e0..926c6cb1e029 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_IA64_CMPXCHG_H
-#define _ASM_IA64_CMPXCHG_H
+#ifndef _UAPI_ASM_IA64_CMPXCHG_H
+#define _UAPI_ASM_IA64_CMPXCHG_H
/*
* Compare/Exchange, forked from asm/intrinsics.h
@@ -53,8 +53,10 @@ extern void ia64_xchg_called_with_bad_pointer(void);
__xchg_result; \
})
+#ifndef __KERNEL__
#define xchg(ptr, x) \
({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+#endif
/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -126,12 +128,14 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
* we had to back-pedal and keep the "legacy" behavior of a full fence :-(
*/
+#ifndef __KERNEL__
/* for compatibility with other platforms: */
#define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
#define cmpxchg_local cmpxchg
#define cmpxchg64_local cmpxchg64
+#endif
#ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
@@ -152,4 +156,4 @@ do { \
#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_IA64_CMPXCHG_H */
+#endif /* _UAPI_ASM_IA64_CMPXCHG_H */
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 78717819131c..08d4a2ba0652 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -9,7 +9,7 @@ endif
extra-y := head.o vmlinux.lds
-obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
+obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o irq.o irq_ia64.o \
irq_lsapic.o ivt.o pal.o patch.o process.o ptrace.o sal.o \
salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
unwind.o mca.o mca_asm.o topology.o dma-mapping.o iosapic.o acpi.o \
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index a5636524af76..e2af6b172200 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -446,7 +446,8 @@ void __init acpi_numa_fixup(void)
if (srat_num_cpus == 0) {
node_set_online(0);
node_cpuid[0].phys_id = hard_smp_processor_id();
- return;
+ slit_distance(0, 0) = LOCAL_DISTANCE;
+ goto out;
}
/*
@@ -489,7 +490,7 @@ void __init acpi_numa_fixup(void)
for (j = 0; j < MAX_NUMNODES; j++)
slit_distance(i, j) = i == j ?
LOCAL_DISTANCE : REMOTE_DISTANCE;
- return;
+ goto out;
}
memset(numa_slit, -1, sizeof(numa_slit));
@@ -514,6 +515,8 @@ void __init acpi_numa_fixup(void)
printk("\n");
}
#endif
+out:
+ node_possible_map = node_online_map;
}
#endif /* CONFIG_ACPI_NUMA */
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index c5fe21de46a8..31149e41f9be 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -415,10 +415,10 @@ efi_get_pal_addr (void)
mask = ~((1 << IA64_GRANULE_SHIFT) - 1);
printk(KERN_INFO "CPU %d: mapping PAL code "
- "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
- smp_processor_id(), md->phys_addr,
- md->phys_addr + efi_md_size(md),
- vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
+ "[0x%llx-0x%llx) into [0x%llx-0x%llx)\n",
+ smp_processor_id(), md->phys_addr,
+ md->phys_addr + efi_md_size(md),
+ vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
#endif
return __va(md->phys_addr);
}
@@ -560,6 +560,7 @@ efi_init (void)
{
efi_memory_desc_t *md;
void *p;
+ unsigned int i;
for (i = 0, p = efi_map_start; p < efi_map_end;
++i, p += efi_desc_size)
@@ -586,7 +587,7 @@ efi_init (void)
}
printk("mem%02d: %s "
- "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
+ "range=[0x%016llx-0x%016llx) (%4lu%s)\n",
i, efi_md_typeattr_format(buf, sizeof(buf), md),
md->phys_addr,
md->phys_addr + efi_md_size(md), size, unit);
diff --git a/arch/ia64/kernel/efi_stub.S b/arch/ia64/kernel/efi_stub.S
index 58233bb7976d..1fd61b78fb29 100644
--- a/arch/ia64/kernel/efi_stub.S
+++ b/arch/ia64/kernel/efi_stub.S
@@ -7,7 +7,7 @@
*
* This stub allows us to make EFI calls in physical mode with interrupts
* turned off. We need this because we can't call SetVirtualMap() until
- * the kernel has booted far enough to allow allocation of struct vma_struct
+ * the kernel has booted far enough to allow allocation of struct vm_area_struct
* entries (which we would need to map stuff with memory attributes other
* than uncached or writeback...). Since the GetTime() service gets called
* earlier than that, we need to be able to make physical mode EFI calls from
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index e98e3dafffd8..5eba3fb2e311 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1420,10 +1420,9 @@ END(ftrace_stub)
#endif /* CONFIG_FUNCTION_TRACER */
-#define __SYSCALL(nr, entry, nargs) data8 entry
+#define __SYSCALL(nr, entry) data8 entry
.rodata
.align 8
.globl sys_call_table
sys_call_table:
#include <asm/syscall_table.h>
-#undef __SYSCALL
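
The syscall table header is an X-macro: each consumer defines __SYSCALL to whatever shape it needs before including it, and this assembly consumer now emits one data8 pointer per entry, the third nargs argument having been dropped from the generated entries. The same trick in C, with two hypothetical entries inlined in place of the generated include:

    /* Hypothetical C consumer of the same X-macro scheme; the real
     * <asm/syscall_table.h> is generated from syscall.tbl. */
    long demo_read(void);
    long demo_write(void);

    #define __SYSCALL(nr, entry) [nr] = (void *)entry,
    static void *demo_sys_call_table[] = {
            __SYSCALL(0, demo_read)
            __SYSCALL(1, demo_write)
    };
    #undef __SYSCALL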
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 8b5b8e6bc9d9..dd5bfed52031 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -59,7 +59,7 @@ show_##name(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
u32 cpu=dev->id; \
- return sprintf(buf, "%lx\n", name[cpu]); \
+ return sprintf(buf, "%llx\n", name[cpu]); \
}
#define store(name) \
@@ -86,9 +86,9 @@ store_call_start(struct device *dev, struct device_attribute *attr,
#ifdef ERR_INJ_DEBUG
printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
- printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
- printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
- printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
+ printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]);
+ printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]);
+ printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n",
err_data_buffer[cpu].data1,
err_data_buffer[cpu].data2,
err_data_buffer[cpu].data3);
@@ -117,8 +117,8 @@ store_call_start(struct device *dev, struct device_attribute *attr,
#ifdef ERR_INJ_DEBUG
printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
- printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
- printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
+ printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]);
+ printk(KERN_DEBUG "resources=%llx\n", resources[cpu]);
#endif
return size;
}
@@ -131,7 +131,7 @@ show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
char *buf)
{
unsigned int cpu=dev->id;
- return sprintf(buf, "%lx\n", phys_addr[cpu]);
+ return sprintf(buf, "%llx\n", phys_addr[cpu]);
}
static ssize_t
@@ -145,7 +145,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
if (ret<=0) {
#ifdef ERR_INJ_DEBUG
- printk("Virtual address %lx is not existing.\n",virt_addr);
+ printk("Virtual address %llx is not existing.\n", virt_addr);
#endif
return -EINVAL;
}
@@ -163,7 +163,7 @@ show_err_data_buffer(struct device *dev,
{
unsigned int cpu=dev->id;
- return sprintf(buf, "%lx, %lx, %lx\n",
+ return sprintf(buf, "%llx, %llx, %llx\n",
err_data_buffer[cpu].data1,
err_data_buffer[cpu].data2,
err_data_buffer[cpu].data3);
@@ -178,13 +178,13 @@ store_err_data_buffer(struct device *dev,
int ret;
#ifdef ERR_INJ_DEBUG
- printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n",
+ printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n",
err_data_buffer[cpu].data1,
err_data_buffer[cpu].data2,
err_data_buffer[cpu].data3,
cpu);
#endif
- ret=sscanf(buf, "%lx, %lx, %lx",
+ ret = sscanf(buf, "%llx, %llx, %llx",
&err_data_buffer[cpu].data1,
&err_data_buffer[cpu].data2,
&err_data_buffer[cpu].data3);
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 0750a716adc7..2094f3249019 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -172,7 +172,7 @@ ENTRY(fsys_gettimeofday)
// r25 = itc_lastcycle value
// r26 = address clocksource cycle_last
// r27 = (not used)
- // r28 = sequence number at the beginning of critcal section
+ // r28 = sequence number at the beginning of critical section
// r29 = address of itc_jitter
// r30 = time processing flags / memory address
// r31 = pointer to result
@@ -432,7 +432,7 @@ GLOBAL_ENTRY(fsys_bubble_down)
* - r29: psr
*
* We used to clear some PSR bits here but that requires slow
- * serialization. Fortuntely, that isn't really necessary.
+ * serialization. Fortunately, that isn't really necessary.
* The rationale is as follows: we used to clear bits
* ~PSR_PRESERVED_BITS in PSR.L. Since
* PSR_PRESERVED_BITS==PSR.{UP,MFL,MFH,PK,DT,PP,SP,RT,IC}, we
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 30f1ef760136..f22469f1c1fc 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -33,7 +33,6 @@
#include <asm/mca_asm.h>
#include <linux/init.h>
#include <linux/linkage.h>
-#include <linux/pgtable.h>
#include <asm/export.h>
#ifdef CONFIG_HOTPLUG_CPU
@@ -405,11 +404,6 @@ start_ap:
// This is executed by the bootstrap processor (bsp) only:
-#ifdef CONFIG_IA64_FW_EMU
- // initialize PAL & SAL emulator:
- br.call.sptk.many rp=sys_fw_init
-.ret1:
-#endif
br.call.sptk.many rp=start_kernel
.ret2: addl r3=@ltoff(halt_msg),gp
;;
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
deleted file mode 100644
index f8150ee74f29..000000000000
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ /dev/null
@@ -1,12 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Architecture-specific kernel symbols
- */
-
-#if defined(CONFIG_VIRTUAL_MEM_MAP) || defined(CONFIG_DISCONTIGMEM)
-#include <linux/compiler.h>
-#include <linux/export.h>
-#include <linux/memblock.h>
-EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */
-EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */
-#endif
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index fc1ff8a4d7de..441ed04b1037 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -844,22 +844,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index af310dc8a356..4db9ca144fa5 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -143,7 +143,7 @@ void machine_kexec(struct kimage *image)
void arch_crash_save_vmcoreinfo(void)
{
-#if defined(CONFIG_DISCONTIGMEM) || defined(CONFIG_SPARSEMEM)
+#if defined(CONFIG_SPARSEMEM)
VMCOREINFO_SYMBOL(pgdat_list);
VMCOREINFO_LENGTH(pgdat_list, MAX_NUMNODES);
#endif
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index d4cae2fc69ca..e628a88607bb 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -109,9 +109,9 @@
#include "irq.h"
#if defined(IA64_MCA_DEBUG_INFO)
-# define IA64_MCA_DEBUG(fmt...) printk(fmt)
+# define IA64_MCA_DEBUG(fmt...) printk(fmt)
#else
-# define IA64_MCA_DEBUG(fmt...)
+# define IA64_MCA_DEBUG(fmt...) do {} while (0)
#endif
#define NOTIFY_INIT(event, regs, arg, spin) \
@@ -1788,7 +1788,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
ti->task = p;
ti->cpu = cpu;
p->stack = ti;
- p->state = TASK_UNINTERRUPTIBLE;
+ p->__state = TASK_UNINTERRUPTIBLE;
cpumask_set_cpu(cpu, &p->cpus_mask);
INIT_LIST_HEAD(&p->tasks);
p->parent = p->real_parent = p->group_leader = p;
@@ -1824,7 +1824,7 @@ ia64_mca_cpu_init(void *cpu_data)
data = mca_bootmem();
first_time = 0;
} else
- data = (void *)__get_free_pages(GFP_KERNEL,
+ data = (void *)__get_free_pages(GFP_ATOMIC,
get_order(sz));
if (!data)
panic("Could not allocate MCA memory for cpu %d\n",
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 36a69b4e6169..5bfc79be4cef 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -343,7 +343,7 @@ init_record_index_pools(void)
/* - 2 - */
sect_min_size = sal_log_sect_min_sizes[0];
- for (i = 1; i < sizeof sal_log_sect_min_sizes/sizeof(size_t); i++)
+ for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++)
if (sect_min_size > sal_log_sect_min_sizes[i])
sect_min_size = sal_log_sect_min_sizes[i];
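ARRAY_SIZE() here fixes a real miscount, not just style: sal_log_sect_min_sizes[] holds int entries, so on ia64, where size_t is 8 bytes, the old sizeof-array / sizeof(size_t) divisor walked only half the table. A stand-alone sketch of the idiom — the kernel's version in <linux/kernel.h> additionally rejects non-array arguments at compile time, and the element values below are made up:

	#include <stddef.h>
	#include <stdio.h>

	#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

	static const int sal_log_sect_min_sizes[] = { 40, 32, 48, 16 };	/* hypothetical sizes */

	int main(void)
	{
		size_t i;
		int min = sal_log_sect_min_sizes[0];

		for (i = 1; i < ARRAY_SIZE(sal_log_sect_min_sizes); i++)
			if (sal_log_sect_min_sizes[i] < min)
				min = sal_log_sect_min_sizes[i];
		printf("min section size = %d\n", min);
		return 0;
	}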
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 00a496cb346f..2cba53c1da82 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -905,9 +905,31 @@ register_unwind_table (struct module *mod)
int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
+ struct mod_arch_specific *mas = &mod->arch;
+
DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
- if (mod->arch.unwind)
+ if (mas->unwind)
register_unwind_table(mod);
+
+ /*
+ * ".opd" was already relocated to the final destination. Store
+ * its address for use in the symbolizer.
+ */
+ mas->opd_addr = (void *)mas->opd->sh_addr;
+ mas->opd_size = mas->opd->sh_size;
+
+ /*
+ * Module relocation was already done at this point. Section
+ * headers are about to be deleted. Wipe out load-time context.
+ */
+ mas->core_plt = NULL;
+ mas->init_plt = NULL;
+ mas->got = NULL;
+ mas->opd = NULL;
+ mas->unwind = NULL;
+ mas->gp = 0;
+ mas->next_got_entry = 0;
+
return 0;
}
@@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod)
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
{
- Elf64_Shdr *opd = mod->arch.opd;
+ struct mod_arch_specific *mas = &mod->arch;
- if (ptr < (void *)opd->sh_addr ||
- ptr >= (void *)(opd->sh_addr + opd->sh_size))
+ if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size)
return ptr;
return dereference_function_descriptor(ptr);
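The module.c change caches the .opd section's address and size in mod_arch_specific at module_finalize() time, because the Elf64_Shdr array it used to consult is freed once loading completes. The range test then reduces to a half-open bounds check; a sketch with a stand-in struct (the field names mirror the patch, the rest is illustrative):

	#include <stddef.h>

	struct mod_arch_stub {
		void	*opd_addr;	/* cached .opd base (section headers are gone by now) */
		size_t	opd_size;	/* cached .opd length in bytes */
	};

	/* Nonzero when ptr falls inside [opd_addr, opd_addr + opd_size). */
	static int in_opd(const struct mod_arch_stub *mas, const void *ptr)
	{
		const char *base = mas->opd_addr;
		const char *p = ptr;

		return p >= base && p < base + mas->opd_size;
	}

	int main(void)
	{
		char opd[64];
		struct mod_arch_stub mas = { opd, sizeof(opd) };

		return !in_opd(&mas, opd + 8);	/* exits 0: pointer is inside .opd */
	}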
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index d3e22c018b68..06d01a070aae 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -86,7 +86,7 @@ GLOBAL_ENTRY(ia64_pal_call_static)
mov ar.pfs = loc1
mov rp = loc0
;;
- srlz.d // seralize restoration of psr.l
+ srlz.d // serialize restoration of psr.l
br.ret.sptk.many b0
END(ia64_pal_call_static)
EXPORT_SYMBOL(ia64_pal_call_static)
@@ -194,7 +194,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static)
mov rp = loc0
;;
mov ar.rsc=loc4 // restore RSE configuration
- srlz.d // seralize restoration of psr.l
+ srlz.d // serialize restoration of psr.l
br.ret.sptk.many b0
END(ia64_pal_call_phys_static)
EXPORT_SYMBOL(ia64_pal_call_phys_static)
@@ -252,7 +252,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
mov rp = loc0
;;
mov ar.rsc=loc4 // restore RSE configuration
- srlz.d // seralize restoration of psr.l
+ srlz.d // serialize restoration of psr.l
br.ret.sptk.many b0
END(ia64_pal_call_phys_stacked)
EXPORT_SYMBOL(ia64_pal_call_phys_stacked)
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 7e1a1525e202..e56d63f4abf9 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -529,7 +529,7 @@ get_wchan (struct task_struct *p)
unsigned long ip;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
/*
@@ -542,7 +542,7 @@ get_wchan (struct task_struct *p)
*/
unw_init_from_blocked_task(&info, p);
do {
- if (p->state == TASK_RUNNING)
+ if (task_is_running(p))
return 0;
if (unw_unwind(&info) < 0)
return 0;
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index c3490ee2daa5..df28c7dd164f 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -641,11 +641,11 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
read_lock(&tasklist_lock);
if (child->sighand) {
spin_lock_irq(&child->sighand->siglock);
- if (child->state == TASK_STOPPED &&
+ if (READ_ONCE(child->__state) == TASK_STOPPED &&
!test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
set_notify_resume(child);
- child->state = TASK_TRACED;
+ WRITE_ONCE(child->__state, TASK_TRACED);
stopped = 1;
}
spin_unlock_irq(&child->sighand->siglock);
@@ -665,9 +665,9 @@ ptrace_attach_sync_user_rbs (struct task_struct *child)
read_lock(&tasklist_lock);
if (child->sighand) {
spin_lock_irq(&child->sighand->siglock);
- if (child->state == TASK_TRACED &&
+ if (READ_ONCE(child->__state) == TASK_TRACED &&
(child->signal->flags & SIGNAL_STOP_STOPPED)) {
- child->state = TASK_STOPPED;
+ WRITE_ONCE(child->__state, TASK_STOPPED);
}
spin_unlock_irq(&child->sighand->siglock);
}
@@ -2013,27 +2013,39 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
{
struct syscall_get_set_args *args = data;
struct pt_regs *pt = args->regs;
- unsigned long *krbs, cfm, ndirty;
+ unsigned long *krbs, cfm, ndirty, nlocals, nouts;
int i, count;
if (unw_unwind_to_user(info) < 0)
return;
+ /*
+ * We get here via a few paths:
+ * - break instruction: cfm is shared with caller.
+ * syscall args are in out= regs, locals are non-empty.
+ * - epc instruction: cfm is set by br.call
+ * locals don't exist.
+ *
+ * For both cases arguments are reachable in cfm.sof - cfm.sol.
+ * CFM: [ ... | sor: 17..14 | sol: 13..7 | sof: 6..0 ]
+ */
cfm = pt->cr_ifs;
+ nlocals = (cfm >> 7) & 0x7f; /* aka sol */
+ nouts = (cfm & 0x7f) - nlocals; /* aka sof - sol */
krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
count = 0;
if (in_syscall(pt))
- count = min_t(int, args->n, cfm & 0x7f);
+ count = min_t(int, args->n, nouts);
+ /* Iterate over outs. */
for (i = 0; i < count; i++) {
+ int j = ndirty + nlocals + i + args->i;
if (args->rw)
- *ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
- args->args[i];
+ *ia64_rse_skip_regs(krbs, j) = args->args[i];
else
- args->args[i] = *ia64_rse_skip_regs(krbs,
- ndirty + i + args->i);
+ args->args[i] = *ia64_rse_skip_regs(krbs, j);
}
if (!args->rw) {
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 49b488580939..d10f780c13b9 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -441,7 +441,6 @@ start_secondary (void *unused)
#endif
efi_map_pal_code();
cpu_init();
- preempt_disable();
smp_callin();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
diff --git a/arch/ia64/kernel/syscalls/Makefile b/arch/ia64/kernel/syscalls/Makefile
index bf4bda0f63eb..14f40ecf8b65 100644
--- a/arch/ia64/kernel/syscalls/Makefile
+++ b/arch/ia64/kernel/syscalls/Makefile
@@ -6,26 +6,18 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
syscall := $(src)/syscall.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abis_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '$(syshdr_offset_$(basetarget))'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --offset __NR_Linux $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
- '$(systbl_abis_$(basetarget))' \
- '$(systbl_abi_$(basetarget))' \
- '$(systbl_offset_$(basetarget))'
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@
-syshdr_offset_unistd_64 := __NR_Linux
$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-systbl_offset_syscall_table := 1024
$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index d89231166e19..6d07742c57b8 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -363,3 +363,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/ia64/kernel/syscalls/syscallhdr.sh b/arch/ia64/kernel/syscalls/syscallhdr.sh
deleted file mode 100644
index f407b6e53283..000000000000
--- a/arch/ia64/kernel/syscalls/syscallhdr.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_UAPI_ASM_IA64_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- printf "#ifndef %s\n" "${fileguard}"
- printf "#define %s\n" "${fileguard}"
- printf "\n"
-
- nxt=0
- while read nr abi name entry ; do
- if [ -z "$offset" ]; then
- printf "#define __NR_%s%s\t%s\n" \
- "${prefix}" "${name}" "${nr}"
- else
- printf "#define __NR_%s%s\t(%s + %s)\n" \
- "${prefix}" "${name}" "${offset}" "${nr}"
- fi
- nxt=$((nr+1))
- done
-
- printf "\n"
- printf "#ifdef __KERNEL__\n"
- printf "#define __NR_syscalls\t%s\n" "${nxt}"
- printf "#endif\n"
- printf "\n"
- printf "#endif /* %s */\n" "${fileguard}"
-) > "$out"
diff --git a/arch/ia64/kernel/syscalls/syscalltbl.sh b/arch/ia64/kernel/syscalls/syscalltbl.sh
deleted file mode 100644
index 85d78d9309ad..000000000000
--- a/arch/ia64/kernel/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-my_abi="$4"
-offset="$5"
-
-emit() {
- t_nxt="$1"
- t_nr="$2"
- t_entry="$3"
-
- while [ $t_nxt -lt $t_nr ]; do
- printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
- t_nxt=$((t_nxt+1))
- done
- printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
-}
-
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- nxt=0
- if [ -z "$offset" ]; then
- offset=0
- fi
-
- while read nr abi name entry ; do
- emit $((nxt+offset)) $((nr+offset)) $entry
- nxt=$((nr+1))
- done
-) > "$out"
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 09fc385c2acd..3639e0a7cb3b 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -3,9 +3,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * This file contains NUMA specific variables and functions which can
- * be split away from DISCONTIGMEM and are used on NUMA machines with
- * contiguous memory.
+ * This file contains NUMA specific variables and functions which are used on
+ * NUMA machines with contiguous memory.
* 2002/08/07 Erich Focht <efocht@ess.nec.de>
* Populate cpu entries in sysfs for non-numa systems as well
* Intel Corporation - Ashok Raj
diff --git a/arch/ia64/mm/Makefile b/arch/ia64/mm/Makefile
index 99a35039b548..c03f63c62ac4 100644
--- a/arch/ia64/mm/Makefile
+++ b/arch/ia64/mm/Makefile
@@ -7,6 +7,5 @@ obj-y := init.o fault.o tlb.o extable.o ioremap.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NUMA) += numa.o
-obj-$(CONFIG_DISCONTIGMEM) += discontig.o
obj-$(CONFIG_SPARSEMEM) += discontig.o
obj-$(CONFIG_FLATMEM) += contig.o
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 62fe80a16f42..42e025cfbd08 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -153,11 +153,7 @@ find_memory (void)
efi_memmap_walk(find_max_min_low_pfn, NULL);
max_pfn = max_low_pfn;
-#ifdef CONFIG_VIRTUAL_MEM_MAP
- efi_memmap_walk(filter_memory, register_active_ranges);
-#else
memblock_add_node(0, PFN_PHYS(max_low_pfn), 0);
-#endif
find_initrd();
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 03b3a02375ff..791d4176e4a6 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -95,7 +95,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
* acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
* called yet. Note that node 0 will also count all non-existent cpus.
*/
-static int __meminit early_nr_cpus_node(int node)
+static int early_nr_cpus_node(int node)
{
int cpu, n = 0;
@@ -110,7 +110,7 @@ static int __meminit early_nr_cpus_node(int node)
* compute_pernodesize - compute size of pernode data
* @node: the node id.
*/
-static unsigned long __meminit compute_pernodesize(int node)
+static unsigned long compute_pernodesize(int node)
{
unsigned long pernodesize = 0, cpus;
@@ -367,7 +367,7 @@ static void __init reserve_pernode_space(void)
}
}
-static void __meminit scatter_node_data(void)
+static void scatter_node_data(void)
{
pg_data_t **dst;
int node;
@@ -585,25 +585,6 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
}
}
-static void __init virtual_map_init(void)
-{
-#ifdef CONFIG_VIRTUAL_MEM_MAP
- int node;
-
- VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
- sizeof(struct page));
- vmem_map = (struct page *) VMALLOC_END;
- efi_memmap_walk(create_mem_map_page_table, NULL);
- printk("Virtual mem_map starts at 0x%p\n", vmem_map);
-
- for_each_online_node(node) {
- unsigned long pfn_offset = mem_data[node].min_pfn;
-
- NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
- }
-#endif
-}
-
/**
* paging_init - setup page tables
*
@@ -619,8 +600,6 @@ void __init paging_init(void)
sparse_init();
- virtual_map_init();
-
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
max_zone_pfns[ZONE_DMA32] = max_dma;
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index cd9766d2b6e0..02de2e70c587 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -84,18 +84,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
if (faulthandler_disabled() || !mm)
goto no_context;
-#ifdef CONFIG_VIRTUAL_MEM_MAP
- /*
- * If fault is in region 5 and we are in the kernel, we may already
- * have the mmap_lock (pfn_valid macro is called during mmap). There
- * is no vma for region 5 addr's anyway, so skip getting the semaphore
- * and go directly to the exception handling code.
- */
-
- if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
- goto bad_area_no_up;
-#endif
-
/*
* This is to handle the kprobes on user space access instructions
*/
@@ -213,9 +201,6 @@ retry:
bad_area:
mmap_read_unlock(mm);
-#ifdef CONFIG_VIRTUAL_MEM_MAP
- bad_area_no_up:
-#endif
if ((isr & IA64_ISR_SP)
|| ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
{
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index b331f94d20ac..f993cb36c062 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -25,7 +25,8 @@ unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);
pte_t *
-huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
+huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long sz)
{
unsigned long taddr = htlbpage_to_page(addr);
pgd_t *pgd;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 16d0d7d22657..064a967a7b6e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -43,13 +43,6 @@ extern void ia64_tlb_init (void);
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long VMALLOC_END = VMALLOC_END_INIT;
-EXPORT_SYMBOL(VMALLOC_END);
-struct page *vmem_map;
-EXPORT_SYMBOL(vmem_map);
-#endif
-
struct page *zero_page_memmap_ptr; /* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);
@@ -373,212 +366,6 @@ void ia64_mmu_init(void *my_cpu_data)
#endif
}
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-int vmemmap_find_next_valid_pfn(int node, int i)
-{
- unsigned long end_address, hole_next_pfn;
- unsigned long stop_address;
- pg_data_t *pgdat = NODE_DATA(node);
-
- end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
- end_address = PAGE_ALIGN(end_address);
- stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];
-
- do {
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- pgd = pgd_offset_k(end_address);
- if (pgd_none(*pgd)) {
- end_address += PGDIR_SIZE;
- continue;
- }
-
- p4d = p4d_offset(pgd, end_address);
- if (p4d_none(*p4d)) {
- end_address += P4D_SIZE;
- continue;
- }
-
- pud = pud_offset(p4d, end_address);
- if (pud_none(*pud)) {
- end_address += PUD_SIZE;
- continue;
- }
-
- pmd = pmd_offset(pud, end_address);
- if (pmd_none(*pmd)) {
- end_address += PMD_SIZE;
- continue;
- }
-
- pte = pte_offset_kernel(pmd, end_address);
-retry_pte:
- if (pte_none(*pte)) {
- end_address += PAGE_SIZE;
- pte++;
- if ((end_address < stop_address) &&
- (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
- goto retry_pte;
- continue;
- }
- /* Found next valid vmem_map page */
- break;
- } while (end_address < stop_address);
-
- end_address = min(end_address, stop_address);
- end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
- hole_next_pfn = end_address / sizeof(struct page);
- return hole_next_pfn - pgdat->node_start_pfn;
-}
-
-int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
-{
- unsigned long address, start_page, end_page;
- struct page *map_start, *map_end;
- int node;
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
- map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);
-
- start_page = (unsigned long) map_start & PAGE_MASK;
- end_page = PAGE_ALIGN((unsigned long) map_end);
- node = paddr_to_nid(__pa(start));
-
- for (address = start_page; address < end_page; address += PAGE_SIZE) {
- pgd = pgd_offset_k(address);
- if (pgd_none(*pgd)) {
- p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
- if (!p4d)
- goto err_alloc;
- pgd_populate(&init_mm, pgd, p4d);
- }
- p4d = p4d_offset(pgd, address);
-
- if (p4d_none(*p4d)) {
- pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
- if (!pud)
- goto err_alloc;
- p4d_populate(&init_mm, p4d, pud);
- }
- pud = pud_offset(p4d, address);
-
- if (pud_none(*pud)) {
- pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
- if (!pmd)
- goto err_alloc;
- pud_populate(&init_mm, pud, pmd);
- }
- pmd = pmd_offset(pud, address);
-
- if (pmd_none(*pmd)) {
- pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
- if (!pte)
- goto err_alloc;
- pmd_populate_kernel(&init_mm, pmd, pte);
- }
- pte = pte_offset_kernel(pmd, address);
-
- if (pte_none(*pte)) {
- void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
- node);
- if (!page)
- goto err_alloc;
- set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
- PAGE_KERNEL));
- }
- }
- return 0;
-
-err_alloc:
- panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
- __func__, PAGE_SIZE, PAGE_SIZE, node);
- return -ENOMEM;
-}
-
-struct memmap_init_callback_data {
- struct page *start;
- struct page *end;
- int nid;
- unsigned long zone;
-};
-
-static int __meminit
-virtual_memmap_init(u64 start, u64 end, void *arg)
-{
- struct memmap_init_callback_data *args;
- struct page *map_start, *map_end;
-
- args = (struct memmap_init_callback_data *) arg;
- map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
- map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);
-
- if (map_start < args->start)
- map_start = args->start;
- if (map_end > args->end)
- map_end = args->end;
-
- /*
- * We have to initialize "out of bounds" struct page elements that fit completely
- * on the same pages that were allocated for the "in bounds" elements because they
- * may be referenced later (and found to be "reserved").
- */
- map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
- map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
- / sizeof(struct page));
-
- if (map_start < map_end)
- memmap_init_range((unsigned long)(map_end - map_start),
- args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
- MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
- return 0;
-}
-
-void __meminit memmap_init_zone(struct zone *zone)
-{
- int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
- unsigned long start_pfn = zone->zone_start_pfn;
- unsigned long size = zone->spanned_pages;
-
- if (!vmem_map) {
- memmap_init_range(size, nid, zone_id, start_pfn, start_pfn + size,
- MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
- } else {
- struct page *start;
- struct memmap_init_callback_data args;
-
- start = pfn_to_page(start_pfn);
- args.start = start;
- args.end = start + size;
- args.nid = nid;
- args.zone = zone_id;
-
- efi_memmap_walk(virtual_memmap_init, &args);
- }
-}
-
-int
-ia64_pfn_valid (unsigned long pfn)
-{
- char byte;
- struct page *pg = pfn_to_page(pfn);
-
- return (__get_user(byte, (char __user *) pg) == 0)
- && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
- || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
-}
-EXPORT_SYMBOL(ia64_pfn_valid);
-
-#endif /* CONFIG_VIRTUAL_MEM_MAP */
-
int __init register_active_ranges(u64 start, u64 len, int nid)
{
u64 end = start + len;
@@ -644,13 +431,16 @@ mem_init (void)
* _before_ any drivers that may need the PCI DMA interface are
* initialized or bootmem has been freed.
*/
+ do {
#ifdef CONFIG_INTEL_IOMMU
- detect_intel_iommu();
- if (!iommu_detected)
+ detect_intel_iommu();
+ if (iommu_detected)
+ break;
#endif
#ifdef CONFIG_SWIOTLB
swiotlb_init(1);
#endif
+ } while (0);
#ifdef CONFIG_FLATMEM
BUG_ON(!mem_map);
@@ -659,7 +449,6 @@ mem_init (void)
set_max_mapnr(max_low_pfn);
high_memory = __va(max_low_pfn * PAGE_SIZE);
memblock_free_all();
- mem_init_print_info(NULL);
/*
* For fsyscall entrpoints with no light-weight handler, use the ordinary
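The mem_init() rework above is pure control flow: wrapping the probing in do { ... } while (0) lets the CONFIG_INTEL_IOMMU branch break out before swiotlb_init() when a hardware IOMMU is found, replacing the old fall-through `if (!iommu_detected)` that straddled an #endif. The same pattern in a compilable user-space sketch, with the config blocks stubbed as plain code:

	#include <stdio.h>

	static int iommu_detected;

	static void detect_intel_iommu(void)	{ iommu_detected = 1; }	/* stub */
	static void swiotlb_init(int verbose)	{ printf("swiotlb enabled (%d)\n", verbose); }

	int main(void)
	{
		do {
			/* stands in for the CONFIG_INTEL_IOMMU block */
			detect_intel_iommu();
			if (iommu_detected)
				break;		/* hardware IOMMU found: skip the bounce buffers */
			/* stands in for the CONFIG_SWIOTLB block */
			swiotlb_init(1);
		} while (0);
		return 0;
	}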
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 46b6e5f3a40f..d6579ec3ea32 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -3,9 +3,8 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * This file contains NUMA specific variables and functions which can
- * be split away from DISCONTIGMEM and are used on NUMA machines with
- * contiguous memory.
+ * This file contains NUMA specific variables and functions which are used on
+ * NUMA machines with contiguous memory.
*
* 2002/08/07 Erich Focht <efocht@ess.nec.de>
*/
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 372e4e69c43a..96989ad46f66 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -21,6 +21,7 @@ config M68K
select HAVE_AOUT if MMU
select HAVE_ASM_MODVERSIONS
select HAVE_DEBUG_BUGVERBOSE
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_HAS_NO_UNALIGNED
select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
select HAVE_IDE
select HAVE_MOD_ARCH_SPECIFIC
@@ -34,6 +35,7 @@ config M68K
select SET_FS
select UACCESS_MEMCPY if !MMU
select VIRT_TO_BUS
+ select ZONE_DMA
config CPU_BIG_ENDIAN
def_bool y
@@ -62,10 +64,6 @@ config TIME_LOW_RES
config NO_IOPORT_MAP
def_bool y
-config ZONE_DMA
- bool
- default y
-
config HZ
int
default 1000 if CLEOPATRA
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index f4d23977d2a5..29e946394fdb 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -408,10 +408,6 @@ config SINGLE_MEMORY_CHUNK
order" to save memory that could be wasted for unused memory map.
Say N if not sure.
-config ARCH_DISCONTIGMEM_ENABLE
- depends on BROKEN
- def_bool MMU && !SINGLE_MEMORY_CHUNK
-
config FORCE_MAX_ZONEORDER
int "Maximum zone order" if ADVANCED
depends on !SINGLE_MEMORY_CHUNK
@@ -451,11 +447,6 @@ config M68K_L2_CACHE
depends on MAC
default y
-config NODES_SHIFT
- int
- default "3"
- depends on DISCONTIGMEM
-
config CPU_HAS_NO_BITFIELDS
bool
@@ -553,4 +544,3 @@ config CACHE_COPYBACK
The ColdFire CPU cache is set into Copy-back mode.
endchoice
endif
-
diff --git a/arch/m68k/Kconfig.machine b/arch/m68k/Kconfig.machine
index 4d59ec2f5b8d..d964c1f27399 100644
--- a/arch/m68k/Kconfig.machine
+++ b/arch/m68k/Kconfig.machine
@@ -25,6 +25,9 @@ config ATARI
this kernel on an Atari, say Y here and browse the material
available in <file:Documentation/m68k>; otherwise say N.
+config ATARI_KBD_CORE
+ bool
+
config MAC
bool "Macintosh support"
depends on MMU
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index ea14f2046fb4..c54055a3d284 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -16,7 +16,7 @@
KBUILD_DEFCONFIG := multi_defconfig
-ifneq ($(SUBARCH),$(ARCH))
+ifdef cross_compiling
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := $(call cc-cross-prefix, \
m68k-linux-gnu- m68k-linux- m68k-unknown-linux-gnu-)
@@ -66,8 +66,7 @@ KBUILD_CFLAGS += $(cpuflags-y)
KBUILD_CFLAGS += -pipe -ffreestanding
ifdef CONFIG_MMU
-# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
-KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2
+KBUILD_CFLAGS += -ffixed-a2
else
# we can use a m68k-linux-gcc toolchain with these in place
KBUILD_CPPFLAGS += -DUTS_SYSNAME=\"uClinux\"
diff --git a/arch/m68k/atari/config.c b/arch/m68k/atari/config.c
index 44f9b5216ac9..261a0f57cc9a 100644
--- a/arch/m68k/atari/config.c
+++ b/arch/m68k/atari/config.c
@@ -875,16 +875,8 @@ static const struct resource atari_scsi_tt_rsrc[] __initconst = {
#define FALCON_IDE_BASE 0xfff00000
static const struct resource atari_falconide_rsrc[] __initconst = {
- {
- .flags = IORESOURCE_MEM,
- .start = FALCON_IDE_BASE,
- .end = FALCON_IDE_BASE + 0x39,
- },
- {
- .flags = IORESOURCE_IRQ,
- .start = IRQ_MFP_FSCSI,
- .end = IRQ_MFP_FSCSI,
- },
+ DEFINE_RES_MEM(FALCON_IDE_BASE, 0x38),
+ DEFINE_RES_MEM(FALCON_IDE_BASE + 0x38, 2),
};
int __init atari_platform_init(void)
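DEFINE_RES_MEM(start, size) from <linux/ioport.h> fills a struct resource with IORESOURCE_MEM and an inclusive .end of start + size - 1, so the two new entries cover exactly the same 0xfff00000..0xfff00039 window the deleted initializer did, now split into a 0x38-byte register block plus a 2-byte one (the IRQ entry is dropped, presumably because the platform driver polls). A user-space paraphrase of the macro with the arithmetic made visible:

	#include <stdio.h>

	#define IORESOURCE_MEM	0x00000200	/* value from <linux/ioport.h> */

	struct resource { unsigned long start, end, flags; };

	#define DEFINE_RES_MEM(_start, _size) \
		{ .start = (_start), .end = (_start) + (_size) - 1, .flags = IORESOURCE_MEM }

	#define FALCON_IDE_BASE	0xfff00000UL

	static const struct resource rsrc[] = {
		DEFINE_RES_MEM(FALCON_IDE_BASE, 0x38),		/* 0xfff00000..0xfff00037 */
		DEFINE_RES_MEM(FALCON_IDE_BASE + 0x38, 2),	/* 0xfff00038..0xfff00039 */
	};

	int main(void)
	{
		unsigned int i;

		for (i = 0; i < sizeof(rsrc) / sizeof(rsrc[0]); i++)
			printf("mem %#lx-%#lx\n", rsrc[i].start, rsrc[i].end);
		return 0;
	}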
diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
index 1068670cb741..7e44d0e9d0f8 100644
--- a/arch/m68k/atari/time.c
+++ b/arch/m68k/atari/time.c
@@ -317,10 +317,3 @@ int atari_tt_hwclk( int op, struct rtc_time *t )
return( 0 );
}
-
-/*
- * Local variables:
- * c-indent-level: 4
- * tab-width: 8
- * End:
- */
diff --git a/arch/m68k/coldfire/clk.c b/arch/m68k/coldfire/clk.c
index 076a9caa9557..2ed841e94111 100644
--- a/arch/m68k/coldfire/clk.c
+++ b/arch/m68k/coldfire/clk.c
@@ -73,20 +73,6 @@ struct clk_ops clk_ops1 = {
#endif /* MCFPM_PPMCR1 */
#endif /* MCFPM_PPMCR0 */
-struct clk *clk_get(struct device *dev, const char *id)
-{
- const char *clk_name = dev ? dev_name(dev) : id ? id : NULL;
- struct clk *clk;
- unsigned i;
-
- for (i = 0; (clk = mcf_clks[i]) != NULL; ++i)
- if (!strcmp(clk->name, clk_name))
- return clk;
- pr_warn("clk_get: didn't find clock %s\n", clk_name);
- return ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(clk_get);
-
int clk_enable(struct clk *clk)
{
unsigned long flags;
@@ -117,13 +103,6 @@ void clk_disable(struct clk *clk)
}
EXPORT_SYMBOL(clk_disable);
-void clk_put(struct clk *clk)
-{
- if (clk->enabled != 0)
- pr_warn("clk_put %s still enabled\n", clk->name);
-}
-EXPORT_SYMBOL(clk_put);
-
unsigned long clk_get_rate(struct clk *clk)
{
if (!clk)
diff --git a/arch/m68k/coldfire/intc-simr.c b/arch/m68k/coldfire/intc-simr.c
index 15c4b7a6e38f..f7c2c41b3156 100644
--- a/arch/m68k/coldfire/intc-simr.c
+++ b/arch/m68k/coldfire/intc-simr.c
@@ -68,9 +68,9 @@ static void intc_irq_mask(struct irq_data *d)
{
unsigned int irq = d->irq - MCFINT_VECBASE;
- if (MCFINTC2_SIMR && (irq > 128))
+ if (MCFINTC2_SIMR && (irq > 127))
__raw_writeb(irq - 128, MCFINTC2_SIMR);
- else if (MCFINTC1_SIMR && (irq > 64))
+ else if (MCFINTC1_SIMR && (irq > 63))
__raw_writeb(irq - 64, MCFINTC1_SIMR);
else
__raw_writeb(irq, MCFINTC0_SIMR);
@@ -80,9 +80,9 @@ static void intc_irq_unmask(struct irq_data *d)
{
unsigned int irq = d->irq - MCFINT_VECBASE;
- if (MCFINTC2_CIMR && (irq > 128))
+ if (MCFINTC2_CIMR && (irq > 127))
__raw_writeb(irq - 128, MCFINTC2_CIMR);
- else if (MCFINTC1_CIMR && (irq > 64))
+ else if (MCFINTC1_CIMR && (irq > 63))
__raw_writeb(irq - 64, MCFINTC1_CIMR);
else
__raw_writeb(irq, MCFINTC0_CIMR);
@@ -115,9 +115,9 @@ static unsigned int intc_irq_startup(struct irq_data *d)
}
irq -= MCFINT_VECBASE;
- if (MCFINTC2_ICR0 && (irq > 128))
+ if (MCFINTC2_ICR0 && (irq > 127))
__raw_writeb(5, MCFINTC2_ICR0 + irq - 128);
- else if (MCFINTC1_ICR0 && (irq > 64))
+ else if (MCFINTC1_ICR0 && (irq > 63))
__raw_writeb(5, MCFINTC1_ICR0 + irq - 64);
else
__raw_writeb(5, MCFINTC0_ICR0 + irq);
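All six intc-simr.c hunks fix the same off-by-one: the interrupt banks are 64 wide, so with the old `> 128` / `> 64` tests the first interrupt of each upper bank (offsets 64 and 128 above MCFINT_VECBASE) matched the wrong branch and was masked, unmasked, or configured in the controller below it. A boundary sketch of the corrected selection — bank_for() is illustrative, the register names in the patch are the real ones:

	#include <stdio.h>

	static const char *bank_for(unsigned int irq)
	{
		if (irq > 127)
			return "INTC2";		/* irq 128.. */
		else if (irq > 63)
			return "INTC1";		/* irq 64..127 */
		return "INTC0";			/* irq 0..63 */
	}

	int main(void)
	{
		/* With the old comparisons, irq 64 and irq 128 each landed one bank too low. */
		printf("irq 63  -> %s\n", bank_for(63));
		printf("irq 64  -> %s\n", bank_for(64));
		printf("irq 128 -> %s\n", bank_for(128));
		return 0;
	}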
diff --git a/arch/m68k/coldfire/m5206.c b/arch/m68k/coldfire/m5206.c
index 2f14ea95c391..5e726e94b5ab 100644
--- a/arch/m68k/coldfire/m5206.c
+++ b/arch/m68k/coldfire/m5206.c
@@ -10,6 +10,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -23,21 +24,15 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcftmr0,
- &clk_mcftmr1,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfi2c0,
- NULL
+static struct clk_lookup m5206_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -66,6 +61,8 @@ void __init config_BSP(char *commandp, int size)
mcf_mapirq2imr(28, MCFINTC_EINT4);
mcf_mapirq2imr(31, MCFINTC_EINT7);
m5206_i2c_init();
+
+ clkdev_add_table(m5206_clk_lookup, ARRAY_SIZE(m5206_clk_lookup));
}
/***************************************************************************/
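This m5206.c hunk is the template for every ColdFire variant that follows: the hand-rolled mcf_clks[] array and its private clk_get()/clk_put() (deleted from clk.c above) give way to the common clkdev layer, where each CLKDEV_INIT(dev_id, con_id, clk) entry matches on device name and/or connection id and clkdev_add_table() registers the whole block for the generic clk_get(dev, id). A toy model of the matching rule — the real lookup lives in drivers/clk/clkdev.c and additionally prefers the most specific match:

	#include <stdio.h>
	#include <string.h>

	struct clk { const char *label; };

	struct clk_lookup { const char *dev_id, *con_id; struct clk *clk; };

	static struct clk clk_pll = { "pll" }, clk_sys = { "sys" };

	static struct clk_lookup lookup[] = {
		{ NULL,        "pll.0", &clk_pll },	/* matched by connection id only */
		{ "mcfuart.0", NULL,    &clk_sys },	/* matched by device name only */
	};

	static struct clk *toy_clk_get(const char *dev, const char *id)
	{
		unsigned int i;

		for (i = 0; i < sizeof(lookup) / sizeof(lookup[0]); i++) {
			const struct clk_lookup *l = &lookup[i];

			if (l->dev_id && (!dev || strcmp(l->dev_id, dev)))
				continue;
			if (l->con_id && (!id || strcmp(l->con_id, id)))
				continue;
			return l->clk;
		}
		return NULL;
	}

	int main(void)
	{
		printf("%s\n", toy_clk_get("mcfuart.0", NULL)->label);	/* sys */
		printf("%s\n", toy_clk_get(NULL, "pll.0")->label);	/* pll */
		return 0;
	}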
diff --git a/arch/m68k/coldfire/m520x.c b/arch/m68k/coldfire/m520x.c
index b5b2a267dada..d2f96b40aee1 100644
--- a/arch/m68k/coldfire/m520x.c
+++ b/arch/m68k/coldfire/m520x.c
@@ -12,6 +12,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -48,31 +49,29 @@ DEFINE_CLK(0, "sys.0", 40, MCF_BUSCLK);
DEFINE_CLK(0, "gpio.0", 41, MCF_BUSCLK);
DEFINE_CLK(0, "sdram.0", 42, MCF_CLK);
-struct clk *mcf_clks[] = {
- &__clk_0_2, /* flexbus */
- &__clk_0_12, /* fec.0 */
- &__clk_0_17, /* edma */
- &__clk_0_18, /* intc.0 */
- &__clk_0_21, /* iack.0 */
- &__clk_0_22, /* imx1-i2c.0 */
- &__clk_0_23, /* mcfqspi.0 */
- &__clk_0_24, /* mcfuart.0 */
- &__clk_0_25, /* mcfuart.1 */
- &__clk_0_26, /* mcfuart.2 */
- &__clk_0_28, /* mcftmr.0 */
- &__clk_0_29, /* mcftmr.1 */
- &__clk_0_30, /* mcftmr.2 */
- &__clk_0_31, /* mcftmr.3 */
-
- &__clk_0_32, /* mcfpit.0 */
- &__clk_0_33, /* mcfpit.1 */
- &__clk_0_34, /* mcfeport.0 */
- &__clk_0_35, /* mcfwdt.0 */
- &__clk_0_36, /* pll.0 */
- &__clk_0_40, /* sys.0 */
- &__clk_0_41, /* gpio.0 */
- &__clk_0_42, /* sdram.0 */
- NULL,
+static struct clk_lookup m520x_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "flexbus", &__clk_0_2),
+ CLKDEV_INIT("fec.0", NULL, &__clk_0_12),
+ CLKDEV_INIT("edma", NULL, &__clk_0_17),
+ CLKDEV_INIT("intc.0", NULL, &__clk_0_18),
+ CLKDEV_INIT("iack.0", NULL, &__clk_0_21),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &__clk_0_22),
+ CLKDEV_INIT("mcfqspi.0", NULL, &__clk_0_23),
+ CLKDEV_INIT("mcfuart.0", NULL, &__clk_0_24),
+ CLKDEV_INIT("mcfuart.1", NULL, &__clk_0_25),
+ CLKDEV_INIT("mcfuart.2", NULL, &__clk_0_26),
+ CLKDEV_INIT("mcftmr.0", NULL, &__clk_0_28),
+ CLKDEV_INIT("mcftmr.1", NULL, &__clk_0_29),
+ CLKDEV_INIT("mcftmr.2", NULL, &__clk_0_30),
+ CLKDEV_INIT("mcftmr.3", NULL, &__clk_0_31),
+ CLKDEV_INIT("mcfpit.0", NULL, &__clk_0_32),
+ CLKDEV_INIT("mcfpit.1", NULL, &__clk_0_33),
+ CLKDEV_INIT("mcfeport.0", NULL, &__clk_0_34),
+ CLKDEV_INIT("mcfwdt.0", NULL, &__clk_0_35),
+ CLKDEV_INIT(NULL, "pll.0", &__clk_0_36),
+ CLKDEV_INIT(NULL, "sys.0", &__clk_0_40),
+ CLKDEV_INIT("gpio.0", NULL, &__clk_0_41),
+ CLKDEV_INIT("sdram.0", NULL, &__clk_0_42),
};
static struct clk * const enable_clks[] __initconst = {
@@ -115,6 +114,8 @@ static void __init m520x_clk_init(void)
/* make sure these clocks are disabled */
for (i = 0; i < ARRAY_SIZE(disable_clks); ++i)
__clk_init_disabled(disable_clks[i]);
+
+ clkdev_add_table(m520x_clk_lookup, ARRAY_SIZE(m520x_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m523x.c b/arch/m68k/coldfire/m523x.c
index ddf2496ed117..193c178162c1 100644
--- a/arch/m68k/coldfire/m523x.c
+++ b/arch/m68k/coldfire/m523x.c
@@ -13,6 +13,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -26,31 +27,20 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
-DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
-DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
-DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
-DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
-DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcfpit0,
- &clk_mcfpit1,
- &clk_mcfpit2,
- &clk_mcfpit3,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfuart2,
- &clk_mcfqspi0,
- &clk_fec0,
- &clk_mcfi2c0,
- NULL
+
+struct clk_lookup m523x_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcfpit.0", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.1", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.2", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.3", NULL, &clk_pll),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.2", NULL, &clk_sys),
+ CLKDEV_INIT("mcfqspi.0", NULL, &clk_sys),
+ CLKDEV_INIT("fec.0", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -100,6 +90,8 @@ void __init config_BSP(char *commandp, int size)
m523x_fec_init();
m523x_qspi_init();
m523x_i2c_init();
+
+ clkdev_add_table(m523x_clk_lookup, ARRAY_SIZE(m523x_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m5249.c b/arch/m68k/coldfire/m5249.c
index 0590f8c421f1..6d66972de214 100644
--- a/arch/m68k/coldfire/m5249.c
+++ b/arch/m68k/coldfire/m5249.c
@@ -9,6 +9,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -23,25 +24,17 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c1, "imx1-i2c.1", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcftmr0,
- &clk_mcftmr1,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfqspi0,
- &clk_mcfi2c0,
- &clk_mcfi2c1,
- NULL
+
+struct clk_lookup m5249_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfqspi.0", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.1", NULL, &clk_sys),
};
/***************************************************************************/
@@ -137,6 +130,8 @@ void __init config_BSP(char *commandp, int size)
#endif
m5249_qspi_init();
m5249_i2c_init();
+
+ clkdev_add_table(m5249_clk_lookup, ARRAY_SIZE(m5249_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m525x.c b/arch/m68k/coldfire/m525x.c
index 1772359c416c..2c4d2ca2f20d 100644
--- a/arch/m68k/coldfire/m525x.c
+++ b/arch/m68k/coldfire/m525x.c
@@ -9,6 +9,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -23,25 +24,17 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c1, "imx1-i2c.1", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcftmr0,
- &clk_mcftmr1,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfqspi0,
- &clk_mcfi2c0,
- &clk_mcfi2c1,
- NULL
+
+static struct clk_lookup m525x_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfqspi.0", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.1", NULL, &clk_sys),
};
/***************************************************************************/
@@ -88,6 +81,8 @@ void __init config_BSP(char *commandp, int size)
m525x_qspi_init();
m525x_i2c_init();
+
+ clkdev_add_table(m525x_clk_lookup, ARRAY_SIZE(m525x_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m5272.c b/arch/m68k/coldfire/m5272.c
index 6b3ab583c698..734dab657fe3 100644
--- a/arch/m68k/coldfire/m5272.c
+++ b/arch/m68k/coldfire/m5272.c
@@ -10,6 +10,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -34,27 +35,18 @@ unsigned char ledbank = 0xff;
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
-DEFINE_CLK(mcftmr2, "mcftmr.2", MCF_BUSCLK);
-DEFINE_CLK(mcftmr3, "mcftmr.3", MCF_BUSCLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
-DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcftmr0,
- &clk_mcftmr1,
- &clk_mcftmr2,
- &clk_mcftmr3,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfqspi0,
- &clk_fec0,
- NULL
+
+static struct clk_lookup m5272_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.2", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.3", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfqspi.0", NULL, &clk_sys),
+ CLKDEV_INIT("fec.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -128,6 +120,7 @@ static int __init init_BSP(void)
{
m5272_uarts_init();
fixed_phy_add(PHY_POLL, 0, &nettel_fixed_phy_status);
+ clkdev_add_table(m5272_clk_lookup, ARRAY_SIZE(m5272_clk_lookup));
return 0;
}
diff --git a/arch/m68k/coldfire/m527x.c b/arch/m68k/coldfire/m527x.c
index cad462df6861..037f3e520acc 100644
--- a/arch/m68k/coldfire/m527x.c
+++ b/arch/m68k/coldfire/m527x.c
@@ -13,6 +13,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -27,33 +28,21 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
-DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
-DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
-DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
-DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
-DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
-DEFINE_CLK(fec1, "fec.1", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcfpit0,
- &clk_mcfpit1,
- &clk_mcfpit2,
- &clk_mcfpit3,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfuart2,
- &clk_mcfqspi0,
- &clk_fec0,
- &clk_fec1,
- &clk_mcfi2c0,
- NULL
+
+static struct clk_lookup m527x_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcfpit.0", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.1", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.2", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.3", NULL, &clk_pll),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.2", NULL, &clk_sys),
+ CLKDEV_INIT("mcfqspi.0", NULL, &clk_sys),
+ CLKDEV_INIT("fec.0", NULL, &clk_sys),
+ CLKDEV_INIT("fec.1", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -151,6 +140,7 @@ void __init config_BSP(char *commandp, int size)
m527x_fec_init();
m527x_qspi_init();
m527x_i2c_init();
+ clkdev_add_table(m527x_clk_lookup, ARRAY_SIZE(m527x_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m528x.c b/arch/m68k/coldfire/m528x.c
index 7ad3193887e8..51a6a6236e12 100644
--- a/arch/m68k/coldfire/m528x.c
+++ b/arch/m68k/coldfire/m528x.c
@@ -13,6 +13,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -28,31 +29,20 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcfpit0, "mcfpit.0", MCF_CLK);
-DEFINE_CLK(mcfpit1, "mcfpit.1", MCF_CLK);
-DEFINE_CLK(mcfpit2, "mcfpit.2", MCF_CLK);
-DEFINE_CLK(mcfpit3, "mcfpit.3", MCF_CLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
-DEFINE_CLK(mcfqspi0, "mcfqspi.0", MCF_BUSCLK);
-DEFINE_CLK(fec0, "fec.0", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcfpit0,
- &clk_mcfpit1,
- &clk_mcfpit2,
- &clk_mcfpit3,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfuart2,
- &clk_mcfqspi0,
- &clk_fec0,
- &clk_mcfi2c0,
- NULL
+
+static struct clk_lookup m528x_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcfpit.0", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.1", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.2", NULL, &clk_pll),
+ CLKDEV_INIT("mcfpit.3", NULL, &clk_pll),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.2", NULL, &clk_sys),
+ CLKDEV_INIT("mcfqspi.0", NULL, &clk_sys),
+ CLKDEV_INIT("fec.0", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -146,6 +136,8 @@ void __init config_BSP(char *commandp, int size)
m528x_fec_init();
m528x_qspi_init();
m528x_i2c_init();
+
+ clkdev_add_table(m528x_clk_lookup, ARRAY_SIZE(m528x_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m5307.c b/arch/m68k/coldfire/m5307.c
index 64b4b1fd34ff..4ed2e43ab3ad 100644
--- a/arch/m68k/coldfire/m5307.c
+++ b/arch/m68k/coldfire/m5307.c
@@ -10,6 +10,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -32,21 +33,15 @@ unsigned char ledbank = 0xff;
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcftmr0,
- &clk_mcftmr1,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfi2c0,
- NULL
+
+static struct clk_lookup m5307_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -88,6 +83,8 @@ void __init config_BSP(char *commandp, int size)
wdebug(MCFDEBUG_CSR, MCFDEBUG_CSR_PSTCLK);
#endif
m5307_i2c_init();
+
+ clkdev_add_table(m5307_clk_lookup, ARRAY_SIZE(m5307_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m53xx.c b/arch/m68k/coldfire/m53xx.c
index 075722c0c4f0..bd033e1975bf 100644
--- a/arch/m68k/coldfire/m53xx.c
+++ b/arch/m68k/coldfire/m53xx.c
@@ -13,6 +13,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -65,45 +66,42 @@ DEFINE_CLK(1, "mdha.0", 32, MCF_CLK);
DEFINE_CLK(1, "skha.0", 33, MCF_CLK);
DEFINE_CLK(1, "rng.0", 34, MCF_CLK);
-struct clk *mcf_clks[] = {
- &__clk_0_2, /* flexbus */
- &__clk_0_8, /* mcfcan.0 */
- &__clk_0_12, /* fec.0 */
- &__clk_0_17, /* edma */
- &__clk_0_18, /* intc.0 */
- &__clk_0_19, /* intc.1 */
- &__clk_0_21, /* iack.0 */
- &__clk_0_22, /* imx1-i2c.0 */
- &__clk_0_23, /* mcfqspi.0 */
- &__clk_0_24, /* mcfuart.0 */
- &__clk_0_25, /* mcfuart.1 */
- &__clk_0_26, /* mcfuart.2 */
- &__clk_0_28, /* mcftmr.0 */
- &__clk_0_29, /* mcftmr.1 */
- &__clk_0_30, /* mcftmr.2 */
- &__clk_0_31, /* mcftmr.3 */
-
- &__clk_0_32, /* mcfpit.0 */
- &__clk_0_33, /* mcfpit.1 */
- &__clk_0_34, /* mcfpit.2 */
- &__clk_0_35, /* mcfpit.3 */
- &__clk_0_36, /* mcfpwm.0 */
- &__clk_0_37, /* mcfeport.0 */
- &__clk_0_38, /* mcfwdt.0 */
- &__clk_0_40, /* sys.0 */
- &__clk_0_41, /* gpio.0 */
- &__clk_0_42, /* mcfrtc.0 */
- &__clk_0_43, /* mcflcd.0 */
- &__clk_0_44, /* mcfusb-otg.0 */
- &__clk_0_45, /* mcfusb-host.0 */
- &__clk_0_46, /* sdram.0 */
- &__clk_0_47, /* ssi.0 */
- &__clk_0_48, /* pll.0 */
-
- &__clk_1_32, /* mdha.0 */
- &__clk_1_33, /* skha.0 */
- &__clk_1_34, /* rng.0 */
- NULL,
+static struct clk_lookup m53xx_clk_lookup[] = {
+ CLKDEV_INIT("flexbus", NULL, &__clk_0_2),
+ CLKDEV_INIT("mcfcan.0", NULL, &__clk_0_8),
+ CLKDEV_INIT("fec.0", NULL, &__clk_0_12),
+ CLKDEV_INIT("edma", NULL, &__clk_0_17),
+ CLKDEV_INIT("intc.0", NULL, &__clk_0_18),
+ CLKDEV_INIT("intc.1", NULL, &__clk_0_19),
+ CLKDEV_INIT("iack.0", NULL, &__clk_0_21),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &__clk_0_22),
+ CLKDEV_INIT("mcfqspi.0", NULL, &__clk_0_23),
+ CLKDEV_INIT("mcfuart.0", NULL, &__clk_0_24),
+ CLKDEV_INIT("mcfuart.1", NULL, &__clk_0_25),
+ CLKDEV_INIT("mcfuart.2", NULL, &__clk_0_26),
+ CLKDEV_INIT("mcftmr.0", NULL, &__clk_0_28),
+ CLKDEV_INIT("mcftmr.1", NULL, &__clk_0_29),
+ CLKDEV_INIT("mcftmr.2", NULL, &__clk_0_30),
+ CLKDEV_INIT("mcftmr.3", NULL, &__clk_0_31),
+ CLKDEV_INIT("mcfpit.0", NULL, &__clk_0_32),
+ CLKDEV_INIT("mcfpit.1", NULL, &__clk_0_33),
+ CLKDEV_INIT("mcfpit.2", NULL, &__clk_0_34),
+ CLKDEV_INIT("mcfpit.3", NULL, &__clk_0_35),
+ CLKDEV_INIT("mcfpwm.0", NULL, &__clk_0_36),
+ CLKDEV_INIT("mcfeport.0", NULL, &__clk_0_37),
+ CLKDEV_INIT("mcfwdt.0", NULL, &__clk_0_38),
+ CLKDEV_INIT(NULL, "sys.0", &__clk_0_40),
+ CLKDEV_INIT("gpio.0", NULL, &__clk_0_41),
+ CLKDEV_INIT("mcfrtc.0", NULL, &__clk_0_42),
+ CLKDEV_INIT("mcflcd.0", NULL, &__clk_0_43),
+ CLKDEV_INIT("mcfusb-otg.0", NULL, &__clk_0_44),
+ CLKDEV_INIT("mcfusb-host.0", NULL, &__clk_0_45),
+ CLKDEV_INIT("sdram.0", NULL, &__clk_0_46),
+ CLKDEV_INIT("ssi.0", NULL, &__clk_0_47),
+ CLKDEV_INIT(NULL, "pll.0", &__clk_0_48),
+ CLKDEV_INIT("mdha.0", NULL, &__clk_1_32),
+ CLKDEV_INIT("skha.0", NULL, &__clk_1_33),
+ CLKDEV_INIT("rng.0", NULL, &__clk_1_34),
};
static struct clk * const enable_clks[] __initconst = {
@@ -158,6 +156,8 @@ static void __init m53xx_clk_init(void)
/* make sure these clocks are disabled */
for (i = 0; i < ARRAY_SIZE(disable_clks); ++i)
__clk_init_disabled(disable_clks[i]);
+
+ clkdev_add_table(m53xx_clk_lookup, ARRAY_SIZE(m53xx_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m5407.c b/arch/m68k/coldfire/m5407.c
index 0400d76115a1..b32efb3042a2 100644
--- a/arch/m68k/coldfire/m5407.c
+++ b/arch/m68k/coldfire/m5407.c
@@ -10,6 +10,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -23,21 +24,15 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr0, "mcftmr.0", MCF_BUSCLK);
-DEFINE_CLK(mcftmr1, "mcftmr.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcftmr0,
- &clk_mcftmr1,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfi2c0,
- NULL
+static struct clk_lookup m5407_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcftmr.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcftmr.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -63,6 +58,8 @@ void __init config_BSP(char *commandp, int size)
mcf_mapirq2imr(29, MCFINTC_EINT5);
mcf_mapirq2imr(31, MCFINTC_EINT7);
m5407_i2c_init();
+
+ clkdev_add_table(m5407_clk_lookup, ARRAY_SIZE(m5407_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/coldfire/m5441x.c b/arch/m68k/coldfire/m5441x.c
index 1e5259a652d1..ce14693d18b6 100644
--- a/arch/m68k/coldfire/m5441x.c
+++ b/arch/m68k/coldfire/m5441x.c
@@ -5,6 +5,7 @@
* (C) Copyright Steven King <sfking@fdwdc.com>
*/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -78,72 +79,67 @@ DEFINE_CLK(2, "ipg.0", 0, MCF_CLK);
DEFINE_CLK(2, "ahb.0", 1, MCF_CLK);
DEFINE_CLK(2, "per.0", 2, MCF_CLK);
-struct clk *mcf_clks[] = {
- &__clk_0_2,
- &__clk_0_8,
- &__clk_0_9,
- &__clk_0_14,
- &__clk_0_15,
- &__clk_0_17,
- &__clk_0_18,
- &__clk_0_19,
- &__clk_0_20,
- &__clk_0_22,
- &__clk_0_23,
- &__clk_0_24,
- &__clk_0_25,
- &__clk_0_26,
- &__clk_0_27,
- &__clk_0_28,
- &__clk_0_29,
- &__clk_0_30,
- &__clk_0_31,
- &__clk_0_32,
- &__clk_0_33,
- &__clk_0_34,
- &__clk_0_35,
- &__clk_0_37,
- &__clk_0_38,
- &__clk_0_39,
- &__clk_0_42,
- &__clk_0_43,
- &__clk_0_44,
- &__clk_0_45,
- &__clk_0_46,
- &__clk_0_47,
- &__clk_0_48,
- &__clk_0_49,
- &__clk_0_50,
- &__clk_0_51,
- &__clk_0_53,
- &__clk_0_54,
- &__clk_0_55,
- &__clk_0_56,
- &__clk_0_63,
-
- &__clk_1_2,
- &__clk_1_4,
- &__clk_1_5,
- &__clk_1_6,
- &__clk_1_7,
- &__clk_1_24,
- &__clk_1_25,
- &__clk_1_26,
- &__clk_1_27,
- &__clk_1_28,
- &__clk_1_29,
- &__clk_1_34,
- &__clk_1_36,
- &__clk_1_37,
-
- &__clk_2_0,
- &__clk_2_1,
- &__clk_2_2,
-
- NULL,
+static struct clk_lookup m5441x_clk_lookup[] = {
+ CLKDEV_INIT("flexbus", NULL, &__clk_0_2),
+ CLKDEV_INIT("mcfcan.0", NULL, &__clk_0_8),
+ CLKDEV_INIT("mcfcan.1", NULL, &__clk_0_9),
+ CLKDEV_INIT("imx1-i2c.1", NULL, &__clk_0_14),
+ CLKDEV_INIT("mcfdspi.1", NULL, &__clk_0_15),
+ CLKDEV_INIT("edma", NULL, &__clk_0_17),
+ CLKDEV_INIT("intc.0", NULL, &__clk_0_18),
+ CLKDEV_INIT("intc.1", NULL, &__clk_0_19),
+ CLKDEV_INIT("intc.2", NULL, &__clk_0_20),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &__clk_0_22),
+ CLKDEV_INIT("fsl-dspi.0", NULL, &__clk_0_23),
+ CLKDEV_INIT("mcfuart.0", NULL, &__clk_0_24),
+ CLKDEV_INIT("mcfuart.1", NULL, &__clk_0_25),
+ CLKDEV_INIT("mcfuart.2", NULL, &__clk_0_26),
+ CLKDEV_INIT("mcfuart.3", NULL, &__clk_0_27),
+ CLKDEV_INIT("mcftmr.0", NULL, &__clk_0_28),
+ CLKDEV_INIT("mcftmr.1", NULL, &__clk_0_29),
+ CLKDEV_INIT("mcftmr.2", NULL, &__clk_0_30),
+ CLKDEV_INIT("mcftmr.3", NULL, &__clk_0_31),
+ CLKDEV_INIT("mcfpit.0", NULL, &__clk_0_32),
+ CLKDEV_INIT("mcfpit.1", NULL, &__clk_0_33),
+ CLKDEV_INIT("mcfpit.2", NULL, &__clk_0_34),
+ CLKDEV_INIT("mcfpit.3", NULL, &__clk_0_35),
+ CLKDEV_INIT("mcfeport.0", NULL, &__clk_0_37),
+ CLKDEV_INIT("mcfadc.0", NULL, &__clk_0_38),
+ CLKDEV_INIT("mcfdac.0", NULL, &__clk_0_39),
+ CLKDEV_INIT("mcfrtc.0", NULL, &__clk_0_42),
+ CLKDEV_INIT("mcfsim.0", NULL, &__clk_0_43),
+ CLKDEV_INIT("mcfusb-otg.0", NULL, &__clk_0_44),
+ CLKDEV_INIT("mcfusb-host.0", NULL, &__clk_0_45),
+ CLKDEV_INIT("mcfddr-sram.0", NULL, &__clk_0_46),
+ CLKDEV_INIT("mcfssi.0", NULL, &__clk_0_47),
+ CLKDEV_INIT(NULL, "pll.0", &__clk_0_48),
+ CLKDEV_INIT("mcfrng.0", NULL, &__clk_0_49),
+ CLKDEV_INIT("mcfssi.1", NULL, &__clk_0_50),
+ CLKDEV_INIT("sdhci-esdhc-mcf.0", NULL, &__clk_0_51),
+ CLKDEV_INIT("enet-fec.0", NULL, &__clk_0_53),
+ CLKDEV_INIT("enet-fec.1", NULL, &__clk_0_54),
+ CLKDEV_INIT("switch.0", NULL, &__clk_0_55),
+ CLKDEV_INIT("switch.1", NULL, &__clk_0_56),
+ CLKDEV_INIT("nand.0", NULL, &__clk_0_63),
+ CLKDEV_INIT("mcfow.0", NULL, &__clk_1_2),
+ CLKDEV_INIT("imx1-i2c.2", NULL, &__clk_1_4),
+ CLKDEV_INIT("imx1-i2c.3", NULL, &__clk_1_5),
+ CLKDEV_INIT("imx1-i2c.4", NULL, &__clk_1_6),
+ CLKDEV_INIT("imx1-i2c.5", NULL, &__clk_1_7),
+ CLKDEV_INIT("mcfuart.4", NULL, &__clk_1_24),
+ CLKDEV_INIT("mcfuart.5", NULL, &__clk_1_25),
+ CLKDEV_INIT("mcfuart.6", NULL, &__clk_1_26),
+ CLKDEV_INIT("mcfuart.7", NULL, &__clk_1_27),
+ CLKDEV_INIT("mcfuart.8", NULL, &__clk_1_28),
+ CLKDEV_INIT("mcfuart.9", NULL, &__clk_1_29),
+ CLKDEV_INIT("mcfpwm.0", NULL, &__clk_1_34),
+ CLKDEV_INIT(NULL, "sys.0", &__clk_1_36),
+ CLKDEV_INIT("gpio.0", NULL, &__clk_1_37),
+ CLKDEV_INIT("ipg.0", NULL, &__clk_2_0),
+ CLKDEV_INIT("ahb.0", NULL, &__clk_2_1),
+ CLKDEV_INIT("per.0", NULL, &__clk_2_2),
};
-
static struct clk * const enable_clks[] __initconst = {
/* make sure these clocks are enabled */
&__clk_0_15, /* dspi.1 */
@@ -228,6 +224,8 @@ static void __init m5441x_clk_init(void)
/* make sure these clocks are disabled */
for (i = 0; i < ARRAY_SIZE(disable_clks); ++i)
__clk_init_disabled(disable_clks[i]);
+
+ clkdev_add_table(m5411x_clk_lookup, ARRAY_SIZE(m5411x_clk_lookup));
}
static void __init m5441x_uarts_init(void)
diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c
index 360c723c0ae6..8e3c8fee8327 100644
--- a/arch/m68k/coldfire/m54xx.c
+++ b/arch/m68k/coldfire/m54xx.c
@@ -9,6 +9,7 @@
/***************************************************************************/
+#include <linux/clkdev.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
@@ -32,25 +33,17 @@
DEFINE_CLK(pll, "pll.0", MCF_CLK);
DEFINE_CLK(sys, "sys.0", MCF_BUSCLK);
-DEFINE_CLK(mcfslt0, "mcfslt.0", MCF_BUSCLK);
-DEFINE_CLK(mcfslt1, "mcfslt.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart0, "mcfuart.0", MCF_BUSCLK);
-DEFINE_CLK(mcfuart1, "mcfuart.1", MCF_BUSCLK);
-DEFINE_CLK(mcfuart2, "mcfuart.2", MCF_BUSCLK);
-DEFINE_CLK(mcfuart3, "mcfuart.3", MCF_BUSCLK);
-DEFINE_CLK(mcfi2c0, "imx1-i2c.0", MCF_BUSCLK);
-
-struct clk *mcf_clks[] = {
- &clk_pll,
- &clk_sys,
- &clk_mcfslt0,
- &clk_mcfslt1,
- &clk_mcfuart0,
- &clk_mcfuart1,
- &clk_mcfuart2,
- &clk_mcfuart3,
- &clk_mcfi2c0,
- NULL
+
+static struct clk_lookup m54xx_clk_lookup[] = {
+ CLKDEV_INIT(NULL, "pll.0", &clk_pll),
+ CLKDEV_INIT(NULL, "sys.0", &clk_sys),
+ CLKDEV_INIT("mcfslt.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfslt.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.0", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.1", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.2", NULL, &clk_sys),
+ CLKDEV_INIT("mcfuart.3", NULL, &clk_sys),
+ CLKDEV_INIT("imx1-i2c.0", NULL, &clk_sys),
};
/***************************************************************************/
@@ -100,6 +93,8 @@ void __init config_BSP(char *commandp, int size)
mach_sched_init = hw_timer_init;
m54xx_uarts_init();
m54xx_i2c_init();
+
+ clkdev_add_table(m54xx_clk_lookup, ARRAY_SIZE(m54xx_clk_lookup));
}
/***************************************************************************/
diff --git a/arch/m68k/configs/amcore_defconfig b/arch/m68k/configs/amcore_defconfig
index 3a84f24d41c8..6d9ed2198170 100644
--- a/arch/m68k/configs/amcore_defconfig
+++ b/arch/m68k/configs/amcore_defconfig
@@ -60,7 +60,6 @@ CONFIG_DM9000=y
# CONFIG_VT is not set
# CONFIG_UNIX98_PTYS is not set
# CONFIG_DEVMEM is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_MCF=y
CONFIG_SERIAL_MCF_BAUDRATE=115200
CONFIG_SERIAL_MCF_CONSOLE=y
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 786656090c50..0a2cacf7be08 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -85,7 +85,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -207,6 +206,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -253,7 +253,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -323,11 +322,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GAYLE=y
-CONFIG_BLK_DEV_BUDDHA=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -344,6 +338,11 @@ CONFIG_GVP11_SCSI=y
CONFIG_SCSI_A4000T=y
CONFIG_SCSI_ZORRO7XX=y
CONFIG_SCSI_ZORRO_ESP=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_GAYLE=y
+CONFIG_PATA_BUDDHA=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -563,6 +562,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -580,12 +580,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -598,7 +594,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
@@ -632,6 +627,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 9bb12be4a38e..4dc6dcfaf28a 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -81,7 +81,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -203,6 +202,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -249,7 +249,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -519,6 +518,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -536,12 +536,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -554,7 +550,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
@@ -588,6 +583,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index 413232626d9d..23d910a692ab 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -88,7 +88,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -210,6 +209,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -256,7 +256,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -324,10 +323,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_FALCON_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -339,6 +334,10 @@ CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_ATARI_SCSI=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_FALCON=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -541,6 +540,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -558,12 +558,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -576,7 +572,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
@@ -610,6 +605,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 819cc70b06d8..2c3f42833846 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -78,7 +78,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -200,6 +199,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -246,7 +246,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -512,6 +511,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -529,12 +529,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -547,7 +543,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
@@ -581,6 +576,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 8f8d5968713b..5b1898d4b249 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -80,7 +80,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -202,6 +201,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -248,7 +248,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -521,6 +520,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -538,12 +538,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -556,7 +552,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
@@ -590,6 +585,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index bf15e6c1c939..9606ccd8dafa 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -79,7 +79,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -201,6 +200,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -247,7 +247,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -315,11 +314,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_MAC_IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -332,6 +326,10 @@ CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
CONFIG_MAC_SCSI=y
CONFIG_SCSI_MAC_ESP=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_PLATFORM=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -544,6 +542,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -561,12 +560,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -579,7 +574,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
@@ -613,6 +607,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 5466d48fcd9d..3175ba5007e1 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -99,7 +99,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -221,6 +220,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -267,7 +267,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -344,15 +343,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_GAYLE=y
-CONFIG_BLK_DEV_BUDDHA=y
-CONFIG_BLK_DEV_FALCON_IDE=y
-CONFIG_BLK_DEV_MAC_IDE=y
-CONFIG_BLK_DEV_Q40IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -376,6 +366,13 @@ CONFIG_MVME147_SCSI=y
CONFIG_MVME16x_SCSI=y
CONFIG_BVME6000_SCSI=y
CONFIG_SUN3X_ESP=y
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_FALCON=y
+CONFIG_PATA_GAYLE=y
+CONFIG_PATA_BUDDHA=y
+CONFIG_PATA_PLATFORM=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -630,6 +627,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -647,12 +645,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -665,7 +659,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
@@ -699,6 +692,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 93c305918838..793085f00c99 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -77,7 +77,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -199,6 +198,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -245,7 +245,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -511,6 +510,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -528,12 +528,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -546,7 +542,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
@@ -580,6 +575,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index cacd6c617f69..56fbac7943b2 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -78,7 +78,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -200,6 +199,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -246,7 +246,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -512,6 +511,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -529,12 +529,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -547,7 +543,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
@@ -581,6 +576,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index 3ae421cb24a4..0e15431b65e2 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -79,7 +79,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -201,6 +200,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -247,7 +247,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -314,10 +313,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
CONFIG_DUMMY_IRQ=m
-CONFIG_IDE=y
-CONFIG_IDE_GD_ATAPI=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_Q40IDE=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -328,6 +323,10 @@ CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SAS_ATTRS=m
CONFIG_ISCSI_TCP=m
CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_FALCON=y
CONFIG_MD=y
CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
@@ -530,6 +529,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -547,12 +547,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -565,7 +561,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
@@ -599,6 +594,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 6da97e28c48e..3490a05f29b8 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -75,7 +75,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -197,6 +196,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -243,7 +243,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -514,6 +513,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -531,12 +531,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -549,7 +545,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
@@ -582,6 +577,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index f54481bb789a..4e92c8c332fc 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -75,7 +75,6 @@ CONFIG_IPV6_VTI=m
CONFIG_IPV6_GRE=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
-CONFIG_NF_LOG_NETDEV=m
CONFIG_NF_CONNTRACK_ZONES=y
# CONFIG_NF_CONNTRACK_PROCFS is not set
# CONFIG_NF_CT_PROTO_DCCP is not set
@@ -197,6 +196,7 @@ CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_NF_FLOW_TABLE_IPV4=m
CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -243,7 +243,6 @@ CONFIG_IP6_NF_TARGET_NPT=m
CONFIG_NF_TABLES_BRIDGE=m
CONFIG_NFT_BRIDGE_META=m
CONFIG_NFT_BRIDGE_REJECT=m
-CONFIG_NF_LOG_BRIDGE=m
CONFIG_NF_CONNTRACK_BRIDGE=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
@@ -513,6 +512,7 @@ CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECDSA=m
CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_SM2=m
CONFIG_CRYPTO_CURVE25519=m
@@ -530,12 +530,8 @@ CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_AES_TI=m
@@ -548,7 +544,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
@@ -582,6 +577,7 @@ CONFIG_KUNIT_ALL_TESTS=m
CONFIG_TEST_LIST_SORT=m
CONFIG_TEST_MIN_HEAP=m
CONFIG_TEST_SORT=m
+CONFIG_TEST_DIV64=m
CONFIG_REED_SOLOMON_TEST=m
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_ASYNC_RAID6_TEST=m
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index ba808543161a..9a8394e96388 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -55,7 +55,6 @@ struct nfhd_device {
int id;
u32 blocks, bsize;
int bshift;
- struct request_queue *queue;
struct gendisk *disk;
};
@@ -119,32 +118,24 @@ static int __init nfhd_init_one(int id, u32 blocks, u32 bsize)
dev->bsize = bsize;
dev->bshift = ffs(bsize) - 10;
- dev->queue = blk_alloc_queue(NUMA_NO_NODE);
- if (dev->queue == NULL)
- goto free_dev;
-
- blk_queue_logical_block_size(dev->queue, bsize);
-
- dev->disk = alloc_disk(16);
+ dev->disk = blk_alloc_disk(NUMA_NO_NODE);
if (!dev->disk)
- goto free_queue;
+ goto free_dev;
dev->disk->major = major_num;
dev->disk->first_minor = dev_id * 16;
+ dev->disk->minors = 16;
dev->disk->fops = &nfhd_ops;
dev->disk->private_data = dev;
sprintf(dev->disk->disk_name, "nfhd%u", dev_id);
set_capacity(dev->disk, (sector_t)blocks * (bsize / 512));
- dev->disk->queue = dev->queue;
-
+ blk_queue_logical_block_size(dev->disk->queue, bsize);
add_disk(dev->disk);
list_add_tail(&dev->list, &nfhd_list);
return 0;
-free_queue:
- blk_cleanup_queue(dev->queue);
free_dev:
kfree(dev);
out:
@@ -186,8 +177,7 @@ static void __exit nfhd_exit(void)
list_for_each_entry_safe(dev, next, &nfhd_list, list) {
list_del(&dev->list);
del_gendisk(dev->disk);
- put_disk(dev->disk);
- blk_cleanup_queue(dev->queue);
+ blk_cleanup_disk(dev->disk);
kfree(dev);
}
unregister_blkdev(major_num, "nfhd");
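
The nfblock changes follow the combined disk-and-queue allocation API: blk_alloc_disk() allocates the gendisk together with its request_queue, so the driver no longer carries a separate queue pointer, and blk_cleanup_disk() replaces the put_disk()/blk_cleanup_queue() pair on teardown. Reduced to the allocation pattern, the new-style skeleton is (a sketch, error paths trimmed):

	struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);

	if (!disk)
		return -ENOMEM;
	blk_queue_logical_block_size(disk->queue, bsize);
	/* fill in major, first_minor, minors, fops, private_data ... */
	add_disk(disk);

	/* teardown */
	del_gendisk(disk);
	blk_cleanup_disk(disk);		/* frees the disk and its queue */
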
diff --git a/arch/m68k/fpsp040/Makefile b/arch/m68k/fpsp040/Makefile
index aab04d372ae7..834ae9471b88 100644
--- a/arch/m68k/fpsp040/Makefile
+++ b/arch/m68k/fpsp040/Makefile
@@ -10,7 +10,3 @@ obj-y := bindec.o binstr.o decbin.o do_func.o gen_except.o get_op.o \
ssin.o ssinh.o stan.o stanh.o sto_res.o stwotox.o tbldo.o util.o \
x_bsun.o x_fline.o x_operr.o x_ovfl.o x_snan.o x_store.o \
x_unfl.o x_unimp.o x_unsupp.o bugfix.o skeleton.o
-
-EXTRA_LDFLAGS := -x
-
-$(OS_OBJS): fpsp.h
diff --git a/arch/m68k/ifpsp060/Makefile b/arch/m68k/ifpsp060/Makefile
index 43b435049452..56b530a96c2f 100644
--- a/arch/m68k/ifpsp060/Makefile
+++ b/arch/m68k/ifpsp060/Makefile
@@ -5,5 +5,3 @@
# for more details.
obj-y := fskeleton.o iskeleton.o os.o
-
-EXTRA_LDFLAGS := -x
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 756c5cc58f94..8637bf8a2f65 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -16,8 +16,8 @@
* We do not have SMP m68k systems, so we don't have to deal with that.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
/*
* The ColdFire parts cannot do some immediate to memory operations,
@@ -30,7 +30,7 @@
#endif
#define ATOMIC_OP(op, c_op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
} \
@@ -38,7 +38,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
#ifdef CONFIG_RMW_INSNS
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int t, tmp; \
\
@@ -48,12 +48,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
- : "g" (i), "2" (atomic_read(v))); \
+ : "g" (i), "2" (arch_atomic_read(v))); \
return t; \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int t, tmp; \
\
@@ -63,14 +63,14 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
- : "g" (i), "2" (atomic_read(v))); \
+ : "g" (i), "2" (arch_atomic_read(v))); \
return tmp; \
}
#else
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t * v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned long flags; \
int t; \
@@ -83,7 +83,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
}
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
-static inline int atomic_fetch_##op(int i, atomic_t * v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned long flags; \
int t; \
@@ -120,27 +120,27 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-static inline void atomic_inc(atomic_t *v)
+static inline void arch_atomic_inc(atomic_t *v)
{
__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
-#define atomic_inc atomic_inc
+#define arch_atomic_inc arch_atomic_inc
-static inline void atomic_dec(atomic_t *v)
+static inline void arch_atomic_dec(atomic_t *v)
{
__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
-#define atomic_dec atomic_dec
+#define arch_atomic_dec arch_atomic_dec
-static inline int atomic_dec_and_test(atomic_t *v)
+static inline int arch_atomic_dec_and_test(atomic_t *v)
{
char c;
__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
-#define atomic_dec_and_test atomic_dec_and_test
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
-static inline int atomic_dec_and_test_lt(atomic_t *v)
+static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{
char c;
__asm__ __volatile__(
@@ -150,49 +150,49 @@ static inline int atomic_dec_and_test_lt(atomic_t *v)
return c != 0;
}
-static inline int atomic_inc_and_test(atomic_t *v)
+static inline int arch_atomic_inc_and_test(atomic_t *v)
{
char c;
__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
-#define atomic_inc_and_test atomic_inc_and_test
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
#ifdef CONFIG_RMW_INSNS
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#else /* !CONFIG_RMW_INSNS */
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
unsigned long flags;
int prev;
local_irq_save(flags);
- prev = atomic_read(v);
+ prev = arch_atomic_read(v);
if (prev == old)
- atomic_set(v, new);
+ arch_atomic_set(v, new);
local_irq_restore(flags);
return prev;
}
-static inline int atomic_xchg(atomic_t *v, int new)
+static inline int arch_atomic_xchg(atomic_t *v, int new)
{
unsigned long flags;
int prev;
local_irq_save(flags);
- prev = atomic_read(v);
- atomic_set(v, new);
+ prev = arch_atomic_read(v);
+ arch_atomic_set(v, new);
local_irq_restore(flags);
return prev;
}
#endif /* !CONFIG_RMW_INSNS */
-static inline int atomic_sub_and_test(int i, atomic_t *v)
+static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{
char c;
__asm__ __volatile__("subl %2,%1; seq %0"
@@ -200,9 +200,9 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
: ASM_DI (i));
return c != 0;
}
-#define atomic_sub_and_test atomic_sub_and_test
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
-static inline int atomic_add_negative(int i, atomic_t *v)
+static inline int arch_atomic_add_negative(int i, atomic_t *v)
{
char c;
__asm__ __volatile__("addl %2,%1; smi %0"
@@ -210,6 +210,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
: ASM_DI (i));
return c != 0;
}
-#define atomic_add_negative atomic_add_negative
+#define arch_atomic_add_negative arch_atomic_add_negative
#endif /* __ARCH_M68K_ATOMIC __ */
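
The arch_ prefix added throughout asm/atomic.h (and to xchg/cmpxchg in asm/cmpxchg.h below) is what the generic instrumentation layer expects: linux/atomic.h pulls in generated wrappers that add KASAN/KCSAN annotations around each arch primitive and export the familiar unprefixed names. Per operation the wrapper is roughly (simplified from asm-generic/atomic-instrumented.h, shown for orientation):

	static __always_inline int
	atomic_read(const atomic_t *v)
	{
		instrument_atomic_read(v, sizeof(*v));
		return arch_atomic_read(v);
	}
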
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index 10133a968c8e..7b414099e5fc 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -440,8 +440,6 @@ static inline unsigned long ffz(unsigned long word)
#endif
-#include <asm-generic/bitops/find.h>
-
#ifdef __KERNEL__
#if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
@@ -525,10 +523,12 @@ static inline int __fls(int x)
#define __clear_bit_unlock clear_bit_unlock
#include <asm-generic/bitops/ext2-atomic.h>
-#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/le.h>
#endif /* __KERNEL__ */
+#include <asm-generic/bitops/find.h>
+
#endif /* _M68K_BITOPS_H */
diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
index a4aa82021d3b..e8ca4b0ccefa 100644
--- a/arch/m68k/include/asm/cmpxchg.h
+++ b/arch/m68k/include/asm/cmpxchg.h
@@ -76,11 +76,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
}
#endif
-#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
extern unsigned long __invalid_cmpxchg_size(volatile void *,
unsigned long, unsigned long, int);
@@ -118,14 +118,14 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)));})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)));})
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#else
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index 819f611dccf2..d41fa488453b 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -397,11 +397,6 @@ static inline void isa_delay(void)
*/
#define xlate_dev_mem_ptr(p) __va(p)
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
#define readb_relaxed(addr) readb(addr)
#define readw_relaxed(addr) readw(addr)
#define readl_relaxed(addr) readl(addr)
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index bc1228e00518..5c2c0a864524 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -32,8 +32,6 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
#define pmd_populate_kernel pmd_populate
-#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
-
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
unsigned long address)
{
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index 8d4ec05996c5..6f2b87d7a50d 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -150,6 +150,8 @@
#ifndef __ASSEMBLY__
+#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
+
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
diff --git a/arch/m68k/include/asm/mcfclk.h b/arch/m68k/include/asm/mcfclk.h
index 722627e06d66..4e9a6b827a14 100644
--- a/arch/m68k/include/asm/mcfclk.h
+++ b/arch/m68k/include/asm/mcfclk.h
@@ -15,15 +15,12 @@ struct clk_ops {
};
struct clk {
- const char *name;
struct clk_ops *clk_ops;
unsigned long rate;
unsigned long enabled;
u8 slot;
};
-extern struct clk *mcf_clks[];
-
#ifdef MCFPM_PPMCR0
extern struct clk_ops clk_ops0;
#ifdef MCFPM_PPMCR1
@@ -34,7 +31,6 @@ extern struct clk_ops clk_ops2;
#define DEFINE_CLK(clk_bank, clk_name, clk_slot, clk_rate) \
static struct clk __clk_##clk_bank##_##clk_slot = { \
- .name = clk_name, \
.clk_ops = &clk_ops##clk_bank, \
.rate = clk_rate, \
.slot = clk_slot, \
@@ -45,7 +41,6 @@ void __clk_init_disabled(struct clk *);
#else
#define DEFINE_CLK(clk_ref, clk_name, clk_rate) \
static struct clk clk_##clk_ref = { \
- .name = clk_name, \
.rate = clk_rate, \
}
#endif /* MCFPM_PPMCR0 */
diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h
index a5d358855878..8ed6ac14d99f 100644
--- a/arch/m68k/include/asm/mmu_context.h
+++ b/arch/m68k/include/asm/mmu_context.h
@@ -31,7 +31,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
if (mm->context != NO_CONTEXT)
return;
- while (atomic_dec_and_test_lt(&nr_free_contexts)) {
+ while (arch_atomic_dec_and_test_lt(&nr_free_contexts)) {
atomic_inc(&nr_free_contexts);
steal_context();
}
diff --git a/arch/m68k/include/asm/mmzone.h b/arch/m68k/include/asm/mmzone.h
deleted file mode 100644
index 64573fe8e60d..000000000000
--- a/arch/m68k/include/asm/mmzone.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_M68K_MMZONE_H_
-#define _ASM_M68K_MMZONE_H_
-
-extern pg_data_t pg_data_map[];
-
-#define NODE_DATA(nid) (&pg_data_map[nid])
-#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map)
-
-#endif /* _ASM_M68K_MMZONE_H_ */
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index b4fc3b4f6bb3..74a817d9387f 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -88,7 +88,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
{
pmd_set(pmd, page);
}
-#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 8076467eff4b..a2908164ee6f 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -105,6 +105,8 @@ extern unsigned long mm_cachebits;
#define __S110 PAGE_SHARED_C
#define __S111 PAGE_SHARED_C
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
+
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
diff --git a/arch/m68k/include/asm/mvme147hw.h b/arch/m68k/include/asm/mvme147hw.h
index 257b29184af9..e28eb1c0e0bf 100644
--- a/arch/m68k/include/asm/mvme147hw.h
+++ b/arch/m68k/include/asm/mvme147hw.h
@@ -66,6 +66,9 @@ struct pcc_regs {
#define PCC_INT_ENAB 0x08
#define PCC_TIMER_INT_CLR 0x80
+
+#define PCC_TIMER_TIC_EN 0x01
+#define PCC_TIMER_COC_EN 0x02
#define PCC_TIMER_CLR_OVF 0x04
#define PCC_LEVEL_ABORT 0x07
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index 97087dd3ca6d..2f1c54e4725d 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -62,7 +62,7 @@ extern unsigned long _ramend;
#include <asm/page_no.h>
#endif
-#if !defined(CONFIG_MMU) || defined(CONFIG_DISCONTIGMEM)
+#ifndef CONFIG_MMU
#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
#define __pfn_to_phys(pfn) PFN_PHYS(pfn)
#endif
diff --git a/arch/m68k/include/asm/page_mm.h b/arch/m68k/include/asm/page_mm.h
index 7f5912af2a52..a5b459bcb7d8 100644
--- a/arch/m68k/include/asm/page_mm.h
+++ b/arch/m68k/include/asm/page_mm.h
@@ -126,26 +126,6 @@ static inline void *__va(unsigned long x)
extern int m68k_virt_to_node_shift;
-#ifndef CONFIG_DISCONTIGMEM
-#define __virt_to_node(addr) (&pg_data_map[0])
-#else
-extern struct pglist_data *pg_data_table[];
-
-static inline __attribute_const__ int __virt_to_node_shift(void)
-{
- int shift;
-
- asm (
- "1: moveq #0,%0\n"
- m68k_fixup(%c1, 1b)
- : "=d" (shift)
- : "i" (m68k_fixup_vnode_shift));
- return shift;
-}
-
-#define __virt_to_node(addr) (pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()])
-#endif
-
#define virt_to_page(addr) ({ \
pfn_to_page(virt_to_pfn(addr)); \
})
@@ -153,25 +133,10 @@ static inline __attribute_const__ int __virt_to_node_shift(void)
pfn_to_virt(page_to_pfn(page)); \
})
-#ifdef CONFIG_DISCONTIGMEM
-#define pfn_to_page(pfn) ({ \
- unsigned long __pfn = (pfn); \
- struct pglist_data *pgdat; \
- pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn)); \
- pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \
-})
-#define page_to_pfn(_page) ({ \
- const struct page *__p = (_page); \
- struct pglist_data *pgdat; \
- pgdat = &pg_data_map[page_to_nid(__p)]; \
- ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \
-})
-#else
-#define ARCH_PFN_OFFSET (m68k_memory[0].addr)
+#define ARCH_PFN_OFFSET (m68k_memory[0].addr >> PAGE_SHIFT)
#include <asm-generic/memory_model.h>
-#endif
-#define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory)
+#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
#define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn))
#endif /* __ASSEMBLY__ */
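
With DISCONTIGMEM gone, pfn_to_page() and page_to_pfn() fall through to the generic flatmem memory model, which indexes a single mem_map relative to ARCH_PFN_OFFSET — hence the fix expressing the offset as a frame number (addr >> PAGE_SHIFT) instead of a byte address. The flatmem case in asm-generic/memory_model.h reduces to:

	#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
	#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
					 ARCH_PFN_OFFSET)
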
diff --git a/arch/m68k/include/asm/page_no.h b/arch/m68k/include/asm/page_no.h
index 6bbe52025de3..c9d0d84158a4 100644
--- a/arch/m68k/include/asm/page_no.h
+++ b/arch/m68k/include/asm/page_no.h
@@ -13,9 +13,9 @@ extern unsigned long memory_end;
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
#define __pa(vaddr) ((unsigned long)(vaddr))
#define __va(paddr) ((void *)((unsigned long)(paddr)))
@@ -30,8 +30,8 @@ extern unsigned long memory_end;
#define page_to_pfn(page) virt_to_pfn(page_to_virt(page))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
-#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
- ((void *)(kaddr) < (void *)memory_end))
+#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET) && \
+ ((unsigned long)(kaddr) < memory_end))
#endif /* __ASSEMBLY__ */
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index aca22c2c1ee2..143ba7de9bda 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -72,7 +72,6 @@
#define PTRS_PER_PGD 128
#endif
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
/* Virtual address region for use by kernel_map() */
#ifdef CONFIG_SUN3
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 000f64869b91..198036aff519 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -32,7 +32,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page
{
pmd_val(*pmd) = __pa((unsigned long)page_address(page));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
diff --git a/arch/m68k/include/asm/sun3xflop.h b/arch/m68k/include/asm/sun3xflop.h
index 93f2a8431c0e..bce8aabb5380 100644
--- a/arch/m68k/include/asm/sun3xflop.h
+++ b/arch/m68k/include/asm/sun3xflop.h
@@ -106,7 +106,7 @@ static void sun3x_82072_fd_outb(unsigned char value, int port)
case 4: /* FD_STATUS */
*(sun3x_fdc.status_r) = value;
break;
- };
+ }
return;
}
diff --git a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h
index 5337bc2c262f..a6318ccd308f 100644
--- a/arch/m68k/include/asm/tlbflush.h
+++ b/arch/m68k/include/asm/tlbflush.h
@@ -263,7 +263,7 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr
BUG();
}
-static inline void flush_tlb_range(struct mm_struct *mm,
+static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
BUG();
diff --git a/arch/m68k/include/asm/unaligned.h b/arch/m68k/include/asm/unaligned.h
deleted file mode 100644
index 98c8930d3d35..000000000000
--- a/arch/m68k/include/asm/unaligned.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_M68K_UNALIGNED_H
-#define _ASM_M68K_UNALIGNED_H
-
-
-#ifdef CONFIG_CPU_HAS_NO_UNALIGNED
-#include <linux/unaligned/be_struct.h>
-#include <linux/unaligned/le_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-
-#else
-/*
- * The m68k can do unaligned accesses itself.
- */
-#include <linux/unaligned/access_ok.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-
-#endif
-
-#endif /* _ASM_M68K_UNALIGNED_H */
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index 1c1b875fadc1..2e192a5df949 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -34,9 +34,6 @@ pgprot_t pgprot_dmacoherent(pgprot_t prot)
return prot;
}
#else
-
-#include <asm/cacheflush.h>
-
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs)
{
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index da83cc83e791..db49f9091711 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -268,7 +268,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)task_stack_page(p);
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 349570f16a78..8f215e79e70e 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -622,6 +622,10 @@ static inline void siginfo_build_tests(void)
/* _sigfault._addr_pkey */
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12);
+ /* _sigfault._perf */
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x10);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x14);
+
/* _sigpoll */
BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x0c);
BUILD_BUG_ON(offsetof(siginfo_t, si_fd) != 0x10);
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 1c235d8f53f3..bd0274c7592e 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -388,6 +388,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
ret = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto out;
+
+ mmap_read_lock(current->mm);
} else {
struct vm_area_struct *vma;
@@ -400,8 +402,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
* to this process.
*/
mmap_read_lock(current->mm);
- vma = find_vma(current->mm, addr);
- if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
+ vma = vma_lookup(current->mm, addr);
+ if (!vma || addr + len > vma->vm_end)
goto out_unlock;
}
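
vma_lookup() only returns a VMA that actually contains the address, which is what makes the old addr < vma->vm_start test redundant; find_vma() instead returns the first VMA ending above the address, even when the address sits in an unmapped hole below it. The helper is essentially:

	static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
							unsigned long addr)
	{
		struct vm_area_struct *vma = find_vma(mm, addr);

		/* Reject a vma that starts above addr: addr is in a hole. */
		if (vma && addr < vma->vm_start)
			vma = NULL;

		return vma;
	}
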
diff --git a/arch/m68k/kernel/syscalls/Makefile b/arch/m68k/kernel/syscalls/Makefile
index 285aaba832d9..6713c65a25e1 100644
--- a/arch/m68k/kernel/syscalls/Makefile
+++ b/arch/m68k/kernel/syscalls/Makefile
@@ -6,20 +6,14 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
syscall := $(src)/syscall.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abis_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '$(syshdr_offset_$(basetarget))'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
- '$(systbl_abis_$(basetarget))' \
- '$(systbl_abi_$(basetarget))' \
- '$(systbl_offset_$(basetarget))'
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@
$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index 72bde6707dd3..541bc1b3a8f9 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -442,3 +442,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/m68k/kernel/syscalls/syscallhdr.sh b/arch/m68k/kernel/syscalls/syscallhdr.sh
deleted file mode 100644
index 6f357d68ef44..000000000000
--- a/arch/m68k/kernel/syscalls/syscallhdr.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_UAPI_ASM_M68K_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- printf "#ifndef %s\n" "${fileguard}"
- printf "#define %s\n" "${fileguard}"
- printf "\n"
-
- nxt=0
- while read nr abi name entry ; do
- if [ -z "$offset" ]; then
- printf "#define __NR_%s%s\t%s\n" \
- "${prefix}" "${name}" "${nr}"
- else
- printf "#define __NR_%s%s\t(%s + %s)\n" \
- "${prefix}" "${name}" "${offset}" "${nr}"
- fi
- nxt=$((nr+1))
- done
-
- printf "\n"
- printf "#ifdef __KERNEL__\n"
- printf "#define __NR_syscalls\t%s\n" "${nxt}"
- printf "#endif\n"
- printf "\n"
- printf "#endif /* %s */\n" "${fileguard}"
-) > "$out"
diff --git a/arch/m68k/kernel/syscalls/syscalltbl.sh b/arch/m68k/kernel/syscalls/syscalltbl.sh
deleted file mode 100644
index 85d78d9309ad..000000000000
--- a/arch/m68k/kernel/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-my_abi="$4"
-offset="$5"
-
-emit() {
- t_nxt="$1"
- t_nr="$2"
- t_entry="$3"
-
- while [ $t_nxt -lt $t_nr ]; do
- printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
- t_nxt=$((t_nxt+1))
- done
- printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
-}
-
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- nxt=0
- if [ -z "$offset" ]; then
- offset=0
- fi
-
- while read nr abi name entry ; do
- emit $((nxt+offset)) $((nr+offset)) $entry
- nxt=$((nr+1))
- done
-) > "$out"
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index d329cc7b481c..e25ef4a9df30 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -18,9 +18,8 @@
#define sys_mmap2 sys_mmap_pgoff
#endif
-#define __SYSCALL(nr, entry, nargs) .long entry
+#define __SYSCALL(nr, entry) .long entry
.section .rodata
ALIGN
ENTRY(sys_call_table)
#include <asm/syscall_table.h>
-#undef __SYSCALL
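
The two-argument __SYSCALL() matches the output of the generic scripts/syscalltbl.sh now invoked by the Makefile above: the script expands syscall.tbl into asm/syscall_table.h with one __SYSCALL(nr, entry) line per slot, padding any gaps with sys_ni_syscall, and the assembler macro turns each line into a table entry. The generated header looks roughly like this (illustrative excerpt):

	__SYSCALL(0, sys_restart_syscall)
	__SYSCALL(1, sys_exit)
	/* ... */
	__SYSCALL(446, sys_landlock_restrict_self)
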
diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
index 1cdac959bd91..5d16f9b47aa9 100644
--- a/arch/m68k/mac/config.c
+++ b/arch/m68k/mac/config.c
@@ -933,13 +933,15 @@ static const struct resource mac_scsi_ccl_rsrc[] __initconst = {
},
};
-static const struct resource mac_ide_quadra_rsrc[] __initconst = {
- DEFINE_RES_MEM(0x50F1A000, 0x104),
+static const struct resource mac_pata_quadra_rsrc[] __initconst = {
+ DEFINE_RES_MEM(0x50F1A000, 0x38),
+ DEFINE_RES_MEM(0x50F1A038, 0x04),
DEFINE_RES_IRQ(IRQ_NUBUS_F),
};
-static const struct resource mac_ide_pb_rsrc[] __initconst = {
- DEFINE_RES_MEM(0x50F1A000, 0x104),
+static const struct resource mac_pata_pb_rsrc[] __initconst = {
+ DEFINE_RES_MEM(0x50F1A000, 0x38),
+ DEFINE_RES_MEM(0x50F1A038, 0x04),
DEFINE_RES_IRQ(IRQ_NUBUS_C),
};
@@ -949,7 +951,7 @@ static const struct resource mac_pata_baboon_rsrc[] __initconst = {
DEFINE_RES_IRQ(IRQ_BABOON_1),
};
-static const struct pata_platform_info mac_pata_baboon_data __initconst = {
+static const struct pata_platform_info mac_pata_data __initconst = {
.ioport_shift = 2,
};
@@ -1067,17 +1069,19 @@ int __init mac_platform_init(void)
switch (macintosh_config->ide_type) {
case MAC_IDE_QUADRA:
- platform_device_register_simple("mac_ide", -1,
- mac_ide_quadra_rsrc, ARRAY_SIZE(mac_ide_quadra_rsrc));
+ platform_device_register_resndata(NULL, "pata_platform", -1,
+ mac_pata_quadra_rsrc, ARRAY_SIZE(mac_pata_quadra_rsrc),
+ &mac_pata_data, sizeof(mac_pata_data));
break;
case MAC_IDE_PB:
- platform_device_register_simple("mac_ide", -1,
- mac_ide_pb_rsrc, ARRAY_SIZE(mac_ide_pb_rsrc));
+ platform_device_register_resndata(NULL, "pata_platform", -1,
+ mac_pata_pb_rsrc, ARRAY_SIZE(mac_pata_pb_rsrc),
+ &mac_pata_data, sizeof(mac_pata_data));
break;
case MAC_IDE_BABOON:
platform_device_register_resndata(NULL, "pata_platform", -1,
mac_pata_baboon_rsrc, ARRAY_SIZE(mac_pata_baboon_rsrc),
- &mac_pata_baboon_data, sizeof(mac_pata_baboon_data));
+ &mac_pata_data, sizeof(mac_pata_data));
break;
}
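
Annotation: the mac_ide devices are rehomed onto the generic pata_platform driver, which takes the command block and the control block as two separate memory resources (hence the 0x38 plus 0x04 split above) followed by an IRQ, with ioport_shift passed as platform data. For reference, the one-call helper used here is declared in include/linux/platform_device.h roughly as:

	struct platform_device *
	platform_device_register_resndata(struct device *parent,
					  const char *name, int id,
					  const struct resource *res,
					  unsigned int num,
					  const void *data, size_t size);
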
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 14c1e541451c..5d749e188246 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -44,28 +44,8 @@ EXPORT_SYMBOL(empty_zero_page);
int m68k_virt_to_node_shift;
-#ifdef CONFIG_DISCONTIGMEM
-pg_data_t pg_data_map[MAX_NUMNODES];
-EXPORT_SYMBOL(pg_data_map);
-
-pg_data_t *pg_data_table[65];
-EXPORT_SYMBOL(pg_data_table);
-#endif
-
void __init m68k_setup_node(int node)
{
-#ifdef CONFIG_DISCONTIGMEM
- struct m68k_mem_info *info = m68k_memory + node;
- int i, end;
-
- i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
- end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
- for (; i <= end; i++) {
- if (pg_data_table[i])
- pr_warn("overlap at %u for chunk %u\n", i, node);
- pg_data_table[i] = pg_data_map + node;
- }
-#endif
node_set_online(node);
}
@@ -153,5 +133,4 @@ void __init mem_init(void)
/* this will put all memory onto the freelists */
memblock_free_all();
init_pointer_tables();
- mem_init_print_info(NULL);
}
diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
index cfdc7f912e14..e1e90c49a496 100644
--- a/arch/m68k/mvme147/config.c
+++ b/arch/m68k/mvme147/config.c
@@ -114,8 +114,10 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
unsigned long flags;
local_irq_save(flags);
- m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
- m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF;
+ m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
+ PCC_TIMER_TIC_EN;
+ m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
+ PCC_LEVEL_TIMER1;
clk_total += PCC_TIMER_CYCLES;
legacy_timer_tick(1);
local_irq_restore(flags);
@@ -133,10 +135,10 @@ void mvme147_sched_init (void)
/* Init the clock with a value */
/* The clock counter increments until 0xFFFF then reloads */
m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
- m147_pcc->t1_cntrl = 0x0; /* clear timer */
- m147_pcc->t1_cntrl = 0x3; /* start timer */
- m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR; /* clear pending ints */
- m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
+ m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
+ PCC_TIMER_TIC_EN;
+ m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
+ PCC_LEVEL_TIMER1;
clocksource_register_hz(&mvme147_clk, PCC_TIMER_CLOCK_FREQ);
}
diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
index 30357fe4ba6c..b59593c7cfb9 100644
--- a/arch/m68k/mvme16x/config.c
+++ b/arch/m68k/mvme16x/config.c
@@ -366,6 +366,7 @@ static u32 clk_total;
#define PCCTOVR1_COC_EN 0x02
#define PCCTOVR1_OVR_CLR 0x04
+#define PCCTIC1_INT_LEVEL 6
#define PCCTIC1_INT_CLR 0x08
#define PCCTIC1_INT_EN 0x10
@@ -374,8 +375,8 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
unsigned long flags;
local_irq_save(flags);
- out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR);
- out_8(PCCTOVR1, PCCTOVR1_OVR_CLR);
+ out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
+ out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
clk_total += PCC_TIMER_CYCLES;
legacy_timer_tick(1);
local_irq_restore(flags);
@@ -389,14 +390,15 @@ void mvme16x_sched_init(void)
int irq;
/* Using PCCchip2 or MC2 chip tick timer 1 */
- out_be32(PCCTCNT1, 0);
- out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
- out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
- out_8(PCCTIC1, PCCTIC1_INT_EN | 6);
if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer",
NULL))
panic ("Couldn't register timer int");
+ out_be32(PCCTCNT1, 0);
+ out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
+ out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
+ out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
+
clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ);
if (brdno == 0x0162 || brdno == 0x172)
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index d6a423875231..5caf1e5be1c2 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -286,14 +286,39 @@ static int q40_set_rtc_pll(struct rtc_pll_info *pll)
return -EINVAL;
}
-static __init int q40_add_kbd_device(void)
-{
- struct platform_device *pdev;
+#define PCIDE_BASE1 0x1f0
+#define PCIDE_BASE2 0x170
+#define PCIDE_CTL 0x206
+
+static const struct resource q40_pata_rsrc_0[] __initconst = {
+ DEFINE_RES_MEM(q40_isa_io_base + PCIDE_BASE1 * 4, 0x38),
+ DEFINE_RES_MEM(q40_isa_io_base + (PCIDE_BASE1 + PCIDE_CTL) * 4, 2),
+ DEFINE_RES_IO(PCIDE_BASE1, 8),
+ DEFINE_RES_IO(PCIDE_BASE1 + PCIDE_CTL, 1),
+ DEFINE_RES_IRQ(14),
+};
+static const struct resource q40_pata_rsrc_1[] __initconst = {
+ DEFINE_RES_MEM(q40_isa_io_base + PCIDE_BASE2 * 4, 0x38),
+ DEFINE_RES_MEM(q40_isa_io_base + (PCIDE_BASE2 + PCIDE_CTL) * 4, 2),
+ DEFINE_RES_IO(PCIDE_BASE2, 8),
+ DEFINE_RES_IO(PCIDE_BASE2 + PCIDE_CTL, 1),
+ DEFINE_RES_IRQ(15),
+};
+
+static __init int q40_platform_init(void)
+{
if (!MACH_IS_Q40)
return -ENODEV;
- pdev = platform_device_register_simple("q40kbd", -1, NULL, 0);
- return PTR_ERR_OR_ZERO(pdev);
+ platform_device_register_simple("q40kbd", -1, NULL, 0);
+
+ platform_device_register_simple("atari-falcon-ide", 0, q40_pata_rsrc_0,
+ ARRAY_SIZE(q40_pata_rsrc_0));
+
+ platform_device_register_simple("atari-falcon-ide", 1, q40_pata_rsrc_1,
+ ARRAY_SIZE(q40_pata_rsrc_1));
+
+ return 0;
}
-arch_initcall(q40_add_kbd_device);
+arch_initcall(q40_platform_init);
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 0660f47012bc..14a67a42fcae 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -43,6 +43,7 @@ config MICROBLAZE
select MMU_GATHER_NO_RANGE
select SPARSE_IRQ
select SET_FS
+ select ZONE_DMA
# Endianness selection
choice
@@ -60,9 +61,6 @@ config CPU_LITTLE_ENDIAN
endchoice
-config ZONE_DMA
- def_bool y
-
config ARCH_HAS_ILOG2_U32
def_bool n
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index b41f323e1fde..6d4af39e3890 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -3,7 +3,7 @@ KBUILD_DEFCONFIG := mmu_defconfig
UTS_SYSNAME = -DUTS_SYSNAME=\"Linux\"
-# What CPU vesion are we building for, and crack it open
+# What CPU version are we building for, and crack it open
# as major.minor.rev
CPU_VER := $(shell echo $(CONFIG_XILINX_MICROBLAZE0_HW_VER))
CPU_MAJOR := $(shell echo $(CPU_VER) | cut -d '.' -f 1)
diff --git a/arch/microblaze/boot/dts/system.dts b/arch/microblaze/boot/dts/system.dts
index 5b236527176e..b7ee1056779e 100644
--- a/arch/microblaze/boot/dts/system.dts
+++ b/arch/microblaze/boot/dts/system.dts
@@ -310,14 +310,6 @@
xlnx,odd-parity = <0x0>;
xlnx,use-parity = <0x0>;
} ;
- SysACE_CompactFlash: sysace@83600000 {
- compatible = "xlnx,xps-sysace-1.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 4 2 >;
- reg = < 0x83600000 0x10000 >;
- xlnx,family = "virtex5";
- xlnx,mem-width = <0x10>;
- } ;
debug_module: debug@84400000 {
compatible = "xlnx,mdm-1.00.d";
reg = < 0x84400000 0x10000 >;
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 29b0e557aa7c..a055f5dbe00a 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h
+generic-y += cmpxchg.h
generic-y += extable.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/microblaze/include/asm/atomic.h b/arch/microblaze/include/asm/atomic.h
deleted file mode 100644
index 41e9aff23a62..000000000000
--- a/arch/microblaze/include/asm/atomic.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_MICROBLAZE_ATOMIC_H
-#define _ASM_MICROBLAZE_ATOMIC_H
-
-#include <asm/cmpxchg.h>
-#include <asm-generic/atomic.h>
-#include <asm-generic/atomic64.h>
-
-/*
- * Atomically test *v and decrement if it is greater than 0.
- * The function returns the old value of *v minus 1.
- */
-static inline int atomic_dec_if_positive(atomic_t *v)
-{
- unsigned long flags;
- int res;
-
- local_irq_save(flags);
- res = v->counter - 1;
- if (res >= 0)
- v->counter = res;
- local_irq_restore(flags);
-
- return res;
-}
-#define atomic_dec_if_positive atomic_dec_if_positive
-
-#endif /* _ASM_MICROBLAZE_ATOMIC_H */
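
Annotation: the deleted irq-disabling helper is not lost; with asm-generic/atomic.h in place, the generic fallbacks in linux/atomic*.h supply atomic_dec_if_positive() as a compare-and-swap retry loop. A runnable userspace sketch of that loop, with C11 atomics standing in for the kernel primitives (an assumption for illustration, not the kernel's actual code):

	#include <stdatomic.h>
	#include <stdio.h>

	/* Decrement *v only if it is positive; return old value - 1. */
	static int atomic_dec_if_positive(atomic_int *v)
	{
		int old = atomic_load(v);

		while (old > 0 &&
		       !atomic_compare_exchange_weak(v, &old, old - 1))
			;	/* CAS failure reloads old; retry */

		return old - 1;
	}

	int main(void)
	{
		atomic_int v = 1;

		printf("%d\n", atomic_dec_if_positive(&v));	/* 0; v is now 0 */
		printf("%d\n", atomic_dec_if_positive(&v));	/* -1; v unchanged */
		return 0;
	}
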
diff --git a/arch/microblaze/include/asm/cmpxchg.h b/arch/microblaze/include/asm/cmpxchg.h
deleted file mode 100644
index 3523b51aab36..000000000000
--- a/arch/microblaze/include/asm/cmpxchg.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_MICROBLAZE_CMPXCHG_H
-#define _ASM_MICROBLAZE_CMPXCHG_H
-
-#ifndef CONFIG_SMP
-# include <asm-generic/cmpxchg.h>
-#endif
-
-#endif /* _ASM_MICROBLAZE_CMPXCHG_H */
diff --git a/arch/microblaze/include/asm/ftrace.h b/arch/microblaze/include/asm/ftrace.h
index 5db7f4489f05..6a92bed37794 100644
--- a/arch/microblaze/include/asm/ftrace.h
+++ b/arch/microblaze/include/asm/ftrace.h
@@ -13,7 +13,7 @@ extern void ftrace_call_graph(void);
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
-/* reloction of mcount call site is the same as the address */
+/* relocation of mcount call site is the same as the address */
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index bf681f272f72..ce550978f4fc 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -35,9 +35,6 @@
#define ARCH_SLAB_MINALIGN L1_CACHE_BYTES
-#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
-#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
-
/*
* PAGE_OFFSET -- the first address of the first page of memory. With MMU
* it is set to the kernel start address (aligned on a page boundary).
diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h
index d56b9f670ad1..6c33b05f730f 100644
--- a/arch/microblaze/include/asm/pgalloc.h
+++ b/arch/microblaze/include/asm/pgalloc.h
@@ -28,8 +28,6 @@ static inline pgd_t *get_pgd(void)
#define pgd_alloc(mm) get_pgd()
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm);
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, (pte))
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h
index 9ae8d2c17dd5..71cd547655d9 100644
--- a/arch/microblaze/include/asm/pgtable.h
+++ b/arch/microblaze/include/asm/pgtable.h
@@ -25,8 +25,6 @@ extern int mem_init_done;
#include <asm/mmu.h>
#include <asm/page.h>
-#define FIRST_USER_ADDRESS 0UL
-
extern unsigned long va_to_phys(unsigned long address);
extern pte_t *va_to_pte(unsigned long address);
diff --git a/arch/microblaze/include/asm/unaligned.h b/arch/microblaze/include/asm/unaligned.h
deleted file mode 100644
index 448299beab69..000000000000
--- a/arch/microblaze/include/asm/unaligned.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2008 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2006 Atmark Techno, Inc.
- */
-
-#ifndef _ASM_MICROBLAZE_UNALIGNED_H
-#define _ASM_MICROBLAZE_UNALIGNED_H
-
-# ifdef __KERNEL__
-
-# ifdef __MICROBLAZEEL__
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-# else
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-# endif
-
-# include <linux/unaligned/generic.h>
-
-# endif /* __KERNEL__ */
-#endif /* _ASM_MICROBLAZE_UNALIGNED_H */
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
index 6c69ce7be2e8..b77dd188dec4 100644
--- a/arch/microblaze/kernel/asm-offsets.c
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -70,7 +70,6 @@ int main(int argc, char *argv[])
/* struct task_struct */
DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index ce006646f741..3bc60a2b159e 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#define __SYSCALL(nr, entry, nargs) .long entry
+#define __SYSCALL(nr, entry) .long entry
ENTRY(sys_call_table)
#include <asm/syscall_table.h>
-#undef __SYSCALL
diff --git a/arch/microblaze/kernel/syscalls/Makefile b/arch/microblaze/kernel/syscalls/Makefile
index 285aaba832d9..6713c65a25e1 100644
--- a/arch/microblaze/kernel/syscalls/Makefile
+++ b/arch/microblaze/kernel/syscalls/Makefile
@@ -6,20 +6,14 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
syscall := $(src)/syscall.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abis_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '$(syshdr_offset_$(basetarget))'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
- '$(systbl_abis_$(basetarget))' \
- '$(systbl_abi_$(basetarget))' \
- '$(systbl_offset_$(basetarget))'
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@
$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index d603a5ec9338..a176faca2927 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -448,3 +448,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/microblaze/kernel/syscalls/syscallhdr.sh b/arch/microblaze/kernel/syscalls/syscallhdr.sh
deleted file mode 100644
index a914854f8d9f..000000000000
--- a/arch/microblaze/kernel/syscalls/syscallhdr.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_UAPI_ASM_MICROBLAZE_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- printf "#ifndef %s\n" "${fileguard}"
- printf "#define %s\n" "${fileguard}"
- printf "\n"
-
- nxt=0
- while read nr abi name entry ; do
- if [ -z "$offset" ]; then
- printf "#define __NR_%s%s\t%s\n" \
- "${prefix}" "${name}" "${nr}"
- else
- printf "#define __NR_%s%s\t(%s + %s)\n" \
- "${prefix}" "${name}" "${offset}" "${nr}"
- fi
- nxt=$((nr+1))
- done
-
- printf "\n"
- printf "#ifdef __KERNEL__\n"
- printf "#define __NR_syscalls\t%s\n" "${nxt}"
- printf "#endif\n"
- printf "\n"
- printf "#endif /* %s */\n" "${fileguard}"
-) > "$out"
diff --git a/arch/microblaze/kernel/syscalls/syscalltbl.sh b/arch/microblaze/kernel/syscalls/syscalltbl.sh
deleted file mode 100644
index 85d78d9309ad..000000000000
--- a/arch/microblaze/kernel/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-my_abi="$4"
-offset="$5"
-
-emit() {
- t_nxt="$1"
- t_nr="$2"
- t_entry="$3"
-
- while [ $t_nxt -lt $t_nr ]; do
- printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
- t_nxt=$((t_nxt+1))
- done
- printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
-}
-
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- nxt=0
- if [ -z "$offset" ]; then
- offset=0
- fi
-
- while read nr abi name entry ; do
- emit $((nxt+offset)) $((nr+offset)) $entry
- nxt=$((nr+1))
- done
-) > "$out"
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c
index f536e81b8168..63041fdf916d 100644
--- a/arch/microblaze/lib/memcpy.c
+++ b/arch/microblaze/lib/memcpy.c
@@ -68,9 +68,11 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
case 1:
*dst++ = *src++;
--c;
+ fallthrough;
case 2:
*dst++ = *src++;
--c;
+ fallthrough;
case 3:
*dst++ = *src++;
--c;
@@ -176,8 +178,10 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
switch (c) {
case 3:
*dst++ = *src++;
+ fallthrough;
case 2:
*dst++ = *src++;
+ fallthrough;
case 1:
*dst++ = *src++;
}
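
Annotation: these Duff's-device-style copy tails (and the memmove/memset ones below) fall through between cases on purpose; annotating them with the kernel's fallthrough pseudo-keyword lets -Wimplicit-fallthrough flag only the unintentional ones. A self-contained illustration, with a userspace macro standing in for include/linux/compiler_attributes.h:

	#include <stdio.h>

	/* Stand-in for the kernel's fallthrough pseudo-keyword. */
	#ifdef __has_attribute
	# if __has_attribute(__fallthrough__)
	#  define fallthrough __attribute__((__fallthrough__))
	# endif
	#endif
	#ifndef fallthrough
	# define fallthrough do {} while (0)
	#endif

	static void copy_tail(char *dst, const char *src, unsigned int c)
	{
		switch (c) {
		case 3:
			*dst++ = *src++;
			fallthrough;
		case 2:
			*dst++ = *src++;
			fallthrough;
		case 1:
			*dst++ = *src++;
		}
	}

	int main(void)
	{
		char out[4] = "";

		copy_tail(out, "abc", 3);
		printf("%s\n", out);	/* abc */
		return 0;
	}
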
diff --git a/arch/microblaze/lib/memmove.c b/arch/microblaze/lib/memmove.c
index 3611ce70415b..9862f6b1e59d 100644
--- a/arch/microblaze/lib/memmove.c
+++ b/arch/microblaze/lib/memmove.c
@@ -90,9 +90,11 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
case 3:
*--dst = *--src;
--c;
+ fallthrough;
case 2:
*--dst = *--src;
--c;
+ fallthrough;
case 1:
*--dst = *--src;
--c;
@@ -201,10 +203,13 @@ void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
switch (c) {
case 4:
*--dst = *--src;
+ fallthrough;
case 3:
*--dst = *--src;
+ fallthrough;
case 2:
*--dst = *--src;
+ fallthrough;
case 1:
*--dst = *--src;
}
diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c
index 04ea72c8a81d..eb6c8988af02 100644
--- a/arch/microblaze/lib/memset.c
+++ b/arch/microblaze/lib/memset.c
@@ -69,9 +69,11 @@ void *memset(void *v_src, int c, __kernel_size_t n)
case 1:
*src++ = c;
--n;
+ fallthrough;
case 2:
*src++ = c;
--n;
+ fallthrough;
case 3:
*src++ = c;
--n;
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S
index 0e8cc2710c27..eca290090038 100644
--- a/arch/microblaze/lib/uaccess_old.S
+++ b/arch/microblaze/lib/uaccess_old.S
@@ -188,7 +188,7 @@ w2: sw r4, r5, r3
.text
.align 4 /* Alignment is important to keep icache happy */
-page: /* Create room on stack and save registers for storign values */
+page: /* Create room on stack and save registers for storing values */
addik r1, r1, -40
swi r5, r1, 0
swi r6, r1, 4
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 181e48782e6c..ab55c70380a5 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -52,7 +52,7 @@ static void __init highmem_init(void)
pkmap_page_table = virt_to_kpte(PKMAP_BASE);
}
-static void highmem_setup(void)
+static void __meminit highmem_setup(void)
{
unsigned long pfn;
@@ -131,7 +131,6 @@ void __init mem_init(void)
highmem_setup();
#endif
- mem_init_print_info(NULL);
mem_init_done = 1;
}
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index d89efba3d8a4..cee6087cd686 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,18 +4,22 @@ config MIPS
default y
select ARCH_32BIT_OFF_T if !64BIT
select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
+ select ARCH_HAS_DEBUG_VIRTUAL if !64BIT
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_KCOV
+ select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA
select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_KEEP_MEMBLOCK if DEBUG_KERNEL
+ select ARCH_KEEP_MEMBLOCK
select ARCH_SUPPORTS_UPROBES
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if 64BIT
+ select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
+ select ARCH_SUPPORTS_HUGETLBFS if CPU_SUPPORTS_HUGEPAGES
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_LD_ORPHAN_WARN
@@ -26,6 +30,7 @@ config MIPS
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
+ select GENERIC_FIND_FIRST_BIT
select GENERIC_GETTIMEOFDAY
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
@@ -91,7 +96,6 @@ config MIPS
select PERF_USE_VMALLOC
select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
select RTC_LIB
- select SET_FS
select SYSCTL_EXCEPTION_TRACE
select VIRT_TO_BUS
select ARCH_HAS_ELFCORE_COMPAT
@@ -108,6 +112,7 @@ config MACH_INGENIC
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_ZBOOT
select DMA_NONCOHERENT
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
select IRQ_MIPS_CPU
select PINCTRL
select GPIOLIB
@@ -197,6 +202,7 @@ config MIPS_ALCHEMY
config AR7
bool "Texas Instruments AR7"
select BOOT_ELF32
+ select COMMON_CLK
select DMA_NONCOHERENT
select CEVT_R4K
select CSRC_R4K
@@ -211,7 +217,6 @@ config AR7
select SYS_SUPPORTS_ZBOOT_UART16550
select GPIOLIB
select VLYNQ
- select HAVE_LEGACY_CLK
help
Support for the Texas Instruments AR7 System-on-a-Chip
family: TNETD7100, 7200 and 7300.
@@ -328,7 +333,6 @@ config BCM63XX
select SWAP_IO_SPACE
select GPIOLIB
select MIPS_L1_CACHE_SHIFT_4
- select CLKDEV_LOOKUP
select HAVE_LEGACY_CLK
help
Support for BCM63XX based boards
@@ -424,6 +428,8 @@ config MACH_INGENIC_SOC
select MIPS_GENERIC
select MACH_INGENIC
select SYS_SUPPORTS_ZBOOT_UART16550
+ select CPU_SUPPORTS_CPUFREQ
+ select MIPS_EXTERNAL_TIMER
config LANTIQ
bool "Lantiq based platforms"
@@ -442,7 +448,6 @@ config LANTIQ
select GPIOLIB
select SWAP_IO_SPACE
select BOOT_RAW
- select CLKDEV_LOOKUP
select HAVE_LEGACY_CLK
select USE_OF
select PINCTRL
@@ -626,6 +631,7 @@ config MACH_NINTENDO64
config RALINK
bool "Ralink based machines"
select CEVT_R4K
+ select COMMON_CLK
select CSRC_R4K
select BOOT_RAW
select DMA_NONCOHERENT
@@ -638,7 +644,6 @@ config RALINK
select SYS_SUPPORTS_MIPS16
select SYS_SUPPORTS_ZBOOT
select SYS_HAS_EARLY_PRINTK
- select CLKDEV_LOOKUP
select ARCH_HAS_RESET_CONTROLLER
select RESET_CONTROLLER
@@ -712,6 +717,7 @@ config SGI_IP27
select ARC_CMDLINE_ONLY
select BOOT_ELF64
select DEFAULT_SGI_PARTITION
+ select FORCE_PCI
select SYS_HAS_EARLY_PRINTK
select HAVE_PCI
select IRQ_MIPS_CPU
@@ -774,6 +780,7 @@ config SGI_IP30
select BOOT_ELF64
select CEVT_R4K
select CSRC_R4K
+ select FORCE_PCI
select SYNC_R4K if SMP
select ZONE_DMA32
select HAVE_PCI
@@ -990,7 +997,6 @@ config CAVIUM_OCTEON_SOC
select HAVE_PLAT_FW_INIT_CMDLINE
select HAVE_PLAT_MEMCPY
select ZONE_DMA32
- select HOLES_IN_ZONE
select GPIOLIB
select USE_OF
select ARCH_SPARSEMEM_ENABLE
@@ -998,6 +1004,7 @@ config CAVIUM_OCTEON_SOC
select NR_CPUS_DEFAULT_64
select MIPS_NR_CPU_NR_MAP_1024
select BUILTIN_DTB
+ select MTD
select MTD_COMPLEX_MAPPINGS
select SWIOTLB
select SYS_SUPPORTS_RELOCATABLE
@@ -1226,9 +1233,6 @@ config HAVE_PLAT_MEMCPY
config ISA_DMA_API
bool
-config HOLES_IN_ZONE
- bool
-
config SYS_SUPPORTS_RELOCATABLE
bool
help
@@ -1281,11 +1285,6 @@ config SYS_SUPPORTS_BIG_ENDIAN
config SYS_SUPPORTS_LITTLE_ENDIAN
bool
-config SYS_SUPPORTS_HUGETLBFS
- bool
- depends on CPU_SUPPORTS_HUGEPAGES
- default y
-
config MIPS_HUGE_TLB_SUPPORT
def_bool HUGETLB_PAGE || TRANSPARENT_HUGEPAGE
@@ -2118,7 +2117,7 @@ config CPU_MIPS32
config CPU_MIPS64
bool
default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R5 || \
- CPU_MIPS64_R6
+ CPU_MIPS64_R6 || CPU_LOONGSON64 || CPU_CAVIUM_OCTEON
#
# These indicate the revision of the architecture
@@ -2185,7 +2184,8 @@ config CPU_SUPPORTS_HUGEPAGES
depends on !(32BIT && (ARCH_PHYS_ADDR_T_64BIT || EVA))
config MIPS_PGD_C0_CONTEXT
bool
- default y if 64BIT && (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP
+ depends on 64BIT
+ default y if (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP
#
# Set to y for ptrace access to watch registers.
@@ -2219,23 +2219,6 @@ config 64BIT
endchoice
-config KVM_GUEST
- bool "KVM Guest Kernel"
- depends on CPU_MIPS32_R2
- depends on !64BIT && BROKEN_ON_SMP
- help
- Select this option if building a guest kernel for KVM (Trap & Emulate)
- mode.
-
-config KVM_GUEST_TIMER_FREQ
- int "Count/Compare Timer Frequency (MHz)"
- depends on KVM_GUEST
- default 100
- help
- Set this to non-zero if building a guest kernel for KVM to skip RTC
- emulation when determining guest CPU Frequency. Instead, the guest's
- timer frequency is specified directly.
-
config MIPS_VA_BITS_48
bool "48 bits virtual memory"
depends on 64BIT
@@ -2881,7 +2864,7 @@ config RANDOMIZE_BASE_MAX_OFFSET
config NODES_SHIFT
int
default "6"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
@@ -3291,13 +3274,6 @@ config I8253
select CLKSRC_I8253
select CLKEVT_I8253
select MIPS_EXTERNAL_TIMER
-
-config ZONE_DMA
- bool
-
-config ZONE_DMA32
- bool
-
endmenu
config TRAD_SIGNALS
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 7a8d94cdd493..43dbf5930796 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -77,6 +77,7 @@ config CMDLINE_OVERRIDE
config SB1XXX_CORELIS
bool "Corelis Debugger"
depends on SIBYTE_SB1xxx_SOC
+ select DEBUG_KERNEL if !COMPILE_TEST
select DEBUG_INFO if !COMPILE_TEST
help
Select compile flags that produce code that can be processed by the
@@ -101,6 +102,14 @@ config DEBUG_ZBOOT
to reduce the kernel image size and speed up the booting procedure a
little.
+config ZBOOT_INGENIC_UART
+ int "UART to use for compressed kernel debugging"
+ depends on DEBUG_ZBOOT && MACH_INGENIC_SOC
+ default 0
+ range 0 4
+ help
+ Specify the UART that should be used for compressed kernel debugging.
+
config SPINLOCK_TEST
bool "Enable spinlock timing tests in debugfs"
depends on DEBUG_FS
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index e71d587af49c..258234c35a09 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -50,7 +50,7 @@ tool-archpref = $(64bit-tool-archpref)
UTS_MACHINE := mips64
endif
-ifneq ($(SUBARCH),$(ARCH))
+ifdef cross_compiling
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := $(call cc-cross-prefix, $(tool-archpref)-linux- $(tool-archpref)-linux-gnu- $(tool-archpref)-unknown-linux-gnu-)
endif
diff --git a/arch/mips/alchemy/board-xxs1500.c b/arch/mips/alchemy/board-xxs1500.c
index b184baa4e56a..f175bce2987f 100644
--- a/arch/mips/alchemy/board-xxs1500.c
+++ b/arch/mips/alchemy/board-xxs1500.c
@@ -18,6 +18,7 @@
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/gpio-au1000.h>
#include <prom.h>
const char *get_system_type(void)
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
index f0c830337104..c01be8c45271 100644
--- a/arch/mips/alchemy/common/clock.c
+++ b/arch/mips/alchemy/common/clock.c
@@ -111,7 +111,7 @@ static struct clk_aliastable {
/* access locks to SYS_FREQCTRL0/1 and SYS_CLKSRC registers */
static spinlock_t alchemy_clk_fg0_lock;
static spinlock_t alchemy_clk_fg1_lock;
-static spinlock_t alchemy_clk_csrc_lock;
+static DEFINE_SPINLOCK(alchemy_clk_csrc_lock);
/* CPU Core clock *****************************************************/
@@ -996,7 +996,6 @@ static int __init alchemy_clk_setup_imux(int ctype)
if (!a)
return -ENOMEM;
- spin_lock_init(&alchemy_clk_csrc_lock);
ret = 0;
for (i = 0; i < 6; i++) {
diff --git a/arch/mips/ar7/clock.c b/arch/mips/ar7/clock.c
index 95def949c971..c717acbc5506 100644
--- a/arch/mips/ar7/clock.c
+++ b/arch/mips/ar7/clock.c
@@ -13,7 +13,9 @@
#include <linux/gcd.h>
#include <linux/io.h>
#include <linux/err.h>
+#include <linux/clkdev.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <asm/addrspace.h>
#include <asm/mach-ar7/ar7.h>
@@ -84,17 +86,17 @@ struct tnetd7200_clocks {
struct tnetd7200_clock usb;
};
-static struct clk bus_clk = {
+struct clk_rate {
+ u32 rate;
+};
+static struct clk_rate bus_clk = {
.rate = 125000000,
};
-static struct clk cpu_clk = {
+static struct clk_rate cpu_clk = {
.rate = 150000000,
};
-static struct clk dsp_clk;
-static struct clk vbus_clk;
-
static void approximate(int base, int target, int *prediv,
int *postdiv, int *mul)
{
@@ -240,6 +242,8 @@ static void __init tnetd7300_init_clocks(void)
struct tnetd7300_clocks *clocks =
ioremap(UR8_REGS_CLOCKS,
sizeof(struct tnetd7300_clocks));
+ u32 dsp_clk;
+ struct clk *clk;
bus_clk.rate = tnetd7300_get_clock(BUS_PLL_SOURCE_SHIFT,
&clocks->bus, bootcr, AR7_AFE_CLOCK);
@@ -250,12 +254,18 @@ static void __init tnetd7300_init_clocks(void)
else
cpu_clk.rate = bus_clk.rate;
- if (dsp_clk.rate == 250000000)
+ dsp_clk = tnetd7300_dsp_clock();
+ if (dsp_clk == 250000000)
tnetd7300_set_clock(DSP_PLL_SOURCE_SHIFT, &clocks->dsp,
- bootcr, dsp_clk.rate);
+ bootcr, dsp_clk);
iounmap(clocks);
iounmap(bootcr);
+
+ clk = clk_register_fixed_rate(NULL, "cpu", NULL, 0, cpu_clk.rate);
+ clkdev_create(clk, "cpu", NULL);
+ clk = clk_register_fixed_rate(NULL, "dsp", NULL, 0, dsp_clk);
+ clkdev_create(clk, "dsp", NULL);
}
static void tnetd7200_set_clock(int base, struct tnetd7200_clock *clock,
@@ -327,6 +337,7 @@ static void __init tnetd7200_init_clocks(void)
int cpu_base, cpu_mul, cpu_prediv, cpu_postdiv;
int dsp_base, dsp_mul, dsp_prediv, dsp_postdiv;
int usb_base, usb_mul, usb_prediv, usb_postdiv;
+ struct clk *clk;
cpu_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_CPU, bootcr);
dsp_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_DSP, bootcr);
@@ -395,100 +406,34 @@ static void __init tnetd7200_init_clocks(void)
usb_prediv, usb_postdiv, -1, usb_mul,
TNETD7200_DEF_USB_CLK);
- dsp_clk.rate = cpu_clk.rate;
-
iounmap(clocks);
iounmap(bootcr);
-}
-
-/*
- * Linux clock API
- */
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- if (!clk)
- return 0;
- return clk->rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-struct clk *clk_get(struct device *dev, const char *id)
-{
- if (!strcmp(id, "bus"))
- return &bus_clk;
- /* cpmac and vbus share the same rate */
- if (!strcmp(id, "cpmac"))
- return &vbus_clk;
- if (!strcmp(id, "cpu"))
- return &cpu_clk;
- if (!strcmp(id, "dsp"))
- return &dsp_clk;
- if (!strcmp(id, "vbus"))
- return &vbus_clk;
- return ERR_PTR(-ENOENT);
+ clk = clk_register_fixed_rate(NULL, "cpu", NULL, 0, cpu_clk.rate);
+ clkdev_create(clk, "cpu", NULL);
+ clkdev_create(clk, "dsp", NULL);
}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_put);
void __init ar7_init_clocks(void)
{
+ struct clk *clk;
+
switch (ar7_chip_id()) {
case AR7_CHIP_7100:
case AR7_CHIP_7200:
tnetd7200_init_clocks();
break;
case AR7_CHIP_7300:
- dsp_clk.rate = tnetd7300_dsp_clock();
tnetd7300_init_clocks();
break;
default:
break;
}
+ clk = clk_register_fixed_rate(NULL, "bus", NULL, 0, bus_clk.rate);
+ clkdev_create(clk, "bus", NULL);
/* adjust vbus clock rate */
- vbus_clk.rate = bus_clk.rate / 2;
-}
-
-/* dummy functions, should not be called */
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- WARN_ON(clk);
- return 0;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- WARN_ON(clk);
- return 0;
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-int clk_set_parent(struct clk *clk, struct clk *parent)
-{
- WARN_ON(clk);
- return 0;
-}
-EXPORT_SYMBOL(clk_set_parent);
-
-struct clk *clk_get_parent(struct clk *clk)
-{
- WARN_ON(clk);
- return NULL;
+ clk = clk_register_fixed_factor(NULL, "vbus", "bus", 0, 1, 2);
+ clkdev_create(clk, "vbus", NULL);
+ clkdev_create(clk, "cpmac", "cpmac.1");
+ clkdev_create(clk, "cpmac", "cpmac.0");
}
-EXPORT_SYMBOL(clk_get_parent);
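
Annotation: once the fixed-rate and fixed-factor clocks are registered through the common framework, the hand-rolled clk_get()/clk_get_rate()/clk_enable() stubs can go; consumers resolve the same names via the clkdev lookups created above (lookups are needed for both cpmac.0 and cpmac.1, the two AR7 Ethernet MACs). A sketch of the consumer side, kernel context assumed:

	struct clk *clk;

	clk = clk_get(NULL, "cpu");	/* matched via clkdev_create(clk, "cpu", NULL) */
	if (!IS_ERR(clk)) {
		pr_info("ar7: cpu runs at %lu Hz\n", clk_get_rate(clk));
		clk_put(clk);
	}
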
diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
index 164115944a7f..5a3e325275d0 100644
--- a/arch/mips/bcm63xx/clk.c
+++ b/arch/mips/bcm63xx/clk.c
@@ -76,7 +76,7 @@ static struct clk clk_enet_misc = {
};
/*
- * Ethernet MAC clocks: only revelant on 6358, silently enable misc
+ * Ethernet MAC clocks: only relevant on 6358, silently enable misc
* clocks
*/
static void enetx_set(struct clk *clk, int enable)
diff --git a/arch/mips/bcm63xx/gpio.c b/arch/mips/bcm63xx/gpio.c
index 16f353ac3441..5c4a233db55f 100644
--- a/arch/mips/bcm63xx/gpio.c
+++ b/arch/mips/bcm63xx/gpio.c
@@ -43,8 +43,7 @@ static void bcm63xx_gpio_set(struct gpio_chip *chip,
u32 *v;
unsigned long flags;
- if (gpio >= chip->ngpio)
- BUG();
+ BUG_ON(gpio >= chip->ngpio);
if (gpio < 32) {
reg = gpio_out_low_reg;
@@ -70,8 +69,7 @@ static int bcm63xx_gpio_get(struct gpio_chip *chip, unsigned gpio)
u32 reg;
u32 mask;
- if (gpio >= chip->ngpio)
- BUG();
+ BUG_ON(gpio >= chip->ngpio);
if (gpio < 32) {
reg = gpio_out_low_reg;
@@ -92,8 +90,7 @@ static int bcm63xx_gpio_set_direction(struct gpio_chip *chip,
u32 tmp;
unsigned long flags;
- if (gpio >= chip->ngpio)
- BUG();
+ BUG_ON(gpio >= chip->ngpio);
if (gpio < 32) {
reg = GPIO_CTL_LO_REG;
diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
index 49061b870680..915ce4b189c1 100644
--- a/arch/mips/bmips/dma.c
+++ b/arch/mips/bmips/dma.c
@@ -10,7 +10,7 @@
#include <linux/device.h>
#include <linux/dma-direction.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index f93f72bcba97..e4b7839293e1 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -18,7 +18,7 @@ include $(srctree)/arch/mips/Kbuild.platforms
BOOT_HEAP_SIZE := 0x400000
# Disable Function Tracer
-KBUILD_CFLAGS := $(filter-out -pg, $(KBUILD_CFLAGS))
+KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE), $(KBUILD_CFLAGS))
KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS))
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index e3946b06e840..3d70d15ada28 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -14,6 +14,7 @@
#include <asm/addrspace.h>
#include <asm/unaligned.h>
+#include <asm-generic/vmlinux.lds.h>
/*
* These two variables specify the free mem region
@@ -120,6 +121,13 @@ void decompress_kernel(unsigned long boot_heap_start)
/* last four bytes is always image size in little endian */
image_size = get_unaligned_le32((void *)&__image_end - 4);
+ /* The device tree's address must be properly aligned */
+ image_size = ALIGN(image_size, STRUCT_ALIGNMENT);
+
+ puts("Copy device tree to address ");
+ puthex(VMLINUX_LOAD_ADDRESS_ULL + image_size);
+ puts("\n");
+
/* copy dtb to where the booted kernel will expect it */
memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size,
__appended_dtb, dtb_size);
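
Annotation: the appended DTB is copied to VMLINUX_LOAD_ADDRESS_ULL + image_size, so image_size is first rounded up to STRUCT_ALIGNMENT (32 bytes in asm-generic/vmlinux.lds.h) to give the kernel a properly aligned device tree. A runnable userspace mirror of the rounding:

	#include <stdio.h>

	/* Round x up to the next multiple of the power-of-two a,
	 * as include/linux/align.h does. */
	#define ALIGN(x, a)	(((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

	int main(void)
	{
		unsigned long image_size = 0x403214;

		/* STRUCT_ALIGNMENT is 32 */
		printf("%#lx -> %#lx\n", image_size,
		       ALIGN(image_size, 32));	/* 0x403214 -> 0x403220 */
		return 0;
	}
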
diff --git a/arch/mips/boot/compressed/uart-16550.c b/arch/mips/boot/compressed/uart-16550.c
index aee8d7b8f091..c18d7f72d9d9 100644
--- a/arch/mips/boot/compressed/uart-16550.c
+++ b/arch/mips/boot/compressed/uart-16550.c
@@ -19,8 +19,8 @@
#endif
#ifdef CONFIG_MACH_INGENIC
-#define INGENIC_UART0_BASE_ADDR 0x10030000
-#define PORT(offset) (CKSEG1ADDR(INGENIC_UART0_BASE_ADDR) + (4 * offset))
+#define INGENIC_UART_BASE_ADDR (0x10030000 + 0x1000 * CONFIG_ZBOOT_INGENIC_UART)
+#define PORT(offset) (CKSEG1ADDR(INGENIC_UART_BASE_ADDR) + (4 * offset))
#endif
#ifdef CONFIG_CPU_XLR
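
Annotation: the Ingenic UARTs sit 0x1000 apart, so the new ZBOOT_INGENIC_UART Kconfig knob simply selects an offset from UART0. A quick check of the arithmetic (standalone sketch, not kernel code):

	#include <stdio.h>

	#define INGENIC_UART_BASE_ADDR(n)	(0x10030000 + 0x1000 * (n))

	int main(void)
	{
		/* e.g. CONFIG_ZBOOT_INGENIC_UART=1 selects UART1 */
		printf("%#x\n", INGENIC_UART_BASE_ADDR(1));	/* 0x10031000 */
		return 0;
	}
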
diff --git a/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts b/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts
index ed6023a91763..d702a843c74a 100644
--- a/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts
+++ b/arch/mips/boot/dts/brcm/bcm3368-netgear-cvg834g.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm3368.dtsi"
+#include "bcm3368.dtsi"
/ {
compatible = "netgear,cvg834g", "brcm,bcm3368";
diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi
index 69cbef472377..883ca8bed8e7 100644
--- a/arch/mips/boot/dts/brcm/bcm3368.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi
@@ -1,4 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
+
+#include "dt-bindings/clock/bcm3368-clock.h"
+
/ {
#address-cells = <1>;
#size-cells = <1>;
@@ -59,7 +62,7 @@
periph_cntl: syscon@fff8c008 {
compatible = "syscon";
- reg = <0xfff8c000 0x4>;
+ reg = <0xfff8c008 0x4>;
native-endian;
};
diff --git a/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts b/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts
index 8d010b919de2..b511bc7125d5 100644
--- a/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts
+++ b/arch/mips/boot/dts/brcm/bcm63268-comtrend-vr-3032u.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm63268.dtsi"
+#include "bcm63268.dtsi"
/ {
compatible = "comtrend,vr-3032u", "brcm,bcm63268";
diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi
index e0021ff9f144..c3ce49ec675f 100644
--- a/arch/mips/boot/dts/brcm/bcm63268.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi
@@ -1,4 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
+
+#include "dt-bindings/clock/bcm63268-clock.h"
+#include "dt-bindings/reset/bcm63268-reset.h"
+#include "dt-bindings/soc/bcm63268-pm.h"
+
/ {
#address-cells = <1>;
#size-cells = <1>;
@@ -24,16 +29,29 @@
};
clocks {
- periph_clk: periph-clk {
+ periph_osc: periph-osc {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <50000000>;
+ clock-output-names = "periph";
+ };
+
+ hsspi_osc: hsspi-osc {
+ compatible = "fixed-clock";
+
+ #clock-cells = <0>;
+
+ clock-frequency = <400000000>;
+ clock-output-names = "hsspi_osc";
};
};
aliases {
+ nflash = &nflash;
serial0 = &uart0;
serial1 = &uart1;
+ spi0 = &lsspi;
+ spi1 = &hsspi;
};
cpu_intc: interrupt-controller {
@@ -51,23 +69,22 @@
compatible = "simple-bus";
ranges;
- clkctl: clock-controller@10000004 {
+ periph_clk: clock-controller@10000004 {
compatible = "brcm,bcm63268-clocks";
reg = <0x10000004 0x4>;
#clock-cells = <1>;
};
- periph_cntl: syscon@10000008 {
+ pll_cntl: syscon@10000008 {
compatible = "syscon";
- reg = <0x10000000 0xc>;
+ reg = <0x10000008 0x4>;
native-endian;
- };
- reboot: syscon-reboot@10000008 {
- compatible = "syscon-reboot";
- regmap = <&periph_cntl>;
- offset = <0x0>;
- mask = <0x1>;
+ reboot {
+ compatible = "syscon-reboot";
+ offset = <0x0>;
+ mask = <0x1>;
+ };
};
periph_rst: reset-controller@10000010 {
@@ -88,6 +105,16 @@
interrupts = <2>, <3>;
};
+ wdt: watchdog@1000009c {
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0x1000009c 0xc>;
+
+ clocks = <&periph_osc>;
+ clock-names = "refclk";
+
+ timeout-sec = <30>;
+ };
+
uart0: serial@10000180 {
compatible = "brcm,bcm6345-uart";
reg = <0x10000180 0x18>;
@@ -95,12 +122,34 @@
interrupt-parent = <&periph_intc>;
interrupts = <5>;
- clocks = <&periph_clk>;
+ clocks = <&periph_osc>;
clock-names = "refclk";
status = "disabled";
};
+ nflash: nand@10000200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,nand-bcm6368",
+ "brcm,brcmnand-v4.0",
+ "brcm,brcmnand";
+ reg = <0x10000200 0x180>,
+ <0x10000600 0x200>,
+ <0x100000b0 0x10>;
+ reg-names = "nand",
+ "nand-cache",
+ "nand-int-base";
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <50>;
+
+ clocks = <&periph_clk BCM63268_CLK_NAND>;
+ clock-names = "nand";
+
+ status = "disabled";
+ };
+
uart1: serial@100001a0 {
compatible = "brcm,bcm6345-uart";
reg = <0x100001a0 0x18>;
@@ -108,17 +157,44 @@
interrupt-parent = <&periph_intc>;
interrupts = <34>;
- clocks = <&periph_clk>;
+ clocks = <&periph_osc>;
clock-names = "refclk";
status = "disabled";
};
- leds0: led-controller@10001900 {
+ lsspi: spi@10000800 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "brcm,bcm6328-leds";
- reg = <0x10001900 0x24>;
+ compatible = "brcm,bcm6358-spi";
+ reg = <0x10000800 0x70c>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <80>;
+
+ clocks = <&periph_clk BCM63268_CLK_SPI>;
+ clock-names = "spi";
+
+ resets = <&periph_rst BCM63268_RST_SPI>;
+
+ status = "disabled";
+ };
+
+ hsspi: spi@10001000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6328-hsspi";
+ reg = <0x10001000 0x600>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <6>;
+
+ clocks = <&periph_clk BCM63268_CLK_HSSPI>,
+ <&hsspi_osc>;
+ clock-names = "hsspi",
+ "pll";
+
+ resets = <&periph_rst BCM63268_RST_SPI>;
status = "disabled";
};
@@ -129,6 +205,15 @@
#power-domain-cells = <1>;
};
+ leds0: led-controller@10001900 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6328-leds";
+ reg = <0x10001900 0x24>;
+
+ status = "disabled";
+ };
+
ehci: usb@10002500 {
compatible = "brcm,bcm63268-ehci", "generic-ehci";
reg = <0x10002500 0x100>;
@@ -137,6 +222,9 @@
interrupt-parent = <&periph_intc>;
interrupts = <10>;
+ phys = <&usbh 0>;
+ phy-names = "usb";
+
status = "disabled";
};
@@ -149,6 +237,25 @@
interrupt-parent = <&periph_intc>;
interrupts = <9>;
+ phys = <&usbh 0>;
+ phy-names = "usb";
+
+ status = "disabled";
+ };
+
+ usbh: usb-phy@10002700 {
+ compatible = "brcm,bcm63268-usbh-phy";
+ reg = <0x10002700 0x38>;
+ #phy-cells = <1>;
+
+ clocks = <&periph_clk BCM63268_CLK_USBH>;
+ clock-names = "usbh";
+
+ power-domains = <&periph_pwr BCM63268_POWER_DOMAIN_USBH>;
+
+ resets = <&periph_rst BCM63268_RST_USBH>;
+ reset-names = "usbh";
+
status = "disabled";
};
};
diff --git a/arch/mips/boot/dts/brcm/bcm6328.dtsi b/arch/mips/boot/dts/brcm/bcm6328.dtsi
index 9dc558763c46..634618d4377e 100644
--- a/arch/mips/boot/dts/brcm/bcm6328.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6328.dtsi
@@ -1,4 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
+
+#include "dt-bindings/clock/bcm6328-clock.h"
+#include "dt-bindings/reset/bcm6328-reset.h"
+#include "dt-bindings/soc/bcm6328-pm.h"
+
/ {
#address-cells = <1>;
#size-cells = <1>;
@@ -24,16 +29,26 @@
};
clocks {
- periph_clk: periph-clk {
+ periph_osc: periph-osc {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <50000000>;
+ clock-output-names = "periph";
+ };
+
+ hsspi_osc: hsspi-osc {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <133333333>;
+ clock-output-names = "hsspi_osc";
};
};
aliases {
+ nflash = &nflash;
serial0 = &uart0;
serial1 = &uart1;
+ spi1 = &hsspi;
};
cpu_intc: interrupt-controller {
@@ -51,7 +66,7 @@
compatible = "simple-bus";
ranges;
- clkctl: clock-controller@10000004 {
+ periph_clk: clock-controller@10000004 {
compatible = "brcm,bcm6328-clocks";
reg = <0x10000004 0x4>;
#clock-cells = <1>;
@@ -75,37 +90,71 @@
interrupts = <2>, <3>;
};
+ wdt: watchdog@1000005c {
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0x1000005c 0xc>;
+
+ clocks = <&periph_osc>;
+ clock-names = "refclk";
+
+ timeout-sec = <30>;
+ };
+
+ soft_reset: syscon@10000068 {
+ compatible = "syscon";
+ reg = <0x10000068 0x4>;
+ native-endian;
+
+ reboot {
+ compatible = "syscon-reboot";
+ offset = <0x0>;
+ mask = <0x1>;
+ };
+ };
+
uart0: serial@10000100 {
compatible = "brcm,bcm6345-uart";
reg = <0x10000100 0x18>;
+
interrupt-parent = <&periph_intc>;
interrupts = <28>;
- clocks = <&periph_clk>;
+
+ clocks = <&periph_osc>;
clock-names = "refclk";
+
status = "disabled";
};
uart1: serial@10000120 {
compatible = "brcm,bcm6345-uart";
reg = <0x10000120 0x18>;
+
interrupt-parent = <&periph_intc>;
interrupts = <39>;
- clocks = <&periph_clk>;
+
+ clocks = <&periph_osc>;
clock-names = "refclk";
+
status = "disabled";
};
- timer: syscon@10000040 {
- compatible = "syscon";
- reg = <0x10000040 0x2c>;
- native-endian;
- };
+ nflash: nand@10000200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,nand-bcm6368",
+ "brcm,brcmnand-v2.2",
+ "brcm,brcmnand";
+ reg = <0x10000200 0x180>,
+ <0x10000400 0x200>,
+ <0x10000070 0x10>;
+ reg-names = "nand",
+ "nand-cache",
+ "nand-int-base";
- reboot: syscon-reboot@10000068 {
- compatible = "syscon-reboot";
- regmap = <&timer>;
- offset = <0x28>;
- mask = <0x1>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <0>;
+
+ status = "disabled";
};
leds0: led-controller@10000800 {
@@ -113,6 +162,27 @@
#size-cells = <0>;
compatible = "brcm,bcm6328-leds";
reg = <0x10000800 0x24>;
+
+ status = "disabled";
+ };
+
+ hsspi: spi@10001000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6328-hsspi";
+ reg = <0x10001000 0x600>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <29>;
+
+ clocks = <&periph_clk BCM6328_CLK_HSSPI>,
+ <&hsspi_osc>;
+ clock-names = "hsspi",
+ "pll";
+
+ resets = <&periph_rst BCM6328_RST_SPI>;
+ reset-names = "hsspi";
+
status = "disabled";
};
@@ -126,8 +196,13 @@
compatible = "brcm,bcm6328-ehci", "generic-ehci";
reg = <0x10002500 0x100>;
big-endian;
+
interrupt-parent = <&periph_intc>;
interrupts = <42>;
+
+ phys = <&usbh 0>;
+ phy-names = "usb";
+
status = "disabled";
};
@@ -136,8 +211,29 @@
reg = <0x10002600 0x100>;
big-endian;
no-big-frame-no;
+
interrupt-parent = <&periph_intc>;
interrupts = <41>;
+
+ phys = <&usbh 0>;
+ phy-names = "usb";
+
+ status = "disabled";
+ };
+
+ usbh: usb-phy@10002700 {
+ compatible = "brcm,bcm6328-usbh-phy";
+ reg = <0x10002700 0x38>;
+ #phy-cells = <1>;
+
+ clocks = <&periph_clk BCM6328_CLK_USBH>;
+ clock-names = "usbh";
+
+ power-domains = <&periph_pwr BCM6328_POWER_DOMAIN_USBH>;
+
+ resets = <&periph_rst BCM6328_RST_USBH>;
+ reset-names = "usbh";
+
status = "disabled";
};
};
diff --git a/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts b/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts
index 53e57cc29291..c646690ee3df 100644
--- a/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts
+++ b/arch/mips/boot/dts/brcm/bcm6358-neufbox4-sercomm.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm6358.dtsi"
+#include "bcm6358.dtsi"
/ {
compatible = "sfr,nb4-ser", "brcm,bcm6358";
diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi
index 9d93e7f5e6fc..777c4379ed03 100644
--- a/arch/mips/boot/dts/brcm/bcm6358.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi
@@ -1,4 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
+
+#include "dt-bindings/clock/bcm6358-clock.h"
+#include "dt-bindings/reset/bcm6358-reset.h"
+
/ {
#address-cells = <1>;
#size-cells = <1>;
@@ -24,16 +28,19 @@
};
clocks {
- periph_clk: periph-clk {
+ periph_osc: periph-osc {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <50000000>;
+ clock-output-names = "periph";
};
};
aliases {
+ pflash = &pflash;
serial0 = &uart0;
serial1 = &uart1;
+ spi0 = &lsspi;
};
cpu_intc: interrupt-controller {
@@ -51,23 +58,22 @@
compatible = "simple-bus";
ranges;
- clkctl: clock-controller@fffe0004 {
+ periph_clk: clock-controller@fffe0004 {
compatible = "brcm,bcm6358-clocks";
reg = <0xfffe0004 0x4>;
#clock-cells = <1>;
};
- periph_cntl: syscon@fffe0008 {
+ pll_cntl: syscon@fffe0008 {
compatible = "syscon";
- reg = <0xfffe0000 0x4>;
+ reg = <0xfffe0008 0x4>;
native-endian;
- };
- reboot: syscon-reboot@fffe0008 {
- compatible = "syscon-reboot";
- regmap = <&periph_cntl>;
- offset = <0x0>;
- mask = <0x1>;
+ reboot {
+ compatible = "syscon-reboot";
+ offset = <0x0>;
+ mask = <0x1>;
+ };
};
periph_intc: interrupt-controller@fffe000c {
@@ -88,6 +94,16 @@
#reset-cells = <1>;
};
+ wdt: watchdog@fffe005c {
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0xfffe005c 0xc>;
+
+ clocks = <&periph_osc>;
+ clock-names = "refclk";
+
+ timeout-sec = <30>;
+ };
+
leds0: led-controller@fffe00d0 {
#address-cells = <1>;
#size-cells = <0>;
@@ -104,7 +120,7 @@
interrupt-parent = <&periph_intc>;
interrupts = <2>;
- clocks = <&periph_clk>;
+ clocks = <&periph_osc>;
clock-names = "refclk";
status = "disabled";
@@ -117,18 +133,41 @@
interrupt-parent = <&periph_intc>;
interrupts = <3>;
- clocks = <&periph_clk>;
+ clocks = <&periph_osc>;
clock-names = "refclk";
status = "disabled";
};
+ lsspi: spi@fffe0800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6358-spi";
+ reg = <0xfffe0800 0x70c>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <1>;
+
+ clocks = <&periph_clk BCM6358_CLK_SPI>;
+ clock-names = "spi";
+
+ resets = <&periph_rst BCM6358_RST_SPI>;
+ reset-names = "spi";
+
+ status = "disabled";
+ };
+
ehci: usb@fffe1300 {
compatible = "brcm,bcm6358-ehci", "generic-ehci";
reg = <0xfffe1300 0x100>;
big-endian;
+
interrupt-parent = <&periph_intc>;
interrupts = <10>;
+
+ phys = <&usbh 0>;
+ phy-names = "usb";
+
status = "disabled";
};
@@ -137,9 +176,35 @@
reg = <0xfffe1400 0x100>;
big-endian;
no-big-frame-no;
+
interrupt-parent = <&periph_intc>;
interrupts = <5>;
+
+ phys = <&usbh 0>;
+ phy-names = "usb";
+
status = "disabled";
};
+
+ usbh: usb-phy@fffe1500 {
+ compatible = "brcm,bcm6358-usbh-phy";
+ reg = <0xfffe1500 0x38>;
+ #phy-cells = <1>;
+
+ resets = <&periph_rst BCM6358_RST_USBH>;
+ reset-names = "usbh";
+
+ status = "disabled";
+ };
+ };
+
+ pflash: nor@1e000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x1e000000 0x2000000>;
+ bank-width = <2>;
+
+ status = "disabled";
};
};
diff --git a/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts b/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts
index 3e83bee5b91e..f83d95ca0514 100644
--- a/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts
+++ b/arch/mips/boot/dts/brcm/bcm6362-neufbox6-sercomm.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm6362.dtsi"
+#include "bcm6362.dtsi"
/ {
compatible = "sfr,nb6-ser", "brcm,bcm6362";
diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi b/arch/mips/boot/dts/brcm/bcm6362.dtsi
index eb10341b75ba..d74021925c53 100644
--- a/arch/mips/boot/dts/brcm/bcm6362.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi
@@ -1,4 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
+
+#include "dt-bindings/clock/bcm6362-clock.h"
+#include "dt-bindings/reset/bcm6362-reset.h"
+#include "dt-bindings/soc/bcm6362-pm.h"
+
/ {
#address-cells = <1>;
#size-cells = <1>;
@@ -24,16 +29,29 @@
};
clocks {
- periph_clk: periph-clk {
+ periph_osc: periph-osc {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <50000000>;
+ clock-output-names = "periph";
+ };
+
+ hsspi_osc: hsspi-osc {
+ compatible = "fixed-clock";
+
+ #clock-cells = <0>;
+
+ clock-frequency = <400000000>;
+ clock-output-names = "hsspi_osc";
};
};
aliases {
+ nflash = &nflash;
serial0 = &uart0;
serial1 = &uart1;
+ spi0 = &lsspi;
+ spi1 = &hsspi;
};
cpu_intc: interrupt-controller {
@@ -51,23 +69,22 @@
compatible = "simple-bus";
ranges;
- clkctl: clock-controller@10000004 {
+ periph_clk: clock-controller@10000004 {
compatible = "brcm,bcm6362-clocks";
reg = <0x10000004 0x4>;
#clock-cells = <1>;
};
- periph_cntl: syscon@10000008 {
+ pll_cntl: syscon@10000008 {
compatible = "syscon";
- reg = <0x10000000 0xc>;
+ reg = <0x10000008 0x4>;
native-endian;
- };
- reboot: syscon-reboot@10000008 {
- compatible = "syscon-reboot";
- regmap = <&periph_cntl>;
- offset = <0x0>;
- mask = <0x1>;
+ reboot {
+ compatible = "syscon-reboot";
+ offset = <0x0>;
+ mask = <0x1>;
+ };
};
periph_rst: reset-controller@10000010 {
@@ -88,6 +105,16 @@
interrupts = <2>, <3>;
};
+ wdt: watchdog@1000005c {
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0x1000005c 0xc>;
+
+ clocks = <&periph_osc>;
+ clock-names = "refclk";
+
+ timeout-sec = <30>;
+ };
+
uart0: serial@10000100 {
compatible = "brcm,bcm6345-uart";
reg = <0x10000100 0x18>;
@@ -95,7 +122,7 @@
interrupt-parent = <&periph_intc>;
interrupts = <3>;
- clocks = <&periph_clk>;
+ clocks = <&periph_osc>;
clock-names = "refclk";
status = "disabled";
@@ -108,12 +135,72 @@
interrupt-parent = <&periph_intc>;
interrupts = <4>;
- clocks = <&periph_clk>;
+ clocks = <&periph_osc>;
clock-names = "refclk";
status = "disabled";
};
+ nflash: nand@10000200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,nand-bcm6368",
+ "brcm,brcmnand-v2.2",
+ "brcm,brcmnand";
+ reg = <0x10000200 0x180>,
+ <0x10000600 0x200>,
+ <0x10000070 0x10>;
+ reg-names = "nand",
+ "nand-cache",
+ "nand-int-base";
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <12>;
+
+ clocks = <&periph_clk BCM6362_CLK_NAND>;
+ clock-names = "nand";
+
+ status = "disabled";
+ };
+
+ lsspi: spi@10000800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6358-spi";
+ reg = <0x10000800 0x70c>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <2>;
+
+ clocks = <&periph_clk BCM6362_CLK_SPI>;
+ clock-names = "spi";
+
+ resets = <&periph_rst BCM6362_RST_SPI>;
+ reset-names = "spi";
+
+ status = "disabled";
+ };
+
+ hsspi: spi@10001000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6328-hsspi";
+ reg = <0x10001000 0x600>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <5>;
+
+ clocks = <&periph_clk BCM6362_CLK_HSSPI>,
+ <&hsspi_osc>;
+ clock-names = "hsspi",
+ "pll";
+
+ resets = <&periph_rst BCM6362_RST_SPI>;
+ reset-names = "hsspi";
+
+ status = "disabled";
+ };
+
periph_pwr: power-controller@10001848 {
compatible = "brcm,bcm6362-power-controller";
reg = <0x10001848 0x4>;
@@ -137,6 +224,9 @@
interrupt-parent = <&periph_intc>;
interrupts = <10>;
+ phys = <&usbh 0>;
+ phy-names = "usb";
+
status = "disabled";
};
@@ -149,6 +239,26 @@
interrupt-parent = <&periph_intc>;
interrupts = <9>;
+ phys = <&usbh 0>;
+ phy-names = "usb";
+
+ status = "disabled";
+ };
+
+ usbh: usb-phy@10002700 {
+ compatible = "brcm,bcm6362-usbh-phy";
+ reg = <0x10002700 0x38>;
+
+ #phy-cells = <1>;
+
+ clocks = <&periph_clk BCM6362_CLK_USBH>;
+ clock-names = "usbh";
+
+ power-domains = <&periph_pwr BCM6362_POWER_DOMAIN_USBH>;
+
+ resets = <&periph_rst BCM6362_RST_USBH>;
+ reset-names = "usbh";
+
status = "disabled";
};
};
diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi
index 52c19f40b9cc..fc15e200877d 100644
--- a/arch/mips/boot/dts/brcm/bcm6368.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi
@@ -1,4 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
+
+#include "dt-bindings/clock/bcm6368-clock.h"
+#include "dt-bindings/reset/bcm6368-reset.h"
+
/ {
#address-cells = <1>;
#size-cells = <1>;
@@ -24,16 +28,20 @@
};
clocks {
- periph_clk: periph-clk {
+ periph_osc: periph-osc {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <50000000>;
+ clock-output-names = "periph";
};
};
aliases {
+ nflash = &nflash;
+ pflash = &pflash;
serial0 = &uart0;
serial1 = &uart1;
+ spi0 = &lsspi;
};
cpu_intc: interrupt-controller {
@@ -51,23 +59,22 @@
compatible = "simple-bus";
ranges;
- clkctl: clock-controller@10000004 {
+ periph_clk: clock-controller@10000004 {
compatible = "brcm,bcm6368-clocks";
reg = <0x10000004 0x4>;
#clock-cells = <1>;
};
- periph_cntl: syscon@100000008 {
+ pll_cntl: syscon@100000008 {
compatible = "syscon";
- reg = <0x10000000 0xc>;
+ reg = <0x10000008 0x4>;
native-endian;
- };
- reboot: syscon-reboot@10000008 {
- compatible = "syscon-reboot";
- regmap = <&periph_cntl>;
- offset = <0x0>;
- mask = <0x1>;
+ reboot {
+ compatible = "syscon-reboot";
+ offset = <0x0>;
+ mask = <0x1>;
+ };
};
periph_rst: reset-controller@10000010 {
@@ -88,31 +95,88 @@
interrupts = <2>, <3>;
};
+ wdt: watchdog@1000005c {
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0x1000005c 0xc>;
+
+ clocks = <&periph_osc>;
+ clock-names = "refclk";
+
+ timeout-sec = <30>;
+ };
+
leds0: led-controller@100000d0 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "brcm,bcm6358-leds";
reg = <0x100000d0 0x8>;
+
status = "disabled";
};
uart0: serial@10000100 {
compatible = "brcm,bcm6345-uart";
reg = <0x10000100 0x18>;
+
interrupt-parent = <&periph_intc>;
interrupts = <2>;
- clocks = <&periph_clk>;
+
+ clocks = <&periph_osc>;
clock-names = "refclk";
+
status = "disabled";
};
uart1: serial@10000120 {
compatible = "brcm,bcm6345-uart";
reg = <0x10000120 0x18>;
+
interrupt-parent = <&periph_intc>;
interrupts = <3>;
- clocks = <&periph_clk>;
+
+ clocks = <&periph_osc>;
clock-names = "refclk";
+
+ status = "disabled";
+ };
+
+ nflash: nand@10000200 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,nand-bcm6368",
+ "brcm,brcmnand-v2.1",
+ "brcm,brcmnand";
+ reg = <0x10000200 0x180>,
+ <0x10000600 0x200>,
+ <0x10000070 0x10>;
+ reg-names = "nand",
+ "nand-cache",
+ "nand-int-base";
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <10>;
+
+ clocks = <&periph_clk BCM6368_CLK_NAND>;
+ clock-names = "nand";
+
+ status = "disabled";
+ };
+
+ lsspi: spi@10000800 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6358-spi";
+ reg = <0x10000800 0x70c>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <1>;
+
+ clocks = <&periph_clk BCM6368_CLK_SPI>;
+ clock-names = "spi";
+
+ resets = <&periph_rst BCM6368_RST_SPI>;
+ reset-names = "spi";
+
status = "disabled";
};
@@ -120,8 +184,13 @@
compatible = "brcm,bcm6368-ehci", "generic-ehci";
reg = <0x10001500 0x100>;
big-endian;
+
interrupt-parent = <&periph_intc>;
interrupts = <7>;
+
+ phys = <&usbh 0>;
+ phy-names = "usb";
+
status = "disabled";
};
@@ -130,9 +199,49 @@
reg = <0x10001600 0x100>;
big-endian;
no-big-frame-no;
+
interrupt-parent = <&periph_intc>;
interrupts = <5>;
+
+ phys = <&usbh 0>;
+ phy-names = "usb";
+
status = "disabled";
};
+
+ usbh: usb-phy@10001700 {
+ compatible = "brcm,bcm6368-usbh-phy";
+ reg = <0x10001700 0x38>;
+ #phy-cells = <1>;
+
+ clocks = <&periph_clk BCM6368_CLK_USBH>;
+ clock-names = "usbh";
+
+ resets = <&periph_rst BCM6368_RST_USBH>;
+ reset-names = "usbh";
+
+ status = "disabled";
+ };
+
+ random: rng@10004180 {
+ compatible = "brcm,bcm6368-rng";
+ reg = <0x10004180 0x14>;
+
+ clocks = <&periph_clk BCM6368_CLK_IPSEC>;
+ clock-names = "ipsec";
+
+ resets = <&periph_rst BCM6368_RST_IPSEC>;
+ reset-names = "ipsec";
+ };
+ };
+
+ pflash: nor@18000000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x18000000 0x2000000>;
+ bank-width = <2>;
+
+ status = "disabled";
};
};
diff --git a/arch/mips/boot/dts/brcm/bcm93384wvg.dts b/arch/mips/boot/dts/brcm/bcm93384wvg.dts
index 601e4d9293ab..7d3f181b8980 100644
--- a/arch/mips/boot/dts/brcm/bcm93384wvg.dts
+++ b/arch/mips/boot/dts/brcm/bcm93384wvg.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm3384_zephyr.dtsi"
+#include "bcm3384_zephyr.dtsi"
/ {
compatible = "brcm,bcm93384wvg", "brcm,bcm3384";
diff --git a/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts b/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts
index 938a8e66128c..f845faa0d682 100644
--- a/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts
+++ b/arch/mips/boot/dts/brcm/bcm93384wvg_viper.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm3384_viper.dtsi"
+#include "bcm3384_viper.dtsi"
/ {
compatible = "brcm,bcm93384wvg-viper", "brcm,bcm3384-viper";
diff --git a/arch/mips/boot/dts/brcm/bcm96368mvwg.dts b/arch/mips/boot/dts/brcm/bcm96368mvwg.dts
index 6d772c394e41..f5e955085308 100644
--- a/arch/mips/boot/dts/brcm/bcm96368mvwg.dts
+++ b/arch/mips/boot/dts/brcm/bcm96368mvwg.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm6368.dtsi"
+#include "bcm6368.dtsi"
/ {
compatible = "brcm,bcm96368mvwg", "brcm,bcm6368";
diff --git a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
index 79e9769f7e00..bda5f796251a 100644
--- a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm7125.dtsi"
+#include "bcm7125.dtsi"
/ {
compatible = "brcm,bcm97125cbmb", "brcm,bcm7125";
diff --git a/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts b/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts
index 28370ff77eeb..9f73735e815c 100644
--- a/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97346dbsmb.dts
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm7346.dtsi"
-/include/ "bcm97xxx-nand-cs1-bch24.dtsi"
+#include "bcm7346.dtsi"
+#include "bcm97xxx-nand-cs1-bch24.dtsi"
/ {
compatible = "brcm,bcm97346dbsmb", "brcm,bcm7346";
diff --git a/arch/mips/boot/dts/brcm/bcm97358svmb.dts b/arch/mips/boot/dts/brcm/bcm97358svmb.dts
index 41c1b510c230..522f2c40d6e6 100644
--- a/arch/mips/boot/dts/brcm/bcm97358svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97358svmb.dts
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm7358.dtsi"
-/include/ "bcm97xxx-nand-cs1-bch4.dtsi"
+#include "bcm7358.dtsi"
+#include "bcm97xxx-nand-cs1-bch4.dtsi"
/ {
compatible = "brcm,bcm97358svmb", "brcm,bcm7358";
diff --git a/arch/mips/boot/dts/brcm/bcm97360svmb.dts b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
index 9f6c6c9b7ea7..01f215b08dba 100644
--- a/arch/mips/boot/dts/brcm/bcm97360svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm7360.dtsi"
+#include "bcm7360.dtsi"
/ {
compatible = "brcm,bcm97360svmb", "brcm,bcm7360";
diff --git a/arch/mips/boot/dts/brcm/bcm97362svmb.dts b/arch/mips/boot/dts/brcm/bcm97362svmb.dts
index df8b755c390f..97aeb51b6831 100644
--- a/arch/mips/boot/dts/brcm/bcm97362svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97362svmb.dts
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm7362.dtsi"
-/include/ "bcm97xxx-nand-cs1-bch4.dtsi"
+#include "bcm7362.dtsi"
+#include "bcm97xxx-nand-cs1-bch4.dtsi"
/ {
compatible = "brcm,bcm97362svmb", "brcm,bcm7362";
diff --git a/arch/mips/boot/dts/brcm/bcm97420c.dts b/arch/mips/boot/dts/brcm/bcm97420c.dts
index 086faeaa384a..cc70c2dd4d85 100644
--- a/arch/mips/boot/dts/brcm/bcm97420c.dts
+++ b/arch/mips/boot/dts/brcm/bcm97420c.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm7420.dtsi"
+#include "bcm7420.dtsi"
/ {
compatible = "brcm,bcm97420c", "brcm,bcm7420";
diff --git a/arch/mips/boot/dts/brcm/bcm97425svmb.dts b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
index 0ed22217bf3a..9efecfe1e05c 100644
--- a/arch/mips/boot/dts/brcm/bcm97425svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm7425.dtsi"
-/include/ "bcm97xxx-nand-cs1-bch24.dtsi"
+#include "bcm7425.dtsi"
+#include "bcm97xxx-nand-cs1-bch24.dtsi"
/ {
compatible = "brcm,bcm97425svmb", "brcm,bcm7425";
diff --git a/arch/mips/boot/dts/brcm/bcm97435svmb.dts b/arch/mips/boot/dts/brcm/bcm97435svmb.dts
index 2c145a883aef..b653c6ff74b5 100644
--- a/arch/mips/boot/dts/brcm/bcm97435svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97435svmb.dts
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm7435.dtsi"
-/include/ "bcm97xxx-nand-cs1-bch24.dtsi"
+#include "bcm7435.dtsi"
+#include "bcm97xxx-nand-cs1-bch24.dtsi"
/ {
compatible = "brcm,bcm97435svmb", "brcm,bcm7435";
diff --git a/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts b/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts
index 8d58c1971b30..615d2b97770e 100644
--- a/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts
+++ b/arch/mips/boot/dts/brcm/bcm9ejtagprb.dts
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/dts-v1/;
-/include/ "bcm6328.dtsi"
+#include "bcm6328.dtsi"
/ {
compatible = "brcm,bcm9ejtagprb", "brcm,bcm6328";
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
index 8877c62609de..a688809beebc 100644
--- a/arch/mips/boot/dts/ingenic/ci20.dts
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -118,6 +118,20 @@
assigned-clock-rates = <48000000>;
};
+&tcu {
+ /*
+ * 750 kHz for the system timers and the clocksource:
+ * use channels #0 and #1 for the per-CPU system timers,
+ * and channel #2 for the clocksource.
+ *
+ * 3000 kHz for the OST timer to provide a
+ * higher-precision clocksource.
+ */
+ assigned-clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>,
+ <&tcu TCU_CLK_TIMER2>, <&tcu TCU_CLK_OST>;
+ assigned-clock-rates = <750000>, <750000>, <750000>, <3000000>;
+};
+
&mmc0 {
status = "okay";
@@ -522,13 +536,3 @@
bias-disable;
};
};
-
-&tcu {
- /*
- * 750 kHz for the system timer and 3 MHz for the clocksource,
- * use channel #0 for the system timer, #1 for the clocksource.
- */
- assigned-clocks = <&tcu TCU_CLK_TIMER0>, <&tcu TCU_CLK_TIMER1>,
- <&tcu TCU_CLK_OST>;
- assigned-clock-rates = <750000>, <3000000>, <3000000>;
-};
diff --git a/arch/mips/boot/dts/ingenic/gcw0.dts b/arch/mips/boot/dts/ingenic/gcw0.dts
index bc72304a2440..4abb0318416c 100644
--- a/arch/mips/boot/dts/ingenic/gcw0.dts
+++ b/arch/mips/boot/dts/ingenic/gcw0.dts
@@ -74,7 +74,6 @@
simple-audio-card,widgets =
"Speaker", "Speaker",
"Headphone", "Headphones",
- "Line", "FM Radio",
"Microphone", "Built-in Mic";
simple-audio-card,routing =
"Headphones Amp INL", "LHPOUT",
@@ -85,8 +84,8 @@
"Speaker Amp INR", "ROUT",
"Speaker", "Speaker Amp OUTL",
"Speaker", "Speaker Amp OUTR",
- "LLINEIN", "FM Radio",
- "RLINEIN", "FM Radio",
+ "LLINEIN", "Cap-less",
+ "RLINEIN", "Cap-less",
"Built-in Mic", "MICBIAS",
"MIC1P", "Built-in Mic",
"MIC1N", "Built-in Mic";
@@ -345,7 +344,6 @@
spi-max-frequency = <3125000>;
spi-3wire;
- spi-cs-high;
reset-gpios = <&gpe 2 GPIO_ACTIVE_LOW>;
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
index 8d01feef7ff5..9e34f433b9b5 100644
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -339,7 +339,7 @@
};
i2c0: i2c@10050000 {
- compatible = "ingenic,jz4780-i2c";
+ compatible = "ingenic,jz4780-i2c", "ingenic,jz4770-i2c";
#address-cells = <1>;
#size-cells = <0>;
@@ -357,7 +357,7 @@
};
i2c1: i2c@10051000 {
- compatible = "ingenic,jz4780-i2c";
+ compatible = "ingenic,jz4780-i2c", "ingenic,jz4770-i2c";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x10051000 0x1000>;
@@ -374,7 +374,7 @@
};
i2c2: i2c@10052000 {
- compatible = "ingenic,jz4780-i2c";
+ compatible = "ingenic,jz4780-i2c", "ingenic,jz4770-i2c";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x10052000 0x1000>;
@@ -391,7 +391,7 @@
};
i2c3: i2c@10053000 {
- compatible = "ingenic,jz4780-i2c";
+ compatible = "ingenic,jz4780-i2c", "ingenic,jz4770-i2c";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x10053000 0x1000>;
@@ -408,7 +408,7 @@
};
i2c4: i2c@10054000 {
- compatible = "ingenic,jz4780-i2c";
+ compatible = "ingenic,jz4780-i2c", "ingenic,jz4770-i2c";
#address-cells = <1>;
#size-cells = <0>;
reg = <0x10054000 0x1000>;
diff --git a/arch/mips/boot/dts/ingenic/rs90.dts b/arch/mips/boot/dts/ingenic/rs90.dts
index 4eb1edbfc155..74fee7f01352 100644
--- a/arch/mips/boot/dts/ingenic/rs90.dts
+++ b/arch/mips/boot/dts/ingenic/rs90.dts
@@ -16,6 +16,18 @@
reg = <0x0 0x2000000>;
};
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ vmem: video-memory@1f00000 {
+ compatible = "shared-dma-pool";
+ reg = <0x1f00000 0x100000>;
+ reusable;
+ };
+ };
+
vcc: regulator {
compatible = "regulator-fixed";
@@ -300,6 +312,8 @@
};
&lcd {
+ memory-region = <&vmem>;
+
pinctrl-names = "default";
pinctrl-0 = <&pins_lcd>;
};
diff --git a/arch/mips/boot/dts/ingenic/x1000.dtsi b/arch/mips/boot/dts/ingenic/x1000.dtsi
index aac9dedaf334..dec7909d4baa 100644
--- a/arch/mips/boot/dts/ingenic/x1000.dtsi
+++ b/arch/mips/boot/dts/ingenic/x1000.dtsi
@@ -80,6 +80,11 @@
status = "disabled";
};
+
+ mac_phy_ctrl: mac-phy-ctrl@e8 {
+ compatible = "syscon";
+ reg = <0xe8 0x4>;
+ };
};
ost: timer@12000000 {
@@ -347,6 +352,8 @@
clocks = <&cgu X1000_CLK_MAC>;
clock-names = "stmmaceth";
+ mode-reg = <&mac_phy_ctrl>;
+
status = "disabled";
mdio: mdio {
diff --git a/arch/mips/boot/dts/ingenic/x1830.dtsi b/arch/mips/boot/dts/ingenic/x1830.dtsi
index b21c93057356..215257f8bb1a 100644
--- a/arch/mips/boot/dts/ingenic/x1830.dtsi
+++ b/arch/mips/boot/dts/ingenic/x1830.dtsi
@@ -73,6 +73,11 @@
status = "disabled";
};
+
+ mac_phy_ctrl: mac-phy-ctrl@e8 {
+ compatible = "syscon";
+ reg = <0xe8 0x4>;
+ };
};
ost: timer@12000000 {
@@ -97,9 +102,9 @@
#clock-cells = <1>;
- clocks = <&cgu X1830_CLK_RTCLK
- &cgu X1830_CLK_EXCLK
- &cgu X1830_CLK_PCLK>;
+ clocks = <&cgu X1830_CLK_RTCLK>,
+ <&cgu X1830_CLK_EXCLK>,
+ <&cgu X1830_CLK_PCLK>;
clock-names = "rtc", "ext", "pclk";
interrupt-controller;
@@ -274,8 +279,7 @@
pdma: dma-controller@13420000 {
compatible = "ingenic,x1830-dma";
- reg = <0x13420000 0x400
- 0x13421000 0x40>;
+ reg = <0x13420000 0x400>, <0x13421000 0x40>;
#dma-cells = <2>;
interrupt-parent = <&intc>;
@@ -337,6 +341,8 @@
clocks = <&cgu X1830_CLK_MAC>;
clock-names = "stmmaceth";
+ mode-reg = <&mac_phy_ctrl>;
+
status = "disabled";
mdio: mdio {
diff --git a/arch/mips/boot/dts/loongson/Makefile b/arch/mips/boot/dts/loongson/Makefile
index 8fd0efb37423..5c6433e441ee 100644
--- a/arch/mips/boot/dts/loongson/Makefile
+++ b/arch/mips/boot/dts/loongson/Makefile
@@ -1,4 +1,5 @@
-# SPDX_License_Identifier: GPL_2.0
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_MACH_LOONGSON64) += loongson64_2core_2k1000.dtb
dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_4core_ls7a.dtb
dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_4core_rs780e.dtb
dtb-$(CONFIG_MACH_LOONGSON64) += loongson64c_8core_rs780e.dtb
diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
new file mode 100644
index 000000000000..bfc3d3243ee7
--- /dev/null
+++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
@@ -0,0 +1,289 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+/ {
+ compatible = "loongson,loongson2k1000";
+
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "loongson,gs264";
+ reg = <0x0>;
+ #clock-cells = <1>;
+ clocks = <&cpu_clk>;
+ };
+ };
+
+ memory@200000 {
+ compatible = "memory";
+ device_type = "memory";
+ reg = <0x00000000 0x00200000 0x00000000 0x0ee00000>, /* 238 MB at 2 MB */
+ <0x00000000 0x20000000 0x00000000 0x1f000000>, /* 496 MB at 512 MB */
+ <0x00000001 0x10000000 0x00000001 0xb0000000>; /* 6912 MB at 4352 MB */
+ };
+
+ cpu_clk: cpu_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <800000000>;
+ };
+
+ cpuintc: interrupt-controller {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ package0: bus@10000000 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges = <0 0x10000000 0 0x10000000 0 0x10000000 /* ioports */
+ 0 0x40000000 0 0x40000000 0 0x40000000
+ 0xfe 0x00000000 0xfe 0x00000000 0 0x40000000>;
+
+ liointc0: interrupt-controller@1fe11400 {
+ compatible = "loongson,liointc-2.0";
+ reg = <0 0x1fe11400 0 0x40>,
+ <0 0x1fe11040 0 0x8>,
+ <0 0x1fe11140 0 0x8>;
+ reg-names = "main", "isr0", "isr1";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ interrupt-names = "int0";
+
+ loongson,parent_int_map = <0xffffffff>, /* int0 */
+ <0x00000000>, /* int1 */
+ <0x00000000>, /* int2 */
+ <0x00000000>; /* int3 */
+ };
+
+ liointc1: interrupt-controller@1fe11440 {
+ compatible = "loongson,liointc-2.0";
+ reg = <0 0x1fe11440 0 0x40>,
+ <0 0x1fe11048 0 0x8>,
+ <0 0x1fe11148 0 0x8>;
+ reg-names = "main", "isr0", "isr1";
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <3>;
+ interrupt-names = "int1";
+
+ loongson,parent_int_map = <0x00000000>, /* int0 */
+ <0xffffffff>, /* int1 */
+ <0x00000000>, /* int2 */
+ <0x00000000>; /* int3 */
+ };
+
+ uart0: serial@1fe00000 {
+ compatible = "ns16550a";
+ reg = <0 0x1fe00000 0 0x8>;
+ clock-frequency = <125000000>;
+ interrupt-parent = <&liointc0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ no-loopback-test;
+ };
+
+ pci@1a000000 {
+ compatible = "loongson,ls2k-pci";
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <2>;
+
+ reg = <0 0x1a000000 0 0x02000000>,
+ <0xfe 0x00000000 0 0x20000000>;
+
+ ranges = <0x01000000 0x0 0x00000000 0x0 0x18000000 0x0 0x00010000>,
+ <0x02000000 0x0 0x40000000 0x0 0x40000000 0x0 0x40000000>;
+
+ gmac@3,0 {
+ compatible = "pci0014,7a03.0",
+ "pci0014,7a03",
+ "pciclass0c0320",
+ "pciclass0c03",
+ "loongson, pci-gmac";
+
+ reg = <0x1800 0x0 0x0 0x0 0x0>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>,
+ <13 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "macirq", "eth_lpi";
+ interrupt-parent = <&liointc0>;
+ phy-mode = "rgmii";
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+ };
+
+ gmac@3,1 {
+ compatible = "pci0014,7a03.0",
+ "pci0014,7a03",
+ "pciclass0c0320",
+ "pciclass0c03",
+ "loongson, pci-gmac";
+
+ reg = <0x1900 0x0 0x0 0x0 0x0>;
+ interrupts = <14 IRQ_TYPE_LEVEL_LOW>,
+ <15 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-names = "macirq", "eth_lpi";
+ interrupt-parent = <&liointc0>;
+ phy-mode = "rgmii";
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy1: ethernet-phy@1 {
+ reg = <0>;
+ };
+ };
+ };
+
+ ehci@4,1 {
+ compatible = "pci0014,7a14.0",
+ "pci0014,7a14",
+ "pciclass0c0320",
+ "pciclass0c03";
+
+ reg = <0x2100 0x0 0x0 0x0 0x0>;
+ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ };
+
+ ohci@4,2 {
+ compatible = "pci0014,7a24.0",
+ "pci0014,7a24",
+ "pciclass0c0310",
+ "pciclass0c03";
+
+ reg = <0x2200 0x0 0x0 0x0 0x0>;
+ interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ };
+
+ sata@8,0 {
+ compatible = "pci0014,7a08.0",
+ "pci0014,7a08",
+ "pciclass010601",
+ "pciclass0106";
+
+ reg = <0x4000 0x0 0x0 0x0 0x0>;
+ interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc0>;
+ };
+
+ pci_bridge@9,0 {
+ compatible = "pci0014,7a19.0",
+ "pci0014,7a19",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x4800 0x0 0x0 0x0 0x0>;
+ #interrupt-cells = <1>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_LOW>;
+ external-facing;
+ };
+
+ pci_bridge@a,0 {
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x5000 0x0 0x0 0x0 0x0>;
+ #interrupt-cells = <1>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_LOW>;
+ external-facing;
+ };
+
+ pci_bridge@b,0 {
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x5800 0x0 0x0 0x0 0x0>;
+ #interrupt-cells = <1>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_LOW>;
+ external-facing;
+ };
+
+ pci_bridge@c,0 {
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x6000 0x0 0x0 0x0 0x0>;
+ #interrupt-cells = <1>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_LOW>;
+ external-facing;
+ };
+
+ pci_bridge@d,0 {
+ compatible = "pci0014,7a19.0",
+ "pci0014,7a19",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x6800 0x0 0x0 0x0 0x0>;
+ #interrupt-cells = <1>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_LOW>;
+ external-facing;
+ };
+
+ pci_bridge@e,0 {
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x7000 0x0 0x0 0x0 0x0>;
+ #interrupt-cells = <1>;
+ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_LOW>;
+ external-facing;
+ };
+
+ };
+ };
+};
+
diff --git a/arch/mips/boot/dts/loongson/loongson64_2core_2k1000.dts b/arch/mips/boot/dts/loongson/loongson64_2core_2k1000.dts
new file mode 100644
index 000000000000..e31d2ee65cd5
--- /dev/null
+++ b/arch/mips/boot/dts/loongson/loongson64_2core_2k1000.dts
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/dts-v1/;
+
+#include "loongson64-2k1000.dtsi"
+
+/ {
+ compatible = "loongson,loongson64-2core-2k1000";
+};
+
diff --git a/arch/mips/boot/dts/loongson/loongson64g-package.dtsi b/arch/mips/boot/dts/loongson/loongson64g-package.dtsi
index 38abc570cd82..d4314f62ccc2 100644
--- a/arch/mips/boot/dts/loongson/loongson64g-package.dtsi
+++ b/arch/mips/boot/dts/loongson/loongson64g-package.dtsi
@@ -39,7 +39,7 @@
};
- cpu_uart0: serial@1fe001e0 {
+ cpu_uart0: serial@1fe00100 {
compatible = "ns16550a";
reg = <0 0x1fe00100 0x10>;
clock-frequency = <100000000>;
@@ -48,7 +48,7 @@
no-loopback-test;
};
- cpu_uart1: serial@1fe001e8 {
+ cpu_uart1: serial@1fe00110 {
status = "disabled";
compatible = "ns16550a";
reg = <0 0x1fe00110 0x10>;
diff --git a/arch/mips/boot/dts/loongson/loongson64v_4core_virtio.dts b/arch/mips/boot/dts/loongson/loongson64v_4core_virtio.dts
index 41f0b110d455..d0588d81e0c2 100644
--- a/arch/mips/boot/dts/loongson/loongson64v_4core_virtio.dts
+++ b/arch/mips/boot/dts/loongson/loongson64v_4core_virtio.dts
@@ -88,7 +88,7 @@
interrupt-map-mask = <0x1800 0x0 0x0 0x7>;
};
- isa {
+ isa@18000000 {
compatible = "isa";
#address-cells = <2>;
#size-cells = <1>;
diff --git a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
index f99a7a11fded..2f45fce2cdc4 100644
--- a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
+++ b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi
@@ -186,7 +186,8 @@
compatible = "pci0014,7a03.0",
"pci0014,7a03",
"pciclass020000",
- "pciclass0200";
+ "pciclass0200",
+ "loongson, pci-gmac";
reg = <0x1800 0x0 0x0 0x0 0x0>;
interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
@@ -208,7 +209,8 @@
compatible = "pci0014,7a03.0",
"pci0014,7a03",
"pciclass020000",
- "pciclass0200";
+ "pciclass0200",
+ "loongson, pci-gmac";
reg = <0x1900 0x0 0x0 0x0 0x0>;
interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
@@ -407,7 +409,7 @@
};
};
- isa {
+ isa@18000000 {
compatible = "isa";
#address-cells = <2>;
#size-cells = <1>;
diff --git a/arch/mips/boot/dts/loongson/rs780e-pch.dtsi b/arch/mips/boot/dts/loongson/rs780e-pch.dtsi
index 871c866e0423..6f459511e6c9 100644
--- a/arch/mips/boot/dts/loongson/rs780e-pch.dtsi
+++ b/arch/mips/boot/dts/loongson/rs780e-pch.dtsi
@@ -21,7 +21,7 @@
<0x02000000 0 0x40000000 0 0x40000000 0 0x40000000>;
};
- isa {
+ isa@18000000 {
compatible = "isa";
#address-cells = <2>;
#size-cells = <1>;
diff --git a/arch/mips/boot/dts/mti/sead3.dts b/arch/mips/boot/dts/mti/sead3.dts
index 1cf6728af8fe..046c97a29710 100644
--- a/arch/mips/boot/dts/mti/sead3.dts
+++ b/arch/mips/boot/dts/mti/sead3.dts
@@ -244,7 +244,7 @@
no-loopback-test;
};
- eth@1f010000 {
+ ethernet@1f010000 {
compatible = "smsc,lan9115";
reg = <0x1f010000 0x10000>;
reg-io-width = <4>;
diff --git a/arch/mips/boot/dts/qca/Makefile b/arch/mips/boot/dts/qca/Makefile
index 4451cf45b0ad..6749f77068a8 100644
--- a/arch/mips/boot/dts/qca/Makefile
+++ b/arch/mips/boot/dts/qca/Makefile
@@ -4,4 +4,5 @@ dtb-$(CONFIG_ATH79) += ar9132_tl_wr1043nd_v1.dtb
dtb-$(CONFIG_ATH79) += ar9331_dpt_module.dtb
dtb-$(CONFIG_ATH79) += ar9331_dragino_ms14.dtb
dtb-$(CONFIG_ATH79) += ar9331_omega.dtb
+dtb-$(CONFIG_ATH79) += ar9331_openembed_som9331_board.dtb
dtb-$(CONFIG_ATH79) += ar9331_tl_mr3020.dtb
diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
index 83b3c0ce135a..c4102b280b47 100644
--- a/arch/mips/boot/dts/qca/ar9331.dtsi
+++ b/arch/mips/boot/dts/qca/ar9331.dtsi
@@ -148,6 +148,7 @@
fixed-link {
speed = <1000>;
full-duplex;
+ pause;
};
mdio {
@@ -183,6 +184,7 @@
fixed-link {
speed = <1000>;
full-duplex;
+ pause;
};
};
diff --git a/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts b/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts
new file mode 100644
index 000000000000..e6622f8e8c2b
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331_openembed_som9331_board.dts
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
+
+#include "ar9331.dtsi"
+
+/ {
+ model = "OpenEmbed SOM9331 Board";
+ compatible = "openembed,som9331";
+
+ aliases {
+ serial0 = &uart;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x4000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ led-0 {
+ function = LED_FUNCTION_STATUS;
+ color = <LED_COLOR_ID_RED>;
+ gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ button@0 {
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
+
+&ref {
+ clock-frequency = <25000000>;
+};
+
+&uart {
+ status = "okay";
+};
+
+&gpio {
+ status = "okay";
+};
+
+&usb {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_phy {
+ status = "okay";
+};
+
+&spi {
+ num-chipselects = <1>;
+ status = "okay";
+
+ /* Winbond 25Q64FVSIG SPI flash */
+ spiflash: w25q64@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "winbond,w25q64", "jedec,spi-nor";
+ spi-max-frequency = <104000000>;
+ reg = <0>;
+ };
+};
+
+&eth0 {
+ status = "okay";
+};
+
+&eth1 {
+ status = "okay";
+};
+
+&switch_port1 {
+ label = "lan0";
+ status = "okay";
+};
+
+&switch_port3 {
+ label = "lan1";
+ status = "okay";
+};
+
+&phy_port0 {
+ status = "okay";
+};
+
+&phy_port2 {
+ status = "okay";
+};
+
+&phy_port4 {
+ status = "okay";
+};
diff --git a/arch/mips/cavium-octeon/oct_ilm.c b/arch/mips/cavium-octeon/oct_ilm.c
index 99e27155b399..6a4694538bb6 100644
--- a/arch/mips/cavium-octeon/oct_ilm.c
+++ b/arch/mips/cavium-octeon/oct_ilm.c
@@ -62,7 +62,7 @@ static int reset_statistics(void *data, u64 value)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n");
static void init_debugfs(void)
{
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index 0a7c9834b81c..600d018cf354 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -150,8 +150,12 @@ LEAF(memcpy) /* a0=dst a1=src a2=len */
EXPORT_SYMBOL(memcpy)
move v0, dst /* return value */
__memcpy:
-FEXPORT(__copy_user)
-EXPORT_SYMBOL(__copy_user)
+FEXPORT(__raw_copy_from_user)
+EXPORT_SYMBOL(__raw_copy_from_user)
+FEXPORT(__raw_copy_to_user)
+EXPORT_SYMBOL(__raw_copy_to_user)
+FEXPORT(__raw_copy_in_user)
+EXPORT_SYMBOL(__raw_copy_in_user)
/*
* Note: dst & src may be unaligned, len may be 0
* Temps
diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
index 950e6c6e8629..6e4d3619137a 100644
--- a/arch/mips/cavium-octeon/octeon-usb.c
+++ b/arch/mips/cavium-octeon/octeon-usb.c
@@ -516,20 +516,13 @@ static int __init dwc3_octeon_device_init(void)
if (!pdev)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- put_device(&pdev->dev);
- dev_err(&pdev->dev, "No memory resources\n");
- return -ENXIO;
- }
-
/*
* The code below maps in the registers necessary for
* setting up the clocks and resetting PHYs. We must
* release the resources so the dwc3 subsystem doesn't
* know the difference.
*/
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base)) {
put_device(&pdev->dev);
return PTR_ERR(base);
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index eea9b613bb74..d83e7d600b0a 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -105,10 +105,6 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_EEPROM_LEGACY=y
CONFIG_EEPROM_MAX6875=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_IDETAPE=y
-CONFIG_BLK_DEV_TC86C001=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
diff --git a/arch/mips/configs/loongson2k_defconfig b/arch/mips/configs/loongson2k_defconfig
new file mode 100644
index 000000000000..e948ca487e2d
--- /dev/null
+++ b/arch/mips/configs/loongson2k_defconfig
@@ -0,0 +1,353 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_LZMA=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EMBEDDED=y
+CONFIG_MACH_LOONGSON64=y
+# CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION is not set
+CONFIG_HZ_256=y
+CONFIG_MIPS32_O32=y
+CONFIG_MIPS32_N32=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MQ_IOSCHED_DEADLINE=m
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_BINFMT_MISC=m
+CONFIG_KSM=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_IP_VS=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_IP_SCTP=m
+CONFIG_L2TP=m
+CONFIG_BRIDGE=m
+CONFIG_CFG80211=m
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
+CONFIG_RFKILL=m
+CONFIG_RFKILL_INPUT=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCIEASPM_PERFORMANCE=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=m
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_RAID_ATTRS=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_MEGARAID_NEWGEN=y
+CONFIG_MEGARAID_MM=y
+CONFIG_MEGARAID_MAILBOX=y
+CONFIG_MEGARAID_LEGACY=y
+CONFIG_MEGARAID_SAS=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_PATA_ATIIXP=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_LOOPBACK_TARGET=m
+CONFIG_ISCSI_TARGET=m
+CONFIG_NETDEVICES=y
+CONFIG_TUN=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CIRRUS is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IXGB=y
+CONFIG_IXGBE=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_PENSANDO is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_8139CP=y
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
+CONFIG_R8169=y
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_ATH9K=m
+CONFIG_HOSTAP=m
+CONFIG_INPUT_LEDS=m
+CONFIG_INPUT_SPARSEKMAP=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_XTKBD=m
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=m
+# CONFIG_SERIO_I8042 is not set
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_16550A_VARIANTS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=16
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_HW_RANDOM=y
+CONFIG_RAW_DRIVER=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_PIIX4=y
+CONFIG_GPIO_LOONGSON=y
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_W83627HF=m
+# CONFIG_MEDIA_CEC_SUPPORT is not set
+CONFIG_MEDIA_SUPPORT=m
+# CONFIG_MEDIA_CONTROLLER is not set
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FB_RADEON=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_PLATFORM=m
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+CONFIG_LOGO=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_SEQ_DUMMY=m
+# CONFIG_SND_ISA is not set
+CONFIG_SND_HDA_INTEL=y
+CONFIG_SND_HDA_HWDEP=y
+CONFIG_SND_HDA_PATCH_LOADER=y
+CONFIG_SND_HDA_CODEC_REALTEK=y
+CONFIG_SND_HDA_CODEC_ANALOG=y
+CONFIG_SND_HDA_CODEC_SIGMATEL=y
+CONFIG_SND_HDA_CODEC_VIA=y
+CONFIG_SND_HDA_CODEC_CONEXANT=y
+# CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
+CONFIG_HID_A4TECH=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_USB=y
+CONFIG_USB_MON=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_DMADEVICES=y
+# CONFIG_CPU_HWMON is not set
+CONFIG_PM_DEVFREQ=y
+CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=936
+CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_CRAMFS=m
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_NFS_FS=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+CONFIG_NFSD=m
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=m
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_UTF8=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_PATH=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_CRYPTO_SEQIV=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_DEFLATE=m
+CONFIG_PRINTK_TIME=y
+CONFIG_FRAME_WARN=1024
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index 0e79f81217bc..f02101ff04b3 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -13,7 +13,6 @@ CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_PIDS=y
CONFIG_CGROUP_FREEZER=y
@@ -26,20 +25,20 @@ CONFIG_SCHED_AUTOGROUP=y
CONFIG_SYSFS_DEPRECATED=y
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
+CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
CONFIG_MACH_LOONGSON64=y
CONFIG_CPU_HAS_MSA=y
-CONFIG_NR_CPUS=16
CONFIG_NUMA=y
-CONFIG_SMP=y
+CONFIG_NR_CPUS=16
CONFIG_HZ_256=y
CONFIG_KEXEC=y
CONFIG_MIPS32_O32=y
CONFIG_MIPS32_N32=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
-CONFIG_KVM_MIPS_VZ=y
+CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
@@ -129,6 +128,7 @@ CONFIG_L2TP=m
CONFIG_BRIDGE=m
CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m
+CONFIG_BPF_JIT=y
CONFIG_CFG80211=m
CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
@@ -204,7 +204,6 @@ CONFIG_VIRTIO_NET=m
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_I825XX is not set
CONFIG_E1000=y
CONFIG_E1000E=y
@@ -246,7 +245,6 @@ CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_ATH9K=m
CONFIG_HOSTAP=m
-CONFIG_INPUT_POLLDEV=m
CONFIG_INPUT_SPARSEKMAP=y
CONFIG_INPUT_MOUSEDEV=y
CONFIG_INPUT_MOUSEDEV_PSAUX=y
@@ -278,7 +276,6 @@ CONFIG_SENSORS_LM75=m
CONFIG_SENSORS_LM93=m
CONFIG_SENSORS_W83627HF=m
CONFIG_MEDIA_SUPPORT=m
-CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_USB_SUPPORT=y
CONFIG_USB_VIDEO_CLASS=m
CONFIG_DRM=y
@@ -318,6 +315,7 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=m
CONFIG_USB_STORAGE=m
CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_PL2303=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
@@ -388,25 +386,22 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_DEFAULT_SECURITY_DAC=y
-CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_PRINTK_TIME=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_FTRACE is not set
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="ieee754=relaxed"
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index 211bd3d6e6cb..9cb2cf2595e0 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -227,7 +227,6 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_FD=m
-CONFIG_BLK_DEV_UMEM=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index 62b1969b4f55..5924e48fd3ec 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -232,16 +232,12 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_FD=m
-CONFIG_BLK_DEV_UMEM=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_TC86C001=m
CONFIG_RAID_ATTRS=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
deleted file mode 100644
index 9185e0a0aa45..000000000000
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ /dev/null
@@ -1,436 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_LOG_BUF_SHIFT=15
-CONFIG_NAMESPACES=y
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_SLAB=y
-CONFIG_MIPS_MALTA=y
-CONFIG_CPU_LITTLE_ENDIAN=y
-CONFIG_CPU_MIPS32_R2=y
-CONFIG_KVM_GUEST=y
-CONFIG_PAGE_SIZE_16KB=y
-# CONFIG_MIPS_MT_SMP is not set
-CONFIG_HZ_100=y
-CONFIG_PCI=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_NET_IPIP=m
-CONFIG_IP_MROUTE=y
-CONFIG_IP_PIMSM_V1=y
-CONFIG_IP_PIMSM_V2=y
-CONFIG_SYN_COOKIES=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_IPV6_MROUTE=y
-CONFIG_IPV6_PIMSM_V2=y
-CONFIG_NETWORK_SECMARK=y
-CONFIG_NETFILTER=y
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CONNTRACK_SECMARK=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CONNTRACK_AMANDA=m
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_H323=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_PPTP=m
-CONFIG_NF_CONNTRACK_SANE=m
-CONFIG_NF_CONNTRACK_SIP=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFLOG=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_TPROXY=m
-CONFIG_NETFILTER_XT_TARGET_TRACE=m
-CONFIG_NETFILTER_XT_TARGET_SECMARK=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
-CONFIG_NETFILTER_XT_MATCH_HELPER=m
-CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_OWNER=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-CONFIG_NETFILTER_XT_MATCH_RATEEST=m
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_TIME=m
-CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_IP_VS=m
-CONFIG_IP_VS_IPV6=y
-CONFIG_IP_VS_PROTO_TCP=y
-CONFIG_IP_VS_PROTO_UDP=y
-CONFIG_IP_VS_PROTO_ESP=y
-CONFIG_IP_VS_PROTO_AH=y
-CONFIG_IP_VS_RR=m
-CONFIG_IP_VS_WRR=m
-CONFIG_IP_VS_LC=m
-CONFIG_IP_VS_WLC=m
-CONFIG_IP_VS_LBLC=m
-CONFIG_IP_VS_LBLCR=m
-CONFIG_IP_VS_DH=m
-CONFIG_IP_VS_SH=m
-CONFIG_IP_VS_SED=m
-CONFIG_IP_VS_NQ=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_MATCH_AH=m
-CONFIG_IP_NF_MATCH_ECN=m
-CONFIG_IP_NF_MATCH_TTL=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_TARGET_CLUSTERIP=m
-CONFIG_IP_NF_TARGET_ECN=m
-CONFIG_IP_NF_TARGET_TTL=m
-CONFIG_IP_NF_RAW=m
-CONFIG_IP_NF_ARPTABLES=m
-CONFIG_IP_NF_ARPFILTER=m
-CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_IP6_NF_MATCH_AH=m
-CONFIG_IP6_NF_MATCH_EUI64=m
-CONFIG_IP6_NF_MATCH_FRAG=m
-CONFIG_IP6_NF_MATCH_OPTS=m
-CONFIG_IP6_NF_MATCH_HL=m
-CONFIG_IP6_NF_MATCH_IPV6HEADER=m
-CONFIG_IP6_NF_MATCH_MH=m
-CONFIG_IP6_NF_MATCH_RT=m
-CONFIG_IP6_NF_TARGET_HL=m
-CONFIG_IP6_NF_FILTER=m
-CONFIG_IP6_NF_TARGET_REJECT=m
-CONFIG_IP6_NF_MANGLE=m
-CONFIG_IP6_NF_RAW=m
-CONFIG_BRIDGE_NF_EBTABLES=m
-CONFIG_BRIDGE_EBT_BROUTE=m
-CONFIG_BRIDGE_EBT_T_FILTER=m
-CONFIG_BRIDGE_EBT_T_NAT=m
-CONFIG_BRIDGE_EBT_802_3=m
-CONFIG_BRIDGE_EBT_AMONG=m
-CONFIG_BRIDGE_EBT_ARP=m
-CONFIG_BRIDGE_EBT_IP=m
-CONFIG_BRIDGE_EBT_IP6=m
-CONFIG_BRIDGE_EBT_LIMIT=m
-CONFIG_BRIDGE_EBT_MARK=m
-CONFIG_BRIDGE_EBT_PKTTYPE=m
-CONFIG_BRIDGE_EBT_STP=m
-CONFIG_BRIDGE_EBT_VLAN=m
-CONFIG_BRIDGE_EBT_ARPREPLY=m
-CONFIG_BRIDGE_EBT_DNAT=m
-CONFIG_BRIDGE_EBT_MARK_T=m
-CONFIG_BRIDGE_EBT_REDIRECT=m
-CONFIG_BRIDGE_EBT_SNAT=m
-CONFIG_BRIDGE_EBT_LOG=m
-CONFIG_BRIDGE_EBT_NFLOG=m
-CONFIG_IP_SCTP=m
-CONFIG_BRIDGE=m
-CONFIG_VLAN_8021Q=m
-CONFIG_VLAN_8021Q_GVRP=y
-CONFIG_ATALK=m
-CONFIG_DEV_APPLETALK=m
-CONFIG_IPDDP=m
-CONFIG_IPDDP_ENCAP=y
-CONFIG_PHONET=m
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_CBQ=m
-CONFIG_NET_SCH_HTB=m
-CONFIG_NET_SCH_HFSC=m
-CONFIG_NET_SCH_PRIO=m
-CONFIG_NET_SCH_RED=m
-CONFIG_NET_SCH_SFQ=m
-CONFIG_NET_SCH_TEQL=m
-CONFIG_NET_SCH_TBF=m
-CONFIG_NET_SCH_GRED=m
-CONFIG_NET_SCH_DSMARK=m
-CONFIG_NET_SCH_NETEM=m
-CONFIG_NET_SCH_INGRESS=m
-CONFIG_NET_CLS_BASIC=m
-CONFIG_NET_CLS_TCINDEX=m
-CONFIG_NET_CLS_ROUTE4=m
-CONFIG_NET_CLS_FW=m
-CONFIG_NET_CLS_U32=m
-CONFIG_NET_CLS_RSVP=m
-CONFIG_NET_CLS_RSVP6=m
-CONFIG_NET_CLS_FLOW=m
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_POLICE=y
-CONFIG_NET_ACT_GACT=m
-CONFIG_GACT_PROB=y
-CONFIG_NET_ACT_MIRRED=m
-CONFIG_NET_ACT_IPT=m
-CONFIG_NET_ACT_NAT=m
-CONFIG_NET_ACT_PEDIT=m
-CONFIG_NET_ACT_SIMP=m
-CONFIG_NET_ACT_SKBEDIT=m
-CONFIG_CFG80211=m
-CONFIG_MAC80211=m
-CONFIG_MAC80211_MESH=y
-CONFIG_RFKILL=m
-CONFIG_DEVTMPFS=y
-CONFIG_CONNECTOR=m
-CONFIG_MTD=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_OOPS=m
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_MTD_UBI=m
-CONFIG_MTD_UBI_GLUEBI=m
-CONFIG_BLK_DEV_FD=m
-CONFIG_BLK_DEV_UMEM=m
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_BLK_DEV_RAM=y
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
-CONFIG_VIRTIO_BLK=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_TC86C001=m
-CONFIG_RAID_ATTRS=m
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_SCSI_FC_ATTRS=m
-CONFIG_ISCSI_TCP=m
-CONFIG_BLK_DEV_3W_XXXX_RAID=m
-CONFIG_SCSI_3W_9XXX=m
-CONFIG_SCSI_ACARD=m
-CONFIG_SCSI_AACRAID=m
-CONFIG_SCSI_AIC7XXX=m
-CONFIG_AIC7XXX_RESET_DELAY_MS=15000
-# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
-CONFIG_ATA=y
-CONFIG_ATA_PIIX=y
-CONFIG_PATA_IT8213=m
-CONFIG_PATA_OLDPIIX=y
-CONFIG_PATA_MPIIX=y
-CONFIG_ATA_GENERIC=y
-CONFIG_PATA_LEGACY=y
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_MD_RAID10=m
-CONFIG_MD_RAID456=m
-CONFIG_MD_MULTIPATH=m
-CONFIG_MD_FAULTY=m
-CONFIG_BLK_DEV_DM=m
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_EQUALIZER=m
-CONFIG_IFB=m
-CONFIG_MACVLAN=m
-CONFIG_TUN=m
-CONFIG_VETH=m
-CONFIG_VIRTIO_NET=y
-CONFIG_PCNET32=y
-CONFIG_CHELSIO_T3=m
-CONFIG_AX88796=m
-CONFIG_NETXEN_NIC=m
-CONFIG_TC35815=m
-CONFIG_BROADCOM_PHY=m
-CONFIG_CICADA_PHY=m
-CONFIG_DAVICOM_PHY=m
-CONFIG_ICPLUS_PHY=m
-CONFIG_LXT_PHY=m
-CONFIG_MARVELL_PHY=m
-CONFIG_QSEMI_PHY=m
-CONFIG_REALTEK_PHY=m
-CONFIG_SMSC_PHY=m
-CONFIG_VITESSE_PHY=m
-CONFIG_ATMEL=m
-CONFIG_PCI_ATMEL=m
-CONFIG_IPW2100=m
-CONFIG_IPW2100_MONITOR=y
-CONFIG_HOSTAP=m
-CONFIG_HOSTAP_FIRMWARE=y
-CONFIG_HOSTAP_FIRMWARE_NVRAM=y
-CONFIG_HOSTAP_PLX=m
-CONFIG_HOSTAP_PCI=m
-CONFIG_PRISM54=m
-CONFIG_LIBERTAS=m
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_PIIX4_POWEROFF=y
-CONFIG_POWER_RESET_SYSCON=y
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FB_CIRRUS=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_HID=m
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_CMOS=y
-CONFIG_UIO=m
-CONFIG_UIO_CIF=m
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_MMIO=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
-CONFIG_JFS_FS=m
-CONFIG_JFS_POSIX_ACL=y
-CONFIG_JFS_SECURITY=y
-CONFIG_XFS_FS=m
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_QUOTA=y
-CONFIG_QFMT_V2=y
-CONFIG_FUSE_FS=m
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_AFFS_FS=m
-CONFIG_HFS_FS=m
-CONFIG_HFSPLUS_FS=m
-CONFIG_BEFS_FS=m
-CONFIG_BFS_FS=m
-CONFIG_EFS_FS=m
-CONFIG_JFFS2_FS=m
-CONFIG_JFFS2_FS_XATTR=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_RUBIN=y
-CONFIG_CRAMFS=m
-CONFIG_VXFS_FS=m
-CONFIG_MINIX_FS=m
-CONFIG_ROMFS_FS=m
-CONFIG_SYSV_FS=m
-CONFIG_UFS_FS=m
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V3=y
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
diff --git a/arch/mips/configs/maltaup_xpa_defconfig b/arch/mips/configs/maltaup_xpa_defconfig
index 636311d67a53..c0d3156ef640 100644
--- a/arch/mips/configs/maltaup_xpa_defconfig
+++ b/arch/mips/configs/maltaup_xpa_defconfig
@@ -230,16 +230,12 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_FD=m
-CONFIG_BLK_DEV_UMEM=m
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_TC86C001=m
CONFIG_RAID_ATTRS=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index 5e389db35fa7..69f2300107f9 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -44,9 +44,6 @@ CONFIG_MTD_NAND_TXX9NDFMC=m
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDE_TX4938=y
-CONFIG_BLK_DEV_IDE_TX4939=y
CONFIG_NETDEVICES=y
CONFIG_NE2000=y
CONFIG_SMC91X=y
diff --git a/arch/mips/configs/sb1250_swarm_defconfig b/arch/mips/configs/sb1250_swarm_defconfig
index bb0b1b22ebe1..de94bf756a93 100644
--- a/arch/mips/configs/sb1250_swarm_defconfig
+++ b/arch/mips/configs/sb1250_swarm_defconfig
@@ -17,7 +17,6 @@ CONFIG_64BIT=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_HZ_1000=y
-CONFIG_PCI=y
CONFIG_MIPS32_O32=y
CONFIG_PM=y
CONFIG_MODULES=y
@@ -34,25 +33,28 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_TCP_MD5SIG=y
# CONFIG_IPV6 is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_CFG80211=m
CONFIG_MAC80211=m
CONFIG_RFKILL=m
+CONFIG_PCI=y
CONFIG_FW_LOADER=m
CONFIG_CONNECTOR=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=9220
CONFIG_CDROM_PKTCDVD=m
CONFIG_ATA_OVER_ETH=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_IDETAPE=y
CONFIG_RAID_ATTRS=m
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_CONSTANTS=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+# CONFIG_ATA_BMDMA is not set
+CONFIG_PATA_PLATFORM=y
CONFIG_NETDEVICES=y
CONFIG_MACVLAN=m
CONFIG_SB1250_MAC=y
@@ -88,18 +90,14 @@ CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
-CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/mips/configs/workpad_defconfig b/arch/mips/configs/workpad_defconfig
index 891a5f77305d..4798dc86c9ce 100644
--- a/arch/mips/configs/workpad_defconfig
+++ b/arch/mips/configs/workpad_defconfig
@@ -26,9 +26,12 @@ CONFIG_IP_MULTICAST=y
# CONFIG_IPV6 is not set
CONFIG_NETWORK_SECMARK=y
CONFIG_BLK_DEV_RAM=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECS=m
-CONFIG_IDE_GENERIC=y
+# CONFIG_SCSI_PROC_FS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+# CONFIG_ATA_VERBOSE_ERROR is not set
+# CONFIG_ATA_FORCE is not set
+# CONFIG_ATA_BMDMA is not set
CONFIG_NETDEVICES=y
CONFIG_PCMCIA_3C574=m
CONFIG_PCMCIA_3C589=m
diff --git a/arch/mips/crypto/.gitignore b/arch/mips/crypto/.gitignore
new file mode 100644
index 000000000000..0d47d4f21c6d
--- /dev/null
+++ b/arch/mips/crypto/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+poly1305-core.S
diff --git a/arch/mips/crypto/Makefile b/arch/mips/crypto/Makefile
index 8e1deaf00e0c..5e4105cccf9f 100644
--- a/arch/mips/crypto/Makefile
+++ b/arch/mips/crypto/Makefile
@@ -12,8 +12,8 @@ AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots
obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o
poly1305-mips-y := poly1305-core.o poly1305-glue.o
-perlasm-flavour-$(CONFIG_CPU_MIPS32) := o32
-perlasm-flavour-$(CONFIG_CPU_MIPS64) := 64
+perlasm-flavour-$(CONFIG_32BIT) := o32
+perlasm-flavour-$(CONFIG_64BIT) := 64
quiet_cmd_perlasm = PERLASM $@
cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@)
diff --git a/arch/mips/crypto/crc32-mips.c b/arch/mips/crypto/crc32-mips.c
index faa88a6a74c0..0a03529cf317 100644
--- a/arch/mips/crypto/crc32-mips.c
+++ b/arch/mips/crypto/crc32-mips.c
@@ -8,13 +8,13 @@
* Copyright (C) 2018 MIPS Tech, LLC
*/
-#include <linux/unaligned/access_ok.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <asm/mipsregs.h>
+#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c
index fc881b46d911..bc6110fb98e0 100644
--- a/arch/mips/crypto/poly1305-glue.c
+++ b/arch/mips/crypto/poly1305-glue.c
@@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void *state, const u8 *key);
asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
{
poly1305_init_mips(&dctx->h, key);
dctx->s[0] = get_unaligned_le32(key + 16);
diff --git a/arch/mips/generic/board-boston.its.S b/arch/mips/generic/board-boston.its.S
index a7f51f97b910..c45ad2759421 100644
--- a/arch/mips/generic/board-boston.its.S
+++ b/arch/mips/generic/board-boston.its.S
@@ -1,22 +1,22 @@
/ {
images {
- fdt@boston {
+ fdt-boston {
description = "img,boston Device Tree";
data = /incbin/("boot/dts/img/boston.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
- hash@0 {
+ hash {
algo = "sha1";
};
};
};
configurations {
- conf@boston {
+ conf-boston {
description = "Boston Linux kernel";
- kernel = "kernel@0";
- fdt = "fdt@boston";
+ kernel = "kernel";
+ fdt = "fdt-boston";
};
};
};
diff --git a/arch/mips/generic/board-jaguar2.its.S b/arch/mips/generic/board-jaguar2.its.S
index fb0e589eeff7..c2b8d479b26c 100644
--- a/arch/mips/generic/board-jaguar2.its.S
+++ b/arch/mips/generic/board-jaguar2.its.S
@@ -1,23 +1,23 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/ {
images {
- fdt@jaguar2_pcb110 {
+ fdt-jaguar2_pcb110 {
description = "MSCC Jaguar2 PCB110 Device Tree";
data = /incbin/("boot/dts/mscc/jaguar2_pcb110.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
- hash@0 {
+ hash {
algo = "sha1";
};
};
- fdt@jaguar2_pcb111 {
+ fdt-jaguar2_pcb111 {
description = "MSCC Jaguar2 PCB111 Device Tree";
data = /incbin/("boot/dts/mscc/jaguar2_pcb111.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
- hash@0 {
+ hash {
algo = "sha1";
};
};
@@ -26,14 +26,14 @@
configurations {
pcb110 {
description = "Jaguar2 Linux kernel";
- kernel = "kernel@0";
- fdt = "fdt@jaguar2_pcb110";
+ kernel = "kernel";
+ fdt = "fdt-jaguar2_pcb110";
ramdisk = "ramdisk";
};
pcb111 {
description = "Jaguar2 Linux kernel";
- kernel = "kernel@0";
- fdt = "fdt@jaguar2_pcb111";
+ kernel = "kernel";
+ fdt = "fdt-jaguar2_pcb111";
ramdisk = "ramdisk";
};
};
diff --git a/arch/mips/generic/board-luton.its.S b/arch/mips/generic/board-luton.its.S
index 39a543f62f25..bd9837c9af97 100644
--- a/arch/mips/generic/board-luton.its.S
+++ b/arch/mips/generic/board-luton.its.S
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/ {
images {
- fdt@luton_pcb091 {
+ fdt-luton_pcb091 {
description = "MSCC Luton PCB091 Device Tree";
data = /incbin/("boot/dts/mscc/luton_pcb091.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
- hash@0 {
+ hash {
algo = "sha1";
};
};
@@ -16,8 +16,8 @@
configurations {
pcb091 {
description = "Luton Linux kernel";
- kernel = "kernel@0";
- fdt = "fdt@luton_pcb091";
+ kernel = "kernel";
+ fdt = "fdt-luton_pcb091";
};
};
};
diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S
index e4cb4f95a8cc..0a2e8f7a8526 100644
--- a/arch/mips/generic/board-ni169445.its.S
+++ b/arch/mips/generic/board-ni169445.its.S
@@ -1,22 +1,22 @@
/ {
images {
- fdt@ni169445 {
+ fdt-ni169445 {
description = "NI 169445 device tree";
data = /incbin/("boot/dts/ni/169445.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
- hash@0 {
+ hash {
algo = "sha1";
};
};
};
configurations {
- conf@ni169445 {
+ conf-ni169445 {
description = "NI 169445 Linux Kernel";
- kernel = "kernel@0";
- fdt = "fdt@ni169445";
+ kernel = "kernel";
+ fdt = "fdt-ni169445";
};
};
};
diff --git a/arch/mips/generic/board-ocelot.its.S b/arch/mips/generic/board-ocelot.its.S
index 3da23988149a..8c7e3a1b68d3 100644
--- a/arch/mips/generic/board-ocelot.its.S
+++ b/arch/mips/generic/board-ocelot.its.S
@@ -1,40 +1,40 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/ {
images {
- fdt@ocelot_pcb123 {
+ fdt-ocelot_pcb123 {
description = "MSCC Ocelot PCB123 Device Tree";
data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
- hash@0 {
+ hash {
algo = "sha1";
};
};
- fdt@ocelot_pcb120 {
+ fdt-ocelot_pcb120 {
description = "MSCC Ocelot PCB120 Device Tree";
data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
- hash@0 {
+ hash {
algo = "sha1";
};
};
};
configurations {
- conf@ocelot_pcb123 {
+ conf-ocelot_pcb123 {
description = "Ocelot Linux kernel";
- kernel = "kernel@0";
- fdt = "fdt@ocelot_pcb123";
+ kernel = "kernel";
+ fdt = "fdt-ocelot_pcb123";
};
- conf@ocelot_pcb120 {
+ conf-ocelot_pcb120 {
description = "Ocelot Linux kernel";
- kernel = "kernel@0";
- fdt = "fdt@ocelot_pcb120";
+ kernel = "kernel";
+ fdt = "fdt-ocelot_pcb120";
};
};
};
diff --git a/arch/mips/generic/board-serval.its.S b/arch/mips/generic/board-serval.its.S
index 4ea4fc9d757f..dde833efe980 100644
--- a/arch/mips/generic/board-serval.its.S
+++ b/arch/mips/generic/board-serval.its.S
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/ {
images {
- fdt@serval_pcb105 {
+ fdt-serval_pcb105 {
description = "MSCC Serval PCB105 Device Tree";
data = /incbin/("boot/dts/mscc/serval_pcb105.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
- hash@0 {
+ hash {
algo = "sha1";
};
};
@@ -16,8 +16,8 @@
configurations {
pcb105 {
description = "Serval Linux kernel";
- kernel = "kernel@0";
- fdt = "fdt@serval_pcb105";
+ kernel = "kernel";
+ fdt = "fdt-serval_pcb105";
ramdisk = "ramdisk";
};
};
diff --git a/arch/mips/generic/board-xilfpga.its.S b/arch/mips/generic/board-xilfpga.its.S
index a2e773d3f14f..08c1e900eb4e 100644
--- a/arch/mips/generic/board-xilfpga.its.S
+++ b/arch/mips/generic/board-xilfpga.its.S
@@ -1,22 +1,22 @@
/ {
images {
- fdt@xilfpga {
+ fdt-xilfpga {
description = "MIPSfpga (xilfpga) Device Tree";
data = /incbin/("boot/dts/xilfpga/nexys4ddr.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
- hash@0 {
+ hash {
algo = "sha1";
};
};
};
configurations {
- conf@xilfpga {
+ conf-xilfpga {
description = "MIPSfpga Linux kernel";
- kernel = "kernel@0";
- fdt = "fdt@xilfpga";
+ kernel = "kernel";
+ fdt = "fdt-xilfpga";
};
};
};
diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S
index 1a08438fd893..3e254676540f 100644
--- a/arch/mips/generic/vmlinux.its.S
+++ b/arch/mips/generic/vmlinux.its.S
@@ -6,7 +6,7 @@
#address-cells = <ADDR_CELLS>;
images {
- kernel@0 {
+ kernel {
description = KERNEL_NAME;
data = /incbin/(VMLINUX_BINARY);
type = "kernel";
@@ -15,18 +15,18 @@
compression = VMLINUX_COMPRESSION;
load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>;
entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>;
- hash@0 {
+ hash {
algo = "sha1";
};
};
};
configurations {
- default = "conf@default";
+ default = "conf-default";
- conf@default {
+ conf-default {
description = "Generic Linux kernel";
- kernel = "kernel@0";
+ kernel = "kernel";
};
};
};
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 8f6fe69674b7..dee172716581 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -1,9 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# MIPS headers
-generated-y += syscall_table_32_o32.h
-generated-y += syscall_table_64_n32.h
-generated-y += syscall_table_64_n64.h
-generated-y += syscall_table_64_o32.h
+generated-y += syscall_table_n32.h
+generated-y += syscall_table_n64.h
+generated-y += syscall_table_o32.h
generated-y += unistd_nr_n32.h
generated-y += unistd_nr_n64.h
generated-y += unistd_nr_o32.h
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 86f2323ebe6b..ca83ada7015f 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -44,8 +44,7 @@
.endm
#endif
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
- defined(CONFIG_CPU_MIPSR6)
+#ifdef CONFIG_CPU_HAS_DIEI
.macro local_irq_enable reg=t0
ei
irq_enable_hazard
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 27ad76791539..95e1f7f3597f 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -25,24 +25,25 @@
#include <asm/war.h>
#define ATOMIC_OPS(pfx, type) \
-static __always_inline type pfx##_read(const pfx##_t *v) \
+static __always_inline type arch_##pfx##_read(const pfx##_t *v) \
{ \
return READ_ONCE(v->counter); \
} \
\
-static __always_inline void pfx##_set(pfx##_t *v, type i) \
+static __always_inline void arch_##pfx##_set(pfx##_t *v, type i) \
{ \
WRITE_ONCE(v->counter, i); \
} \
\
-static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n) \
+static __always_inline type \
+arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n) \
{ \
- return cmpxchg(&v->counter, o, n); \
+ return arch_cmpxchg(&v->counter, o, n); \
} \
\
-static __always_inline type pfx##_xchg(pfx##_t *v, type n) \
+static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n) \
{ \
- return xchg(&v->counter, n); \
+ return arch_xchg(&v->counter, n); \
}
ATOMIC_OPS(atomic, int)
@@ -53,7 +54,7 @@ ATOMIC_OPS(atomic64, s64)
#endif
#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ void pfx##_##op(type i, pfx##_t * v) \
+static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v) \
{ \
type temp; \
\
@@ -80,7 +81,8 @@ static __inline__ void pfx##_##op(type i, pfx##_t * v) \
}
#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
+static __inline__ type \
+arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
{ \
type temp, result; \
\
@@ -113,7 +115,8 @@ static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
}
#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \
-static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
+static __inline__ type \
+arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
{ \
int temp, result; \
\
@@ -153,18 +156,18 @@ static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
-# define atomic64_add_return_relaxed atomic64_add_return_relaxed
-# define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-# define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-# define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+# define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+# define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+# define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+# define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */
#undef ATOMIC_OPS
@@ -176,17 +179,17 @@ ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
-# define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-# define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-# define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+# define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+# define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+# define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#endif
#undef ATOMIC_OPS
@@ -203,7 +206,7 @@ ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
* The function returns the old value of @v minus @i.
*/
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \
-static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
+static __inline__ int arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
{ \
type temp, result; \
\
@@ -255,11 +258,11 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
}
ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
-#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
+#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v)
#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
-#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)
+#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v)
#endif
#undef ATOMIC_SIP_OP
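
The point of the arch_ prefix above: the un-prefixed names are now owned by the generic instrumentation layer, so MIPS atomics pick up KASAN/KCSAN annotations without arch-specific glue. A minimal sketch of the wrapper shape, following the generated include/linux/atomic-instrumented.h (names per the generic header, not this patch):

/* Generic instrumented wrapper sitting above the arch_ op (sketch):
 * the sanitizer hook runs first, then the real arch implementation.
 */
static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_add_return(i, v);
}
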
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 5be10ece3ef0..4c2e8173e6ec 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -107,7 +107,7 @@ extern void (*free_init_pages_eva)(void *begin, void *end);
extern char arcs_cmdline[COMMAND_LINE_SIZE];
/*
- * Registers a0, a1, a3 and a4 as passed to the kernel entry by firmware
+ * Registers a0, a1, a2 and a3 as passed to the kernel entry by firmware
*/
extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index ed8f3f3c4304..0b983800f48b 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -90,7 +90,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
}
}
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __res; \
\
@@ -175,14 +175,14 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
}
}
-#define cmpxchg_local(ptr, old, new) \
+#define arch_cmpxchg_local(ptr, old, new) \
((__typeof__(*(ptr))) \
__cmpxchg((ptr), \
(unsigned long)(__typeof__(*(ptr)))(old), \
(unsigned long)(__typeof__(*(ptr)))(new), \
sizeof(*(ptr))))
-#define cmpxchg(ptr, old, new) \
+#define arch_cmpxchg(ptr, old, new) \
({ \
__typeof__(*(ptr)) __res; \
\
@@ -194,7 +194,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
if (__SYNC_loongson3_war == 0) \
smp_mb__before_llsc(); \
\
- __res = cmpxchg_local((ptr), (old), (new)); \
+ __res = arch_cmpxchg_local((ptr), (old), (new)); \
\
/* \
* In the Loongson3 workaround case __cmpxchg_asm() already \
@@ -208,21 +208,21 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
})
#ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#else
# include <asm-generic/cmpxchg-local.h>
-# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+# define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
# ifdef CONFIG_SMP
@@ -294,7 +294,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
return ret;
}
-# define cmpxchg64(ptr, o, n) ({ \
+# define arch_cmpxchg64(ptr, o, n) ({ \
unsigned long long __old = (__typeof__(*(ptr)))(o); \
unsigned long long __new = (__typeof__(*(ptr)))(n); \
__typeof__(*(ptr)) __res; \
@@ -317,7 +317,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
})
# else /* !CONFIG_SMP */
-# define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+# define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
# endif /* !CONFIG_SMP */
#endif /* !CONFIG_64BIT */
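
Callers are unchanged by this rename: cmpxchg(), xchg() and friends keep working, but are now provided by the same instrumented layer as the atomics above. Roughly, as a sketch of the generic header's pattern:

/* Sketch: the un-prefixed macro instruments the target address and
 * then defers to the arch_ implementation renamed by this patch.
 */
#define cmpxchg(ptr, old, new)						\
({									\
	typeof(ptr) __ai_ptr = (ptr);					\
	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr));	\
	arch_cmpxchg(__ai_ptr, (old), (new));				\
})
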
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 336e02b3b3ce..3d71081afc55 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -64,6 +64,8 @@
((MIPS_ISA_REV >= (ge)) && (MIPS_ISA_REV < (lt)))
#define __isa_range_or_flag(ge, lt, flag) \
(__isa_range(ge, lt) || ((MIPS_ISA_REV < (lt)) && __isa(flag)))
+#define __isa_range_and_ase(ge, lt, ase) \
+ (__isa_range(ge, lt) && __ase(ase))
/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
@@ -421,7 +423,7 @@
#endif
#ifndef cpu_has_mipsmt
-#define cpu_has_mipsmt __isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
+#define cpu_has_mipsmt __isa_range_and_ase(2, 6, MIPS_ASE_MIPSMT)
#endif
#ifndef cpu_has_vp
diff --git a/arch/mips/include/asm/div64.h b/arch/mips/include/asm/div64.h
index dc5ea5736440..ceece76fc971 100644
--- a/arch/mips/include/asm/div64.h
+++ b/arch/mips/include/asm/div64.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2000, 2004 Maciej W. Rozycki
+ * Copyright (C) 2000, 2004, 2021 Maciej W. Rozycki
* Copyright (C) 2003, 07 Ralf Baechle (ralf@linux-mips.org)
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -9,25 +9,18 @@
#ifndef __ASM_DIV64_H
#define __ASM_DIV64_H
-#include <asm-generic/div64.h>
-
-#if BITS_PER_LONG == 64
+#include <asm/bitsperlong.h>
-#include <linux/types.h>
+#if BITS_PER_LONG == 32
/*
* No traps on overflows for any of these...
*/
-#define __div64_32(n, base) \
-({ \
+#define do_div64_32(res, high, low, base) ({ \
unsigned long __cf, __tmp, __tmp2, __i; \
unsigned long __quot32, __mod32; \
- unsigned long __high, __low; \
- unsigned long long __n; \
\
- __high = *__n >> 32; \
- __low = __n; \
__asm__( \
" .set push \n" \
" .set noat \n" \
@@ -51,18 +44,48 @@
" subu %0, %0, %z6 \n" \
" addiu %2, %2, 1 \n" \
"3: \n" \
- " bnez %4, 0b\n\t" \
- " srl %5, %1, 0x1f\n\t" \
+ " bnez %4, 0b \n" \
+ " srl %5, %1, 0x1f \n" \
" .set pop" \
: "=&r" (__mod32), "=&r" (__tmp), \
"=&r" (__quot32), "=&r" (__cf), \
"=&r" (__i), "=&r" (__tmp2) \
- : "Jr" (base), "0" (__high), "1" (__low)); \
+ : "Jr" (base), "0" (high), "1" (low)); \
\
- (__n) = __quot32; \
+ (res) = __quot32; \
__mod32; \
})
-#endif /* BITS_PER_LONG == 64 */
+#define __div64_32(n, base) ({ \
+ unsigned long __upper, __low, __high, __radix; \
+ unsigned long long __quot; \
+ unsigned long long __div; \
+ unsigned long __mod; \
+ \
+ __div = (*n); \
+ __radix = (base); \
+ \
+ __high = __div >> 32; \
+ __low = __div; \
+ \
+ if (__high < __radix) { \
+ __upper = __high; \
+ __high = 0; \
+ } else { \
+ __upper = __high % __radix; \
+ __high /= __radix; \
+ } \
+ \
+ __mod = do_div64_32(__low, __upper, __low, __radix); \
+ \
+ __quot = __high; \
+ __quot = __quot << 32 | __low; \
+ (*n) = __quot; \
+ __mod; \
+})
+
+#endif /* BITS_PER_LONG == 32 */
+
+#include <asm-generic/div64.h>
#endif /* __ASM_DIV64_H */
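
For readers following the new 32-bit path: the divide is done in two steps so the 32-bit hardware loop never sees a quotient that overflows 32 bits. The same flow in plain C (a sketch; the real macro feeds the second step to the do_div64_32() inline-asm loop rather than letting the compiler call libgcc):

static inline unsigned int div64_32_sketch(unsigned long long *n,
					   unsigned int base)
{
	unsigned int high = *n >> 32, low = (unsigned int)*n, upper;
	unsigned long long rest;

	if (high < base) {		/* high word of the quotient is 0 */
		upper = high;
		high = 0;
	} else {
		upper = high % base;
		high /= base;
	}

	/*
	 * upper < base, so (upper:low) / base is guaranteed to fit in
	 * 32 bits -- exactly the contract the asm loop relies on.
	 */
	rest = ((unsigned long long)upper << 32) | low;
	*n = ((unsigned long long)high << 32) | (unsigned int)(rest / base);
	return rest % base;		/* remainder */
}

A quick check: n = 10^12, base = 1000 gives high = 232, low = 3567587328; 232 < 1000, so the quotient is (232:low) / 1000 = 10^9 with remainder 0.
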
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 292d0425717f..92a380210017 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -36,7 +36,7 @@ extern pte_t *pkmap_page_table;
* easily, subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) || defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
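
Background for the new condition, assuming the usual 4 KiB pages: 32-bit huge-TLB builds, like PHYS_ADDR_T_64BIT builds, double the PTE size, and the pkmap pte table must stay within a single page, hence the halved window. A quick sanity check of the size relationship:

#include <linux/build_bug.h>

/* With 8-byte PTEs a 4 KiB pte page holds 512 pointers, matching the
 * halved LAST_PKMAP above.
 */
static_assert(4096 / sizeof(unsigned long long) == 512);
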
diff --git a/arch/mips/include/asm/hugetlb.h b/arch/mips/include/asm/hugetlb.h
index 10e3be870df7..c2144409c0c4 100644
--- a/arch/mips/include/asm/hugetlb.h
+++ b/arch/mips/include/asm/hugetlb.h
@@ -46,7 +46,13 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
- flush_tlb_page(vma, addr & huge_page_mask(hstate_vma(vma)));
+	/*
+	 * Clear the huge pte entry first, so that other SMP threads will
+	 * not see the old pte entry after flush_tlb_page() completes and
+	 * before the new huge pte entry is set.
+	 */
+ huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ flush_tlb_page(vma, addr);
}
#define __HAVE_ARCH_HUGE_PTE_NONE

diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 78537aa23500..6f5c86d2bab4 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -100,11 +100,23 @@ static inline void set_io_port_base(unsigned long base)
* almost all conceivable cases a device driver should not be using
* this function
*/
-static inline unsigned long virt_to_phys(volatile const void *address)
+static inline unsigned long __virt_to_phys_nodebug(volatile const void *address)
{
return __pa(address);
}
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern phys_addr_t __virt_to_phys(volatile const void *x);
+#else
+#define __virt_to_phys(x) __virt_to_phys_nodebug(x)
+#endif
+
+#define virt_to_phys virt_to_phys
+static inline phys_addr_t virt_to_phys(const volatile void *x)
+{
+ return __virt_to_phys(x);
+}
+
/*
* phys_to_virt - map physical address to virtual
* @address: address to remap
@@ -552,11 +564,6 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
*/
#define xlate_dev_mem_ptr(p) __va(p)
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
void __ioread64_copy(void *to, const void __iomem *from, size_t count);
#endif /* _ASM_IO_H */
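
With CONFIG_DEBUG_VIRTUAL enabled, every virt_to_phys() call is funneled through an out-of-line checker. A sketch of what that checker does (the real one is added by this series in arch/mips/mm/physaddr.c; virt_addr_valid() here stands in for its more careful early-boot test):

/* Sketch of the CONFIG_DEBUG_VIRTUAL slow path: warn when the address
 * is not part of the kernel linear map, then translate as usual.
 */
phys_addr_t __virt_to_phys(volatile const void *x)
{
	WARN(!virt_addr_valid((void *)x),
	     "virt_to_phys used for non-linear address: %pK (%pS)\n", x, x);
	return __pa(x);
}
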
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index f021de661c3a..d1477ecb1af9 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -11,7 +11,6 @@
#include <linux/linkage.h>
#include <linux/smp.h>
-#include <linux/irqdomain.h>
#include <asm/mipsmtregs.h>
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 3a5612e7304c..696f6b009377 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -88,44 +88,10 @@
#define KVM_HALT_POLL_NS_DEFAULT 500000
-#ifdef CONFIG_KVM_MIPS_VZ
extern unsigned long GUESTID_MASK;
extern unsigned long GUESTID_FIRST_VERSION;
extern unsigned long GUESTID_VERSION_MASK;
-#endif
-
-
-/*
- * Special address that contains the comm page, used for reducing # of traps
- * This needs to be within 32Kb of 0x0 (so the zero register can be used), but
- * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
- * caught.
- */
-#define KVM_GUEST_COMMPAGE_ADDR ((PAGE_SIZE > 0x8000) ? 0 : \
- (0x8000 - PAGE_SIZE))
-
-#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
- ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
-
-#define KVM_GUEST_KUSEG 0x00000000UL
-#define KVM_GUEST_KSEG0 0x40000000UL
-#define KVM_GUEST_KSEG1 0x40000000UL
-#define KVM_GUEST_KSEG23 0x60000000UL
-#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000)
-#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
-
-#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
-#define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
-#define KVM_GUEST_CKSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
-
-/*
- * Map an address to a certain kernel segment
- */
-#define KVM_GUEST_KSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
-#define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
-#define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
-#define KVM_INVALID_PAGE 0xdeadbeef
#define KVM_INVALID_ADDR 0xdeadbeef
/*
@@ -143,10 +109,11 @@ static inline bool kvm_is_error_hva(unsigned long addr)
}
struct kvm_vm_stat {
- ulong remote_tlb_flush;
+ struct kvm_vm_stat_generic generic;
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
u64 wait_exits;
u64 cache_exits;
u64 signal_exits;
@@ -165,7 +132,6 @@ struct kvm_vcpu_stat {
u64 fpe_exits;
u64 msa_disabled_exits;
u64 flush_dcache_exits;
-#ifdef CONFIG_KVM_MIPS_VZ
u64 vz_gpsi_exits;
u64 vz_gsfc_exits;
u64 vz_hc_exits;
@@ -177,13 +143,6 @@ struct kvm_vcpu_stat {
#ifdef CONFIG_CPU_LOONGSON64
u64 vz_cpucfg_exits;
#endif
-#endif
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
};
struct kvm_arch_memory_slot {
@@ -303,14 +262,6 @@ enum emulation_result {
EMULATE_HYPERCALL, /* HYPCALL instruction */
};
-#define mips3_paddr_to_tlbpfn(x) \
- (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
-#define mips3_tlbpfn_to_paddr(x) \
- ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
-
-#define MIPS3_PG_SHIFT 6
-#define MIPS3_PG_FRAME 0x3fffffc0
-
#if defined(CONFIG_64BIT)
#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13)
#else
@@ -337,7 +288,6 @@ struct kvm_mips_tlb {
#define KVM_MIPS_AUX_FPU 0x1
#define KVM_MIPS_AUX_MSA 0x2
-#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
void *guest_ebase;
int (*vcpu_run)(struct kvm_vcpu *vcpu);
@@ -370,9 +320,6 @@ struct kvm_vcpu_arch {
/* COP0 State */
struct mips_coproc *cop0;
- /* Host KSEG0 address of the EI/DI offset */
- void *kseg0_commpage;
-
/* Resume PC after MMIO completion */
unsigned long io_pc;
/* GPR used as IO source/target */
@@ -398,19 +345,9 @@ struct kvm_vcpu_arch {
/* Bitmask of pending exceptions to be cleared */
unsigned long pending_exceptions_clr;
- /* S/W Based TLB for guest */
- struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
-
- /* Guest kernel/user [partial] mm */
- struct mm_struct guest_kernel_mm, guest_user_mm;
-
- /* Guest ASID of last user mode execution */
- unsigned int last_user_gasid;
-
/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;
-#ifdef CONFIG_KVM_MIPS_VZ
/* vcpu's vzguestid is different on each host cpu in an smp system */
u32 vzguestid[NR_CPUS];
@@ -421,7 +358,6 @@ struct kvm_vcpu_arch {
/* emulated guest MAAR registers */
unsigned long maar[6];
-#endif
/* Last CPU the VCPU state was loaded on */
int last_sched_cpu;
@@ -651,20 +587,6 @@ static inline void kvm_change_##name1(struct mips_coproc *cop0, \
__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
-#ifndef CONFIG_KVM_MIPS_VZ
-
-/*
- * T&E (trap & emulate software based virtualisation)
- * We generate the common accessors operating exclusively on the saved context
- * in RAM.
- */
-
-#define __BUILD_KVM_RW_HW __BUILD_KVM_RW_SW
-#define __BUILD_KVM_SET_HW __BUILD_KVM_SET_SW
-#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_ATOMIC_SW
-
-#else
-
/*
* VZ (hardware assisted virtualisation)
* These macros use the active guest state in VZ mode (hardware registers),
@@ -697,8 +619,6 @@ static inline void kvm_change_##name1(struct mips_coproc *cop0, \
*/
#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_SET_HW
-#endif
-
/*
* Define accessors for CP0 registers that are accessible to the guest. These
* are primarily used by common emulation code, which may need to access the
@@ -815,14 +735,7 @@ struct kvm_mips_callbacks {
int (*vcpu_init)(struct kvm_vcpu *vcpu);
void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
int (*vcpu_setup)(struct kvm_vcpu *vcpu);
- void (*flush_shadow_all)(struct kvm *kvm);
- /*
- * Must take care of flushing any cached GPA PTEs (e.g. guest entries in
- * VZ root TLB, or T&E GVA page tables and corresponding root TLB
- * mappings).
- */
- void (*flush_shadow_memslot)(struct kvm *kvm,
- const struct kvm_memory_slot *slot);
+ void (*prepare_flush_shadow)(struct kvm *kvm);
gpa_t (*gva_to_gpa)(gva_t gva);
void (*queue_timer_int)(struct kvm_vcpu *vcpu);
void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
@@ -874,42 +787,9 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);
/* TLB handling */
-u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
-
-u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
-
-u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
-
-#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu, bool write_fault);
-#endif
-extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
- struct kvm_vcpu *vcpu,
- bool write_fault);
-
-extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
- struct kvm_vcpu *vcpu);
-extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
- struct kvm_mips_tlb *tlb,
- unsigned long gva,
- bool write_fault);
-
-extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu,
- bool write_fault);
-
-extern void kvm_mips_dump_host_tlbs(void);
-extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
-extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
- bool user, bool kernel);
-
-extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
- unsigned long entryhi);
-
-#ifdef CONFIG_KVM_MIPS_VZ
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
unsigned long *gpa);
@@ -923,58 +803,17 @@ void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
void kvm_loongson_clear_guest_vtlb(void);
void kvm_loongson_clear_guest_ftlb(void);
#endif
-#endif
-
-void kvm_mips_suspend_mm(int cpu);
-void kvm_mips_resume_mm(int cpu);
/* MMU handling */
-/**
- * enum kvm_mips_flush - Types of MMU flushes.
- * @KMF_USER: Flush guest user virtual memory mappings.
- * Guest USeg only.
- * @KMF_KERN: Flush guest kernel virtual memory mappings.
- * Guest USeg and KSeg2/3.
- * @KMF_GPA: Flush guest physical memory mappings.
- * Also includes KSeg0 if KMF_KERN is set.
- */
-enum kvm_mips_flush {
- KMF_USER = 0x0,
- KMF_KERN = 0x1,
- KMF_GPA = 0x2,
-};
-void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
-void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
- bool user);
-void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
-void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);
-
-enum kvm_mips_fault_result {
- KVM_MIPS_MAPPED = 0,
- KVM_MIPS_GVA,
- KVM_MIPS_GPA,
- KVM_MIPS_TLB,
- KVM_MIPS_TLBINV,
- KVM_MIPS_TLBMOD,
-};
-enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
- unsigned long gva,
- bool write);
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end, unsigned flags);
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
/* Emulation */
-int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
@@ -1006,68 +845,6 @@ static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
return false;
}
-extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);
-
-extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-extern enum emulation_result kvm_mips_handle_ri(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
@@ -1087,26 +864,9 @@ ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
u32 count, int min_drift);
-#ifdef CONFIG_KVM_MIPS_VZ
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
-#else
-static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {}
-static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {}
-#endif
-enum emulation_result kvm_mips_check_privilege(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu);
-
-enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
- u32 *opc,
- u32 cause,
- struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
- u32 *opc,
- u32 cause,
- struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
u32 cause,
struct kvm_vcpu *vcpu);
@@ -1117,27 +877,12 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
/* COP0 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);
-unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
-unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
-unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
-unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
-
/* Hypercalls (hypcall.c) */
enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);
-/* Dynamic binary translation */
-extern int kvm_mips_trans_cache_index(union mips_instruction inst,
- u32 *opc, struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
- struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
- struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
- struct kvm_vcpu *vcpu);
-
/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
@@ -1154,4 +899,7 @@ static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
+int kvm_arch_flush_remote_tlb(struct kvm *kvm);
+
#endif /* __MIPS_KVM_HOST_H__ */
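
__KVM_HAVE_ARCH_FLUSH_REMOTE_TLB lets the architecture short-circuit the generic remote-TLB flush. A sketch of how the common code consumes the hook, modeled on virt/kvm/kvm_main.c (treat the details as approximate):

/* If the arch can flush guest TLBs directly (returns 0), the per-vCPU
 * IPI broadcast is skipped; otherwise every vCPU gets a
 * KVM_REQ_TLB_FLUSH request.
 */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	if (!kvm_arch_flush_remote_tlb(kvm)
	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.generic.remote_tlb_flush;
}
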
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
index dd09c3bf0601..cbe75ade3277 100644
--- a/arch/mips/include/asm/mach-ar7/ar7.h
+++ b/arch/mips/include/asm/mach-ar7/ar7.h
@@ -131,10 +131,6 @@ static inline u8 ar7_chip_rev(void)
0x14))) >> 16) & 0xff;
}
-struct clk {
- unsigned int rate;
-};
-
static inline int ar7_has_high_cpmac(void)
{
u16 chip_id = ar7_chip_id();
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
index c38b38ce5a3d..b071a7353ee1 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -157,4 +157,12 @@ octeon_main_processor:
.macro smp_slave_setup
.endm
+#define USE_KEXEC_SMP_WAIT_FINAL
+ .macro kexec_smp_wait_final
+ .set push
+ .set noreorder
+ synci 0($0)
+ .set pop
+ .endm
+
#endif /* __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H */
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index c3ac06a6acd2..b247575c5e69 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -30,11 +30,7 @@
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_32BIT
-#ifdef CONFIG_KVM_GUEST
-#define CAC_BASE _AC(0x40000000, UL)
-#else
#define CAC_BASE _AC(0x80000000, UL)
-#endif
#ifndef IO_BASE
#define IO_BASE _AC(0xa0000000, UL)
#endif
@@ -43,12 +39,8 @@
#endif
#ifndef MAP_BASE
-#ifdef CONFIG_KVM_GUEST
-#define MAP_BASE _AC(0x60000000, UL)
-#else
#define MAP_BASE _AC(0xc0000000, UL)
#endif
-#endif
/*
* Memory above this physical address will be considered highmem.
@@ -100,11 +92,7 @@
#endif
#ifndef FIXADDR_TOP
-#ifdef CONFIG_KVM_GUEST
-#define FIXADDR_TOP ((unsigned long)(long)(int)0x7ffe0000)
-#else
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
#endif
-#endif
#endif /* __ASM_MACH_GENERIC_SPACES_H */
diff --git a/arch/mips/include/asm/mach-loongson64/boot_param.h b/arch/mips/include/asm/mach-loongson64/boot_param.h
index 4592841b6b0c..035b1a69e2d0 100644
--- a/arch/mips/include/asm/mach-loongson64/boot_param.h
+++ b/arch/mips/include/asm/mach-loongson64/boot_param.h
@@ -198,33 +198,6 @@ enum loongson_bridge_type {
VIRTUAL = 3
};
-struct loongson_system_configuration {
- u32 nr_cpus;
- u32 nr_nodes;
- int cores_per_node;
- int cores_per_package;
- u16 boot_cpu_id;
- u16 reserved_cpus_mask;
- enum loongson_cpu_type cputype;
- enum loongson_bridge_type bridgetype;
- u64 ht_control_base;
- u64 pci_mem_start_addr;
- u64 pci_mem_end_addr;
- u64 pci_io_base;
- u64 restart_addr;
- u64 poweroff_addr;
- u64 suspend_addr;
- u64 vgabios_addr;
- u32 dma_mask_bits;
- char ecname[32];
- u32 nr_uarts;
- struct uart_device uarts[MAX_UARTS];
- u32 nr_sensors;
- struct sensor_device sensors[MAX_SENSORS];
- u64 workarounds;
- void (*early_config)(void);
-};
-
extern struct efi_memory_map_loongson *loongson_memmap;
extern struct loongson_system_configuration loongson_sysconf;
diff --git a/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h b/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h
index 839410cda621..8be710557bdb 100644
--- a/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h
+++ b/arch/mips/include/asm/mach-loongson64/builtin_dtbs.h
@@ -8,6 +8,7 @@
#ifndef __ASM_MACH_LOONGSON64_BUILTIN_DTBS_H_
#define __ASM_MACH_LOONGSON64_BUILTIN_DTBS_H_
+extern u32 __dtb_loongson64_2core_2k1000_begin[];
extern u32 __dtb_loongson64c_4core_ls7a_begin[];
extern u32 __dtb_loongson64c_4core_rs780e_begin[];
extern u32 __dtb_loongson64c_8core_rs780e_begin[];
diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
index e4d77f4f0fe3..13373c5144f8 100644
--- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
@@ -75,4 +75,31 @@
.set pop
.endm
+#define USE_KEXEC_SMP_WAIT_FINAL
+ .macro kexec_smp_wait_final
+ /* s0:prid s1:initfn */
+ /* a0:base t1:cpuid t2:node t9:count */
+ mfc0 t1, CP0_EBASE
+ andi t1, MIPS_EBASE_CPUNUM
+	dins a0, t1, 8, 2 /* insert core id */
+ dext t2, t1, 2, 2
+ dins a0, t2, 44, 2 /* insert node id */
+ mfc0 s0, CP0_PRID
+ andi s0, s0, (PRID_IMP_MASK | PRID_REV_MASK)
+ beq s0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R1), 1f
+ beq s0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R2), 1f
+ b 2f /* Loongson-3A1000/3A2000/3A3000/3A4000 */
+1: dins a0, t2, 14, 2 /* Loongson-3B1000/3B1500 need bit 15~14 */
+2: li t9, 0x100 /* wait for init loop */
+3: addiu t9, -1 /* limit mailbox access */
+ bnez t9, 3b
+ lw s1, 0x20(a0) /* check PC as an indicator */
+ beqz s1, 2b
+ ld s1, 0x20(a0) /* get PC via mailbox reg0 */
+ ld sp, 0x28(a0) /* get SP via mailbox reg1 */
+ ld gp, 0x30(a0) /* get GP via mailbox reg2 */
+ ld a1, 0x38(a0)
+ jr s1 /* jump to initial PC */
+ .endm
+
#endif /* __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H */
diff --git a/arch/mips/include/asm/mach-loongson64/loongson.h b/arch/mips/include/asm/mach-loongson64/loongson.h
index ac1c20e172a2..f7c3ab6d724e 100644
--- a/arch/mips/include/asm/mach-loongson64/loongson.h
+++ b/arch/mips/include/asm/mach-loongson64/loongson.h
@@ -12,6 +12,30 @@
#include <linux/irq.h>
#include <boot_param.h>
+enum loongson_fw_interface {
+ LOONGSON_LEFI,
+ LOONGSON_DTB,
+};
+
+/* machine-specific boot configuration */
+struct loongson_system_configuration {
+ enum loongson_fw_interface fw_interface;
+ u32 nr_cpus;
+ u32 nr_nodes;
+ int cores_per_node;
+ int cores_per_package;
+ u16 boot_cpu_id;
+ u16 reserved_cpus_mask;
+ enum loongson_cpu_type cputype;
+ enum loongson_bridge_type bridgetype;
+ u64 restart_addr;
+ u64 poweroff_addr;
+ u64 suspend_addr;
+ u64 vgabios_addr;
+ u32 dma_mask_bits;
+ u64 workarounds;
+ void (*early_config)(void);
+};
/* machine-specific reboot/halt operation */
extern void mach_prepare_reboot(void);
@@ -23,7 +47,8 @@ extern u32 memsize, highmemsize;
extern const struct plat_smp_ops loongson3_smp_ops;
/* loongson-specific command line, env and memory initialization */
-extern void __init prom_init_env(void);
+extern void __init prom_dtb_init_env(void);
+extern void __init prom_lefi_init_env(void);
extern void __init szmem(unsigned int node);
extern void *loongson_fdt_blob;
diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h
index 757ce53d00e6..d51dfad8f543 100644
--- a/arch/mips/include/asm/mach-ralink/mt7620.h
+++ b/arch/mips/include/asm/mach-ralink/mt7620.h
@@ -83,52 +83,13 @@
#define MT7620_DDR2_SIZE_MIN 32
#define MT7620_DDR2_SIZE_MAX 256
-#define MT7620_GPIO_MODE_UART0_SHIFT 2
-#define MT7620_GPIO_MODE_UART0_MASK 0x7
-#define MT7620_GPIO_MODE_UART0(x) ((x) << MT7620_GPIO_MODE_UART0_SHIFT)
-#define MT7620_GPIO_MODE_UARTF 0x0
-#define MT7620_GPIO_MODE_PCM_UARTF 0x1
-#define MT7620_GPIO_MODE_PCM_I2S 0x2
-#define MT7620_GPIO_MODE_I2S_UARTF 0x3
-#define MT7620_GPIO_MODE_PCM_GPIO 0x4
-#define MT7620_GPIO_MODE_GPIO_UARTF 0x5
-#define MT7620_GPIO_MODE_GPIO_I2S 0x6
-#define MT7620_GPIO_MODE_GPIO 0x7
-
-#define MT7620_GPIO_MODE_NAND 0
-#define MT7620_GPIO_MODE_SD 1
-#define MT7620_GPIO_MODE_ND_SD_GPIO 2
-#define MT7620_GPIO_MODE_ND_SD_MASK 0x3
-#define MT7620_GPIO_MODE_ND_SD_SHIFT 18
-
-#define MT7620_GPIO_MODE_PCIE_RST 0
-#define MT7620_GPIO_MODE_PCIE_REF 1
-#define MT7620_GPIO_MODE_PCIE_GPIO 2
-#define MT7620_GPIO_MODE_PCIE_MASK 0x3
-#define MT7620_GPIO_MODE_PCIE_SHIFT 16
-
-#define MT7620_GPIO_MODE_WDT_RST 0
-#define MT7620_GPIO_MODE_WDT_REF 1
-#define MT7620_GPIO_MODE_WDT_GPIO 2
-#define MT7620_GPIO_MODE_WDT_MASK 0x3
-#define MT7620_GPIO_MODE_WDT_SHIFT 21
-
-#define MT7620_GPIO_MODE_MDIO 0
-#define MT7620_GPIO_MODE_MDIO_REFCLK 1
-#define MT7620_GPIO_MODE_MDIO_GPIO 2
-#define MT7620_GPIO_MODE_MDIO_MASK 0x3
-#define MT7620_GPIO_MODE_MDIO_SHIFT 7
-
-#define MT7620_GPIO_MODE_I2C 0
-#define MT7620_GPIO_MODE_UART1 5
-#define MT7620_GPIO_MODE_RGMII1 9
-#define MT7620_GPIO_MODE_RGMII2 10
-#define MT7620_GPIO_MODE_SPI 11
-#define MT7620_GPIO_MODE_SPI_REF_CLK 12
-#define MT7620_GPIO_MODE_WLED 13
-#define MT7620_GPIO_MODE_JTAG 15
-#define MT7620_GPIO_MODE_EPHY 15
-#define MT7620_GPIO_MODE_PA 20
+extern enum ralink_soc_type ralink_soc;
+
+static inline int is_mt76x8(void)
+{
+ return ralink_soc == MT762X_SOC_MT7628AN ||
+ ralink_soc == MT762X_SOC_MT7688;
+}
static inline int mt7620_get_eco(void)
{
diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h
index e1af1ba50bd8..6bbf082dd149 100644
--- a/arch/mips/include/asm/mach-ralink/mt7621.h
+++ b/arch/mips/include/asm/mach-ralink/mt7621.h
@@ -24,9 +24,10 @@
#define CHIP_REV_VER_SHIFT 8
#define CHIP_REV_ECO_MASK 0xf
-#define MT7621_DRAM_BASE 0x0
-#define MT7621_DDR2_SIZE_MIN 32
-#define MT7621_DDR2_SIZE_MAX 256
+#define MT7621_LOWMEM_BASE 0x0
+#define MT7621_LOWMEM_MAX_SIZE 0x1C000000
+#define MT7621_HIGHMEM_BASE 0x20000000
+#define MT7621_HIGHMEM_SIZE 0x4000000
#define MT7621_CHIP_NAME0 0x3637544D
#define MT7621_CHIP_NAME1 0x20203132
diff --git a/arch/mips/include/asm/mach-ralink/pinmux.h b/arch/mips/include/asm/mach-ralink/pinmux.h
deleted file mode 100644
index 048309348be0..000000000000
--- a/arch/mips/include/asm/mach-ralink/pinmux.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 John Crispin <john@phrozen.org>
- */
-
-#ifndef _RT288X_PINMUX_H__
-#define _RT288X_PINMUX_H__
-
-#define FUNC(name, value, pin_first, pin_count) \
- { name, value, pin_first, pin_count }
-
-#define GRP(_name, _func, _mask, _shift) \
- { .name = _name, .mask = _mask, .shift = _shift, \
- .func = _func, .gpio = _mask, \
- .func_count = ARRAY_SIZE(_func) }
-
-#define GRP_G(_name, _func, _mask, _gpio, _shift) \
- { .name = _name, .mask = _mask, .shift = _shift, \
- .func = _func, .gpio = _gpio, \
- .func_count = ARRAY_SIZE(_func) }
-
-struct rt2880_pmx_group;
-
-struct rt2880_pmx_func {
- const char *name;
- const char value;
-
- int pin_first;
- int pin_count;
- int *pins;
-
- int *groups;
- int group_count;
-
- int enabled;
-};
-
-struct rt2880_pmx_group {
- const char *name;
- int enabled;
-
- const u32 shift;
- const char mask;
- const char gpio;
-
- struct rt2880_pmx_func *func;
- int func_count;
-};
-
-extern struct rt2880_pmx_group *rt2880_pinmux_data;
-
-#endif
diff --git a/arch/mips/include/asm/mach-ralink/rt288x.h b/arch/mips/include/asm/mach-ralink/rt288x.h
index 5d10178f26af..5f213534f0f5 100644
--- a/arch/mips/include/asm/mach-ralink/rt288x.h
+++ b/arch/mips/include/asm/mach-ralink/rt288x.h
@@ -33,15 +33,6 @@
#define SYSTEM_CONFIG_CPUCLK_280 0x2
#define SYSTEM_CONFIG_CPUCLK_300 0x3
-#define RT2880_GPIO_MODE_I2C BIT(0)
-#define RT2880_GPIO_MODE_UART0 BIT(1)
-#define RT2880_GPIO_MODE_SPI BIT(2)
-#define RT2880_GPIO_MODE_UART1 BIT(3)
-#define RT2880_GPIO_MODE_JTAG BIT(4)
-#define RT2880_GPIO_MODE_MDIO BIT(5)
-#define RT2880_GPIO_MODE_SDRAM BIT(6)
-#define RT2880_GPIO_MODE_PCI BIT(7)
-
#define CLKCFG_SRAM_CS_N_WDT BIT(9)
#define RT2880_SDRAM_BASE 0x08000000
diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h
index b54619dc4b88..4d8e8c8d83ce 100644
--- a/arch/mips/include/asm/mach-ralink/rt305x.h
+++ b/arch/mips/include/asm/mach-ralink/rt305x.h
@@ -114,30 +114,6 @@ static inline int soc_is_rt5350(void)
#define RT305X_GPIO_GE0_TXD0 40
#define RT305X_GPIO_GE0_RXCLK 51
-#define RT305X_GPIO_MODE_UART0_SHIFT 2
-#define RT305X_GPIO_MODE_UART0_MASK 0x7
-#define RT305X_GPIO_MODE_UART0(x) ((x) << RT305X_GPIO_MODE_UART0_SHIFT)
-#define RT305X_GPIO_MODE_UARTF 0
-#define RT305X_GPIO_MODE_PCM_UARTF 1
-#define RT305X_GPIO_MODE_PCM_I2S 2
-#define RT305X_GPIO_MODE_I2S_UARTF 3
-#define RT305X_GPIO_MODE_PCM_GPIO 4
-#define RT305X_GPIO_MODE_GPIO_UARTF 5
-#define RT305X_GPIO_MODE_GPIO_I2S 6
-#define RT305X_GPIO_MODE_GPIO 7
-
-#define RT305X_GPIO_MODE_I2C 0
-#define RT305X_GPIO_MODE_SPI 1
-#define RT305X_GPIO_MODE_UART1 5
-#define RT305X_GPIO_MODE_JTAG 6
-#define RT305X_GPIO_MODE_MDIO 7
-#define RT305X_GPIO_MODE_SDRAM 8
-#define RT305X_GPIO_MODE_RGMII 9
-#define RT5350_GPIO_MODE_PHY_LED 14
-#define RT5350_GPIO_MODE_SPI_CS1 21
-#define RT3352_GPIO_MODE_LNA 18
-#define RT3352_GPIO_MODE_PA 20
-
#define RT3352_SYSC_REG_SYSCFG0 0x010
#define RT3352_SYSC_REG_SYSCFG1 0x014
#define RT3352_SYSC_REG_CLKCFG1 0x030
diff --git a/arch/mips/include/asm/mach-ralink/rt3883.h b/arch/mips/include/asm/mach-ralink/rt3883.h
index 565f2548496a..f250de9c055b 100644
--- a/arch/mips/include/asm/mach-ralink/rt3883.h
+++ b/arch/mips/include/asm/mach-ralink/rt3883.h
@@ -109,40 +109,6 @@
#define RT3883_CLKCFG1_PCI_CLK_EN BIT(19)
#define RT3883_CLKCFG1_UPHY0_CLK_EN BIT(18)
-#define RT3883_GPIO_MODE_UART0_SHIFT 2
-#define RT3883_GPIO_MODE_UART0_MASK 0x7
-#define RT3883_GPIO_MODE_UART0(x) ((x) << RT3883_GPIO_MODE_UART0_SHIFT)
-#define RT3883_GPIO_MODE_UARTF 0x0
-#define RT3883_GPIO_MODE_PCM_UARTF 0x1
-#define RT3883_GPIO_MODE_PCM_I2S 0x2
-#define RT3883_GPIO_MODE_I2S_UARTF 0x3
-#define RT3883_GPIO_MODE_PCM_GPIO 0x4
-#define RT3883_GPIO_MODE_GPIO_UARTF 0x5
-#define RT3883_GPIO_MODE_GPIO_I2S 0x6
-#define RT3883_GPIO_MODE_GPIO 0x7
-
-#define RT3883_GPIO_MODE_I2C 0
-#define RT3883_GPIO_MODE_SPI 1
-#define RT3883_GPIO_MODE_UART1 5
-#define RT3883_GPIO_MODE_JTAG 6
-#define RT3883_GPIO_MODE_MDIO 7
-#define RT3883_GPIO_MODE_GE1 9
-#define RT3883_GPIO_MODE_GE2 10
-
-#define RT3883_GPIO_MODE_PCI_SHIFT 11
-#define RT3883_GPIO_MODE_PCI_MASK 0x7
-#define RT3883_GPIO_MODE_PCI (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT)
-#define RT3883_GPIO_MODE_LNA_A_SHIFT 16
-#define RT3883_GPIO_MODE_LNA_A_MASK 0x3
-#define _RT3883_GPIO_MODE_LNA_A(_x) ((_x) << RT3883_GPIO_MODE_LNA_A_SHIFT)
-#define RT3883_GPIO_MODE_LNA_A_GPIO 0x3
-#define RT3883_GPIO_MODE_LNA_A _RT3883_GPIO_MODE_LNA_A(RT3883_GPIO_MODE_LNA_A_MASK)
-#define RT3883_GPIO_MODE_LNA_G_SHIFT 18
-#define RT3883_GPIO_MODE_LNA_G_MASK 0x3
-#define _RT3883_GPIO_MODE_LNA_G(_x) ((_x) << RT3883_GPIO_MODE_LNA_G_SHIFT)
-#define RT3883_GPIO_MODE_LNA_G_GPIO 0x3
-#define RT3883_GPIO_MODE_LNA_G _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK)
-
#define RT3883_GPIO_I2C_SD 1
#define RT3883_GPIO_I2C_SCLK 2
#define RT3883_GPIO_SPI_CS0 3
diff --git a/arch/mips/include/asm/mips-boards/launch.h b/arch/mips/include/asm/mips-boards/launch.h
index f93aa5ee2e2e..3481ed4c117b 100644
--- a/arch/mips/include/asm/mips-boards/launch.h
+++ b/arch/mips/include/asm/mips-boards/launch.h
@@ -3,6 +3,9 @@
*
*/
+#ifndef _ASM_MIPS_BOARDS_LAUNCH_H
+#define _ASM_MIPS_BOARDS_LAUNCH_H
+
#ifndef _ASSEMBLER_
struct cpulaunch {
@@ -34,3 +37,5 @@ struct cpulaunch {
/* Polling period in count cycles for secondary CPU's */
#define LAUNCHPERIOD 10000
+
+#endif /* _ASM_MIPS_BOARDS_LAUNCH_H */
diff --git a/arch/mips/include/asm/mips-cps.h b/arch/mips/include/asm/mips-cps.h
index fd43d876892e..35fb8ee6dd33 100644
--- a/arch/mips/include/asm/mips-cps.h
+++ b/arch/mips/include/asm/mips-cps.h
@@ -10,6 +10,8 @@
#include <linux/io.h>
#include <linux/types.h>
+#include <asm/mips-boards/launch.h>
+
extern unsigned long __cps_access_bad_size(void)
__compiletime_error("Bad size for CPS accessor");
@@ -165,11 +167,30 @@ static inline uint64_t mips_cps_cluster_config(unsigned int cluster)
*/
static inline unsigned int mips_cps_numcores(unsigned int cluster)
{
+ unsigned int ncores;
+
if (!mips_cm_present())
return 0;
/* Add one before masking to handle 0xff indicating no cores */
- return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES;
+ ncores = (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES;
+
+ if (IS_ENABLED(CONFIG_SOC_MT7621)) {
+ struct cpulaunch *launch;
+
+ /*
+ * Ralink MT7621S SoC is single core, but the GCR_CONFIG method
+ * always reports 2 cores. Check the second core's LAUNCH_FREADY
+ * flag to detect if the second core is missing. This method
+ * only works before the core has been started.
+ */
+ launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH);
+ launch += 2; /* MT7621 has 2 VPEs per core */
+ if (!(launch->flags & LAUNCH_FREADY))
+ ncores = 1;
+ }
+
+ return ncores;
}
/**
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 9c8099a6ffed..acdf8c69220b 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -2077,7 +2077,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
({ int __res; \
__asm__ __volatile__( \
".set\tpush\n\t" \
- ".set\tmips32r2\n\t" \
+ ".set\tmips32r5\n\t" \
_ASM_SET_VIRT \
"mfgc0\t%0, " #source ", %1\n\t" \
".set\tpop" \
@@ -2090,7 +2090,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
({ unsigned long long __res; \
__asm__ __volatile__( \
".set\tpush\n\t" \
- ".set\tmips64r2\n\t" \
+ ".set\tmips64r5\n\t" \
_ASM_SET_VIRT \
"dmfgc0\t%0, " #source ", %1\n\t" \
".set\tpop" \
@@ -2103,7 +2103,7 @@ _ASM_MACRO_0(tlbginvf, _ASM_INSN_IF_MIPS(0x4200000c)
do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
- ".set\tmips32r2\n\t" \
+ ".set\tmips32r5\n\t" \
_ASM_SET_VIRT \
"mtgc0\t%z0, " #register ", %1\n\t" \
".set\tpop" \
@@ -2115,7 +2115,7 @@ do { \
do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
- ".set\tmips64r2\n\t" \
+ ".set\tmips64r5\n\t" \
_ASM_SET_VIRT \
"dmtgc0\t%z0, " #register ", %1\n\t" \
".set\tpop" \
diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
index b826b8473e95..602a21aee9d4 100644
--- a/arch/mips/include/asm/mmzone.h
+++ b/arch/mips/include/asm/mmzone.h
@@ -8,7 +8,7 @@
#include <asm/page.h>
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
# include <mmzone.h>
#endif
@@ -20,10 +20,4 @@
#define nid_to_addrbase(nid) 0
#endif
-#ifdef CONFIG_DISCONTIGMEM
-
-#define pfn_to_nid(pfn) pa_to_nid((pfn) << PAGE_SHIFT)
-
-#endif /* CONFIG_DISCONTIGMEM */
-
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/mips/include/asm/octeon/cvmx-address.h b/arch/mips/include/asm/octeon/cvmx-address.h
index e4444f8c4a61..5df5c90f6a5d 100644
--- a/arch/mips/include/asm/octeon/cvmx-address.h
+++ b/arch/mips/include/asm/octeon/cvmx-address.h
@@ -152,7 +152,7 @@ typedef union {
/* physical mem address */
struct {
- /* techically, <47:40> are dont-cares */
+	 * technically, <47:40> are don't-cares */
uint64_t zeroes:24;
/* the hardware ignores <39:36> in Octeon I */
uint64_t unaddr:4;
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
index c114a7ba0bad..0e6bf220db61 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -298,6 +298,7 @@ enum cvmx_board_types_enum {
CVMX_BOARD_TYPE_UBNT_E200 = 20003,
CVMX_BOARD_TYPE_UBNT_E220 = 20005,
CVMX_BOARD_TYPE_CUST_DSR1000N = 20006,
+ CVMX_BOARD_TYPE_UBNT_E300 = 20300,
CVMX_BOARD_TYPE_KONTRON_S1901 = 21901,
CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000,
@@ -401,6 +402,7 @@ static inline const char *cvmx_board_type_to_string(enum
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E200)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E220)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DSR1000N)
+ ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E300)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KONTRON_S1901)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX)
}
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 65acab9c41f9..96bc798c1ec1 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -210,9 +210,16 @@ static inline unsigned long ___pa(unsigned long x)
* also affect MIPS so we keep this one until GCC 3.x has been retired
* before we can apply https://patchwork.linux-mips.org/patch/1541/
*/
+#define __pa_symbol_nodebug(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern phys_addr_t __phys_addr_symbol(unsigned long x);
+#else
+#define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
+#endif
#ifndef __pa_symbol
-#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+#define __pa_symbol(x) __phys_addr_symbol((unsigned long)(x))
#endif
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
@@ -232,7 +239,7 @@ static inline int pfn_valid(unsigned long pfn)
/* pfn_valid is defined in linux/mmzone.h */
-#elif defined(CONFIG_NEED_MULTIPLE_NODES)
+#elif defined(CONFIG_NUMA)
#define pfn_valid(pfn) \
({ \
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 6f48649201c5..9ffc8192adae 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -38,7 +38,6 @@ struct pci_controller {
struct resource *io_resource;
unsigned long io_offset;
unsigned long io_map_base;
- struct resource *busn_resource;
#ifndef CONFIG_PCI_DOMAINS_GENERIC
unsigned int index;
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 8b18424b3120..4b2567d6b2df 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -28,7 +28,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Initialize a new pmd table with invalid pointers.
@@ -59,11 +58,15 @@ do { \
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
- pmd_t *pmd;
+ pmd_t *pmd = NULL;
+ struct page *pg;
- pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
- if (pmd)
+ pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER);
+ if (pg) {
+ pgtable_pmd_page_ctor(pg);
+ pmd = (pmd_t *)page_address(pg);
pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
+ }
return pmd;
}
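
The new ctor pairs with pgtable_pmd_page_dtor() in the free path, keeping the page-table accounting and split-ptlock state balanced. For reference, the matching generic pmd_free() looks roughly like this (per include/asm-generic/pgalloc.h); note the generic allocator additionally checks the ctor's return value, which can fail when ptlocks are allocated dynamically:

/* Generic counterpart (sketch): undo the ctor before freeing so the
 * NR_PAGETABLE accounting stays balanced.
 */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE - 1));
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_pages((unsigned long)pmd, PMD_ORDER);
}
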
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 6c0532d7b211..95df9c293d8d 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -93,7 +93,6 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
#endif
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
#define VMALLOC_START MAP_BASE
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 1e7d6ce9d8d6..046465906c82 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -137,7 +137,6 @@
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
-#define FIRST_USER_ADDRESS 0UL
/*
* TLB refill handlers also map the vmalloc area into xuseg. Avoid
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 7834e7c0c78a..0c3550c82b72 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -32,16 +32,11 @@ extern unsigned int vced_count, vcei_count;
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#ifdef CONFIG_32BIT
-#ifdef CONFIG_KVM_GUEST
-/* User space process size is limited to 1GB in KVM Guest Mode */
-#define TASK_SIZE 0x3fff8000UL
-#else
/*
* User space process size: 2GB. This is hardcoded into a few places,
* so don't change it unless you know what you are doing.
*/
#define TASK_SIZE 0x80000000UL
-#endif
#define STACK_TOP_MAX TASK_SIZE
@@ -226,10 +221,6 @@ struct nlm_cop2_state {
#define COP2_INIT
#endif
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
#ifdef CONFIG_CPU_HAS_MSA
# define ARCH_MIN_TASKALIGN 16
# define FPU_ALIGN __aligned(16)
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index e2c352da3877..0b17aaa9e012 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -28,11 +28,6 @@ struct thread_info {
unsigned long tp_value; /* thread pointer */
__u32 cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
- mm_segment_t addr_limit; /*
- * thread address space limit:
- * 0x7fffffff for user-thead
- * 0xffffffff for kernel-thread
- */
struct pt_regs *regs;
long syscall; /* syscall number */
};
@@ -46,7 +41,6 @@ struct thread_info {
.flags = _TIF_FIXADE, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
- .addr_limit = KERNEL_DS, \
}
/*
diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h
index 6aa8f126a43d..b710e76c9c65 100644
--- a/arch/mips/include/asm/traps.h
+++ b/arch/mips/include/asm/traps.h
@@ -24,8 +24,11 @@ extern void (*board_ebase_setup)(void);
extern void (*board_cache_error_setup)(void);
extern int register_nmi_notifier(struct notifier_block *nb);
+extern void reserve_exception_space(phys_addr_t addr, unsigned long size);
extern char except_vec_nmi[];
+#define VECTORSPACING 0x100 /* for EI/VI mode */
+
#define nmi_notifier(fn, pri) \
({ \
static struct notifier_block fn##_nb = { \
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 61fc01f177a6..783fecce65c8 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -16,20 +16,9 @@
#include <asm/asm-eva.h>
#include <asm/extable.h>
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
#ifdef CONFIG_32BIT
-#ifdef CONFIG_KVM_GUEST
-#define __UA_LIMIT 0x40000000UL
-#else
#define __UA_LIMIT 0x80000000UL
-#endif
#define __UA_ADDR ".word"
#define __UA_LA "la"
@@ -54,43 +43,6 @@ extern u64 __ua_limit;
#endif /* CONFIG_64BIT */
/*
- * USER_DS is a bitmask that has the bits set that may not be set in a valid
- * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but
- * the arithmetic we're doing only works if the limit is a power of two, so
- * we use 0x80000000 here on 32-bit kernels. If a process passes an invalid
- * address in this range it's the process's problem, not ours :-)
- */
-
-#ifdef CONFIG_KVM_GUEST
-#define KERNEL_DS ((mm_segment_t) { 0x80000000UL })
-#define USER_DS ((mm_segment_t) { 0xC0000000UL })
-#else
-#define KERNEL_DS ((mm_segment_t) { 0UL })
-#define USER_DS ((mm_segment_t) { __UA_LIMIT })
-#endif
-
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-
-/*
- * eva_kernel_access() - determine whether kernel memory access on an EVA system
- *
- * Determines whether memory accesses should be performed to kernel memory
- * on a system using Extended Virtual Addressing (EVA).
- *
- * Return: true if a kernel memory access on an EVA system, else false.
- */
-static inline bool eva_kernel_access(void)
-{
- if (!IS_ENABLED(CONFIG_EVA))
- return false;
-
- return uaccess_kernel();
-}
-
-/*
* Is an address valid? This does a straightforward calculation rather
* than tests.
*
@@ -127,7 +79,9 @@ static inline bool eva_kernel_access(void)
static inline int __access_ok(const void __user *p, unsigned long size)
{
unsigned long addr = (unsigned long)p;
- return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
+ unsigned long end = addr + size - !!size;
+
+ return (__UA_LIMIT & (addr | end | __ua_size(size))) == 0;
}
#define access_ok(addr, size) \
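
With the per-thread addr_limit gone, __access_ok() tests against the fixed __UA_LIMIT mask alone. A minimal userspace sketch of the arithmetic (assuming the 32-bit limit of 0x80000000UL and leaving out __ua_size(), which this header defines elsewhere to cope with non-constant sizes):

    #include <stdio.h>

    #define UA_LIMIT 0x80000000UL  /* 32-bit MIPS: user space is the low 2 GB */

    /* Model of the new __access_ok(): a range is valid only if neither its
     * first nor its last byte has a kernel-space bit set. */
    static int access_ok_sketch(unsigned long addr, unsigned long size)
    {
            unsigned long end = addr + size - !!size;  /* last byte if size > 0 */

            return (UA_LIMIT & (addr | end)) == 0;
    }

    int main(void)
    {
            printf("%d\n", access_ok_sketch(0x00400000UL, 4096));  /* 1: user range */
            printf("%d\n", access_ok_sketch(0x7ffffffcUL, 8));     /* 0: crosses 2 GB */
            printf("%d\n", access_ok_sketch(0x80000000UL, 4));     /* 0: kernel address */
            return 0;
    }
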
@@ -150,8 +104,13 @@ static inline int __access_ok(const void __user *p, unsigned long size)
*
* Returns zero on success, or -EFAULT on error.
*/
-#define put_user(x,ptr) \
- __put_user_check((x), (ptr), sizeof(*(ptr)))
+#define put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ \
+ might_fault(); \
+ access_ok(__p, sizeof(*__p)) ? __put_user((x), __p) : -EFAULT; \
+})
/*
* get_user: - Get a simple variable from user space.
@@ -171,8 +130,14 @@ static inline int __access_ok(const void __user *p, unsigned long size)
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
-#define get_user(x,ptr) \
- __get_user_check((x), (ptr), sizeof(*(ptr)))
+#define get_user(x, ptr) \
+({ \
+ const __typeof__(*(ptr)) __user *__p = (ptr); \
+ \
+ might_fault(); \
+ access_ok(__p, sizeof(*__p)) ? __get_user((x), __p) : \
+ ((x) = 0, -EFAULT); \
+})
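
Since the access_ok() check is now folded straight into put_user()/get_user(), callers keep the familiar contract. A hedged sketch of a typical caller (the function and its name are hypothetical):

    /* Hypothetical caller: read a u32 from user memory, set a flag bit,
     * and write it back; both accessors return 0 or -EFAULT. */
    static long frob_user_word(u32 __user *uptr)
    {
            u32 val;

            if (get_user(val, uptr))
                    return -EFAULT;

            val |= 0x1;

            return put_user(val, uptr);
    }
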
/*
* __put_user: - Write a simple value into user space, with less checking.
@@ -194,8 +159,32 @@ static inline int __access_ok(const void __user *p, unsigned long size)
*
* Returns zero on success, or -EFAULT on error.
*/
-#define __put_user(x,ptr) \
- __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *__pu_ptr = (ptr); \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ int __pu_err = 0; \
+ \
+ __chk_user_ptr(__pu_ptr); \
+ switch (sizeof(*__pu_ptr)) { \
+ case 1: \
+ __put_data_asm(user_sb, __pu_ptr); \
+ break; \
+ case 2: \
+ __put_data_asm(user_sh, __pu_ptr); \
+ break; \
+ case 4: \
+ __put_data_asm(user_sw, __pu_ptr); \
+ break; \
+ case 8: \
+ __PUT_DW(user_sd, __pu_ptr); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ \
+ __pu_err; \
+})
/*
* __get_user: - Get a simple variable from user space, with less checking.
@@ -218,49 +207,35 @@ static inline int __access_ok(const void __user *p, unsigned long size)
* Returns zero on success, or -EFAULT on error.
* On error, the variable @x is set to zero.
*/
-#define __get_user(x,ptr) \
- __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) \
+({ \
+ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ int __gu_err = 0; \
+ \
+ __chk_user_ptr(__gu_ptr); \
+ switch (sizeof(*__gu_ptr)) { \
+ case 1: \
+ __get_data_asm((x), user_lb, __gu_ptr); \
+ break; \
+ case 2: \
+ __get_data_asm((x), user_lh, __gu_ptr); \
+ break; \
+ case 4: \
+ __get_data_asm((x), user_lw, __gu_ptr); \
+ break; \
+ case 8: \
+ __GET_DW((x), user_ld, __gu_ptr); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ \
+ __gu_err; \
+})
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
-/*
- * Yuck. We need two variants, one for 64bit operation and one
- * for 32 bit mode and old iron.
- */
-#ifndef CONFIG_EVA
-#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
-#else
-/*
- * Kernel specific functions for EVA. We need to use normal load instructions
- * to read data from kernel when operating in EVA mode. We use these macros to
- * avoid redefining __get_user_asm for EVA.
- */
-#undef _loadd
-#undef _loadw
-#undef _loadh
-#undef _loadb
-#ifdef CONFIG_32BIT
-#define _loadd _loadw
-#else
-#define _loadd(reg, addr) "ld " reg ", " addr
-#endif
-#define _loadw(reg, addr) "lw " reg ", " addr
-#define _loadh(reg, addr) "lh " reg ", " addr
-#define _loadb(reg, addr) "lb " reg ", " addr
-
-#define __get_kernel_common(val, size, ptr) \
-do { \
- switch (size) { \
- case 1: __get_data_asm(val, _loadb, ptr); break; \
- case 2: __get_data_asm(val, _loadh, ptr); break; \
- case 4: __get_data_asm(val, _loadw, ptr); break; \
- case 8: __GET_DW(val, _loadd, ptr); break; \
- default: __get_user_unknown(); break; \
- } \
-} while (0)
-#endif
-
#ifdef CONFIG_32BIT
#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
#endif
@@ -268,49 +243,6 @@ do { \
#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
#endif
-extern void __get_user_unknown(void);
-
-#define __get_user_common(val, size, ptr) \
-do { \
- switch (size) { \
- case 1: __get_data_asm(val, user_lb, ptr); break; \
- case 2: __get_data_asm(val, user_lh, ptr); break; \
- case 4: __get_data_asm(val, user_lw, ptr); break; \
- case 8: __GET_DW(val, user_ld, ptr); break; \
- default: __get_user_unknown(); break; \
- } \
-} while (0)
-
-#define __get_user_nocheck(x, ptr, size) \
-({ \
- int __gu_err; \
- \
- if (eva_kernel_access()) { \
- __get_kernel_common((x), size, ptr); \
- } else { \
- __chk_user_ptr(ptr); \
- __get_user_common((x), size, ptr); \
- } \
- __gu_err; \
-})
-
-#define __get_user_check(x, ptr, size) \
-({ \
- int __gu_err = -EFAULT; \
- const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
- \
- might_fault(); \
- if (likely(access_ok( __gu_ptr, size))) { \
- if (eva_kernel_access()) \
- __get_kernel_common((x), size, __gu_ptr); \
- else \
- __get_user_common((x), size, __gu_ptr); \
- } else \
- (x) = 0; \
- \
- __gu_err; \
-})
-
#define __get_data_asm(val, insn, addr) \
{ \
long __gu_tmp; \
@@ -364,39 +296,36 @@ do { \
(val) = __gu_tmp.t; \
}
-#ifndef CONFIG_EVA
-#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
-#else
-/*
- * Kernel specific functions for EVA. We need to use normal load instructions
- * to read data from kernel when operating in EVA mode. We use these macros to
- * avoid redefining __get_data_asm for EVA.
- */
-#undef _stored
-#undef _storew
-#undef _storeh
-#undef _storeb
-#ifdef CONFIG_32BIT
-#define _stored _storew
-#else
-#define _stored(reg, addr) "ld " reg ", " addr
-#endif
-
-#define _storew(reg, addr) "sw " reg ", " addr
-#define _storeh(reg, addr) "sh " reg ", " addr
-#define _storeb(reg, addr) "sb " reg ", " addr
+#define HAVE_GET_KERNEL_NOFAULT
-#define __put_kernel_common(ptr, size) \
+#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
- switch (size) { \
- case 1: __put_data_asm(_storeb, ptr); break; \
- case 2: __put_data_asm(_storeh, ptr); break; \
- case 4: __put_data_asm(_storew, ptr); break; \
- case 8: __PUT_DW(_stored, ptr); break; \
- default: __put_user_unknown(); break; \
+ int __gu_err; \
+ \
+ switch (sizeof(type)) { \
+ case 1: \
+ __get_data_asm(*(type *)(dst), kernel_lb, \
+ (__force type *)(src)); \
+ break; \
+ case 2: \
+ __get_data_asm(*(type *)(dst), kernel_lh, \
+ (__force type *)(src)); \
+ break; \
+ case 4: \
+ __get_data_asm(*(type *)(dst), kernel_lw, \
+ (__force type *)(src)); \
+ break; \
+ case 8: \
+ __GET_DW(*(type *)(dst), kernel_ld, \
+ (__force type *)(src)); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ break; \
} \
-} while(0)
-#endif
+ if (unlikely(__gu_err)) \
+ goto err_label; \
+} while (0)
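
Defining HAVE_GET_KERNEL_NOFAULT lets the generic get_kernel_nofault() in <linux/uaccess.h> route through this macro instead of the old set_fs(KERNEL_DS) dance. A sketch of the goto-label contract (the helper name is made up):

    /* Hypothetical helper: read a kernel word with faults suppressed; the
     * macro branches to the given label if the load faults. */
    static unsigned long peek_kernel_word(const unsigned long *p)
    {
            unsigned long val;

            __get_kernel_nofault(&val, p, unsigned long, Efault);
            return val;
    Efault:
            return 0;
    }
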
/*
* Yuck. We need two variants, one for 64bit operation and one
@@ -409,49 +338,6 @@ do { \
#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
#endif
-#define __put_user_common(ptr, size) \
-do { \
- switch (size) { \
- case 1: __put_data_asm(user_sb, ptr); break; \
- case 2: __put_data_asm(user_sh, ptr); break; \
- case 4: __put_data_asm(user_sw, ptr); break; \
- case 8: __PUT_DW(user_sd, ptr); break; \
- default: __put_user_unknown(); break; \
- } \
-} while (0)
-
-#define __put_user_nocheck(x, ptr, size) \
-({ \
- __typeof__(*(ptr)) __pu_val; \
- int __pu_err = 0; \
- \
- __pu_val = (x); \
- if (eva_kernel_access()) { \
- __put_kernel_common(ptr, size); \
- } else { \
- __chk_user_ptr(ptr); \
- __put_user_common(ptr, size); \
- } \
- __pu_err; \
-})
-
-#define __put_user_check(x, ptr, size) \
-({ \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- __typeof__(*(ptr)) __pu_val = (x); \
- int __pu_err = -EFAULT; \
- \
- might_fault(); \
- if (likely(access_ok( __pu_addr, size))) { \
- if (eva_kernel_access()) \
- __put_kernel_common(__pu_addr, size); \
- else \
- __put_user_common(__pu_addr, size); \
- } \
- \
- __pu_err; \
-})
-
#define __put_data_asm(insn, ptr) \
{ \
__asm__ __volatile__( \
@@ -490,7 +376,33 @@ do { \
"i" (-EFAULT)); \
}
-extern void __put_user_unknown(void);
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ type __pu_val; \
+ int __pu_err = 0; \
+ \
+ __pu_val = *(__force type *)(src); \
+ switch (sizeof(type)) { \
+ case 1: \
+ __put_data_asm(kernel_sb, (type *)(dst)); \
+ break; \
+ case 2: \
+ __put_data_asm(kernel_sh, (type *)(dst)); \
+ break; \
+ case 4: \
+ __put_data_asm(kernel_sw, (type *)(dst)); \
+ break; \
+ case 8: \
+ __PUT_DW(kernel_sd, (type *)(dst)); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ break; \
+ } \
+ if (unlikely(__pu_err)) \
+ goto err_label; \
+} while (0)
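
The store-side counterpart follows the same pattern; a matching sketch (again with a hypothetical helper name):

    /* Hypothetical helper: write a kernel word with faults suppressed. */
    static bool poke_kernel_word(unsigned long *p, unsigned long val)
    {
            __put_kernel_nofault(p, &val, unsigned long, Efault);
            return true;
    Efault:
            return false;
    }
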
+
/*
* We're generating jumps to subroutines which will be outside the range of
@@ -514,124 +426,85 @@ extern void __put_user_unknown(void);
#define DADDI_SCRATCH "$0"
#endif
-extern size_t __copy_user(void *__to, const void *__from, size_t __n);
-
-#define __invoke_copy_from(func, to, from, n) \
-({ \
- register void *__cu_to_r __asm__("$4"); \
- register const void __user *__cu_from_r __asm__("$5"); \
- register long __cu_len_r __asm__("$6"); \
- \
- __cu_to_r = (to); \
- __cu_from_r = (from); \
- __cu_len_r = (n); \
- __asm__ __volatile__( \
- ".set\tnoreorder\n\t" \
- __MODULE_JAL(func) \
- ".set\tnoat\n\t" \
- __UA_ADDU "\t$1, %1, %2\n\t" \
- ".set\tat\n\t" \
- ".set\treorder" \
- : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
- : \
- : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
- DADDI_SCRATCH, "memory"); \
- __cu_len_r; \
-})
-
-#define __invoke_copy_to(func, to, from, n) \
-({ \
- register void __user *__cu_to_r __asm__("$4"); \
- register const void *__cu_from_r __asm__("$5"); \
- register long __cu_len_r __asm__("$6"); \
- \
- __cu_to_r = (to); \
- __cu_from_r = (from); \
- __cu_len_r = (n); \
- __asm__ __volatile__( \
- __MODULE_JAL(func) \
- : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
- : \
- : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
- DADDI_SCRATCH, "memory"); \
- __cu_len_r; \
-})
-
-#define __invoke_copy_from_kernel(to, from, n) \
- __invoke_copy_from(__copy_user, to, from, n)
-
-#define __invoke_copy_to_kernel(to, from, n) \
- __invoke_copy_to(__copy_user, to, from, n)
-
-#define ___invoke_copy_in_kernel(to, from, n) \
- __invoke_copy_from(__copy_user, to, from, n)
-
-#ifndef CONFIG_EVA
-#define __invoke_copy_from_user(to, from, n) \
- __invoke_copy_from(__copy_user, to, from, n)
-
-#define __invoke_copy_to_user(to, from, n) \
- __invoke_copy_to(__copy_user, to, from, n)
-
-#define ___invoke_copy_in_user(to, from, n) \
- __invoke_copy_from(__copy_user, to, from, n)
-
-#else
-
-/* EVA specific functions */
-
-extern size_t __copy_from_user_eva(void *__to, const void *__from,
- size_t __n);
-extern size_t __copy_to_user_eva(void *__to, const void *__from,
- size_t __n);
-extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
-
-/*
- * Source or destination address is in userland. We need to go through
- * the TLB
- */
-#define __invoke_copy_from_user(to, from, n) \
- __invoke_copy_from(__copy_from_user_eva, to, from, n)
-
-#define __invoke_copy_to_user(to, from, n) \
- __invoke_copy_to(__copy_to_user_eva, to, from, n)
-
-#define ___invoke_copy_in_user(to, from, n) \
- __invoke_copy_from(__copy_in_user_eva, to, from, n)
-
-#endif /* CONFIG_EVA */
+extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n);
+extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n);
+extern size_t __raw_copy_in_user(void *__to, const void *__from, size_t __n);
static inline unsigned long
-raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (eva_kernel_access())
- return __invoke_copy_to_kernel(to, from, n);
- else
- return __invoke_copy_to_user(to, from, n);
+ register void *__cu_to_r __asm__("$4");
+ register const void __user *__cu_from_r __asm__("$5");
+ register long __cu_len_r __asm__("$6");
+
+ __cu_to_r = to;
+ __cu_from_r = from;
+ __cu_len_r = n;
+
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ __MODULE_JAL(__raw_copy_from_user)
+ ".set\tnoat\n\t"
+ __UA_ADDU "\t$1, %1, %2\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
+ :
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
+ DADDI_SCRATCH, "memory");
+
+ return __cu_len_r;
}
static inline unsigned long
-raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (eva_kernel_access())
- return __invoke_copy_from_kernel(to, from, n);
- else
- return __invoke_copy_from_user(to, from, n);
+ register void __user *__cu_to_r __asm__("$4");
+ register const void *__cu_from_r __asm__("$5");
+ register long __cu_len_r __asm__("$6");
+
+ __cu_to_r = to;
+ __cu_from_r = from;
+ __cu_len_r = n;
+
+ __asm__ __volatile__(
+ __MODULE_JAL(__raw_copy_to_user)
+ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
+ :
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
+ DADDI_SCRATCH, "memory");
+
+ return __cu_len_r;
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
static inline unsigned long
-raw_copy_in_user(void __user*to, const void __user *from, unsigned long n)
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
- if (eva_kernel_access())
- return ___invoke_copy_in_kernel(to, from, n);
- else
- return ___invoke_copy_in_user(to, from, n);
+ register void __user *__cu_to_r __asm__("$4");
+ register const void __user *__cu_from_r __asm__("$5");
+ register long __cu_len_r __asm__("$6");
+
+ __cu_to_r = to;
+ __cu_from_r = from;
+ __cu_len_r = n;
+
+ __asm__ __volatile__(
+ ".set\tnoreorder\n\t"
+ __MODULE_JAL(__raw_copy_in_user)
+ ".set\tnoat\n\t"
+ __UA_ADDU "\t$1, %1, %2\n\t"
+ ".set\tat\n\t"
+ ".set\treorder"
+ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)
+ :
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",
+ DADDI_SCRATCH, "memory");
+ return __cu_len_r;
}
-extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
/*
@@ -657,28 +530,16 @@ __clear_user(void __user *addr, __kernel_size_t size)
#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
#endif /* CONFIG_CPU_MICROMIPS */
- if (eva_kernel_access()) {
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, $0\n\t"
- "move\t$6, %2\n\t"
- __MODULE_JAL(__bzero_kernel)
- "move\t%0, $6"
- : "=r" (res)
- : "r" (addr), "r" (size)
- : bzero_clobbers);
- } else {
- might_fault();
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, $0\n\t"
- "move\t$6, %2\n\t"
- __MODULE_JAL(__bzero)
- "move\t%0, $6"
- : "=r" (res)
- : "r" (addr), "r" (size)
- : bzero_clobbers);
- }
+ might_fault();
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, $0\n\t"
+ "move\t$6, %2\n\t"
+ __MODULE_JAL(__bzero)
+ "move\t%0, $6"
+ : "=r" (res)
+ : "r" (addr), "r" (size)
+ : bzero_clobbers);
return res;
}
@@ -692,7 +553,6 @@ __clear_user(void __user *addr, __kernel_size_t size)
__cl_size; \
})
-extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
/*
@@ -718,33 +578,23 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
{
long res;
- if (eva_kernel_access()) {
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- "move\t$6, %3\n\t"
- __MODULE_JAL(__strncpy_from_kernel_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (__to), "r" (__from), "r" (__len)
- : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
- } else {
- might_fault();
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- "move\t$6, %3\n\t"
- __MODULE_JAL(__strncpy_from_user_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (__to), "r" (__from), "r" (__len)
- : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
- }
+ if (!access_ok(__from, __len))
+ return -EFAULT;
+
+ might_fault();
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ "move\t$6, %3\n\t"
+ __MODULE_JAL(__strncpy_from_user_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (__to), "r" (__from), "r" (__len)
+ : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
return res;
}
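
With the EVA kernel path gone, strncpy_from_user() now rejects an invalid user pointer up front instead of selecting an alternate asm routine. Illustrative use, assuming the usual convention that the return value is the copied length or a negative errno:

    /* Illustrative only: pull a NUL-terminated name into a fixed buffer. */
    static long fetch_name_sketch(const char __user *uname)
    {
            char buf[64];
            long len = strncpy_from_user(buf, uname, sizeof(buf));

            if (len < 0)
                    return len;              /* -EFAULT */
            if (len == sizeof(buf))
                    return -ENAMETOOLONG;    /* no NUL within the buffer */
            return len;
    }
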
-extern long __strnlen_kernel_asm(const char __user *s, long n);
extern long __strnlen_user_asm(const char __user *s, long n);
/*
@@ -764,26 +614,18 @@ static inline long strnlen_user(const char __user *s, long n)
{
long res;
+ if (!access_ok(s, 1))
+ return 0;
+
might_fault();
- if (eva_kernel_access()) {
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- __MODULE_JAL(__strnlen_kernel_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (s), "r" (n)
- : "$2", "$4", "$5", __UA_t0, "$31");
- } else {
- __asm__ __volatile__(
- "move\t$4, %1\n\t"
- "move\t$5, %2\n\t"
- __MODULE_JAL(__strnlen_user_asm)
- "move\t%0, $2"
- : "=r" (res)
- : "r" (s), "r" (n)
- : "$2", "$4", "$5", __UA_t0, "$31");
- }
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ __MODULE_JAL(__strnlen_user_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s), "r" (n)
+ : "$2", "$4", "$5", __UA_t0, "$31");
return res;
}
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 5d70babfc9ee..c2196b1b6604 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -17,6 +17,10 @@
#include <asm/unistd_nr_n64.h>
#include <asm/unistd_nr_o32.h>
+#define __NR_N32_Linux 6000
+#define __NR_64_Linux 5000
+#define __NR_O32_Linux 4000
+
#ifdef CONFIG_MIPS32_N32
#define NR_syscalls (__NR_N32_Linux + __NR_N32_Linux_syscalls)
#elif defined(CONFIG_64BIT)
diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h
index 2203e2d0ae2a..44a45f3fa4b0 100644
--- a/arch/mips/include/asm/vdso/gettimeofday.h
+++ b/arch/mips/include/asm/vdso/gettimeofday.h
@@ -20,6 +20,12 @@
#define VDSO_HAS_CLOCK_GETRES 1
+#if MIPS_ISA_REV < 6
+#define VDSO_SYSCALL_CLOBBERS "hi", "lo",
+#else
+#define VDSO_SYSCALL_CLOBBERS
+#endif
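
MIPS R6 dropped the hi/lo accumulator registers, so naming them in a clobber list breaks R6 builds; the macro's trailing comma is what lets it splice into the middle of the list. A minimal model of the trick (names invented):

    #define EXTRA_CLOBBERS "hi", "lo",       /* note the trailing comma */

    /* Expands to { "$1", "hi", "lo", "memory" }; if EXTRA_CLOBBERS is
     * instead defined empty, the same line still compiles as
     * { "$1", "memory" }. */
    static const char *clobbers[] = { "$1", EXTRA_CLOBBERS "memory" };
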
+
static __always_inline long gettimeofday_fallback(
struct __kernel_old_timeval *_tv,
struct timezone *_tz)
@@ -35,7 +41,9 @@ static __always_inline long gettimeofday_fallback(
: "=r" (ret), "=r" (error)
: "r" (tv), "r" (tz), "r" (nr)
: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
- "$14", "$15", "$24", "$25", "hi", "lo", "memory");
+ "$14", "$15", "$24", "$25",
+ VDSO_SYSCALL_CLOBBERS
+ "memory");
return error ? -ret : ret;
}
@@ -59,7 +67,9 @@ static __always_inline long clock_gettime_fallback(
: "=r" (ret), "=r" (error)
: "r" (clkid), "r" (ts), "r" (nr)
: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
- "$14", "$15", "$24", "$25", "hi", "lo", "memory");
+ "$14", "$15", "$24", "$25",
+ VDSO_SYSCALL_CLOBBERS
+ "memory");
return error ? -ret : ret;
}
@@ -83,7 +93,9 @@ static __always_inline int clock_getres_fallback(
: "=r" (ret), "=r" (error)
: "r" (clkid), "r" (ts), "r" (nr)
: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
- "$14", "$15", "$24", "$25", "hi", "lo", "memory");
+ "$14", "$15", "$24", "$25",
+ VDSO_SYSCALL_CLOBBERS
+ "memory");
return error ? -ret : ret;
}
@@ -105,7 +117,9 @@ static __always_inline long clock_gettime32_fallback(
: "=r" (ret), "=r" (error)
: "r" (clkid), "r" (ts), "r" (nr)
: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
- "$14", "$15", "$24", "$25", "hi", "lo", "memory");
+ "$14", "$15", "$24", "$25",
+ VDSO_SYSCALL_CLOBBERS
+ "memory");
return error ? -ret : ret;
}
@@ -125,7 +139,9 @@ static __always_inline int clock_getres32_fallback(
: "=r" (ret), "=r" (error)
: "r" (clkid), "r" (ts), "r" (nr)
: "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
- "$14", "$15", "$24", "$25", "hi", "lo", "memory");
+ "$14", "$15", "$24", "$25",
+ VDSO_SYSCALL_CLOBBERS
+ "memory");
return error ? -ret : ret;
}
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index 57dc2ac4f8bd..40b210c65a5a 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -98,6 +98,9 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 2d949969313b..cdf404a831b2 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -138,6 +138,8 @@
#define SO_PREFER_BUSY_POLL 69
#define SO_BUSY_POLL_BUDGET 70
+#define SO_NETNS_COOKIE 71
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/mips/ingenic/Kconfig b/arch/mips/ingenic/Kconfig
index 3238e16febd5..f595b339a4b8 100644
--- a/arch/mips/ingenic/Kconfig
+++ b/arch/mips/ingenic/Kconfig
@@ -4,9 +4,11 @@ config MACH_INGENIC_GENERIC
bool
select MACH_INGENIC
select MACH_JZ4740
+ select MACH_JZ4725B
select MACH_JZ4770
select MACH_JZ4780
select MACH_X1000
+ select MACH_X1830
choice
prompt "Machine type"
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index b4a57f1de772..814b3da30501 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -17,10 +17,10 @@ obj-y += cpu-probe.o
endif
ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_ftrace.o = -pg
-CFLAGS_REMOVE_early_printk.o = -pg
-CFLAGS_REMOVE_perf_event.o = -pg
-CFLAGS_REMOVE_perf_event_mipsxx.o = -pg
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_early_printk.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_perf_event.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_perf_event_mipsxx.o = $(CC_FLAGS_FTRACE)
endif
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
diff --git a/arch/mips/kernel/access-helper.h b/arch/mips/kernel/access-helper.h
new file mode 100644
index 000000000000..590388031503
--- /dev/null
+++ b/arch/mips/kernel/access-helper.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/uaccess.h>
+
+static inline int __get_addr(unsigned long *a, unsigned long *p, bool user)
+{
+ return user ? get_user(*a, (unsigned long __user *)p) :
+ get_kernel_nofault(*a, p);
+}
+
+static inline int __get_inst16(u16 *i, u16 *p, bool user)
+{
+ return user ? get_user(*i, (u16 __user *)p) : get_kernel_nofault(*i, p);
+}
+
+static inline int __get_inst32(u32 *i, u32 *p, bool user)
+{
+ return user ? get_user(*i, (u32 __user *)p) : get_kernel_nofault(*i, p);
+}
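
These helpers let the exception code pick get_user() or get_kernel_nofault() based on which mode trapped, replacing the old set_fs(KERNEL_DS) switching. A sketch of how a trap handler calls them (compare the do_bp() hunk further down):

    /* Inside a hypothetical exception handler with a struct pt_regs *regs: */
    u32 opcode;
    bool user = user_mode(regs);

    if (__get_inst32(&opcode, (u32 *)exception_epc(regs), user))
            force_sig(SIGSEGV);              /* PC is unreadable */
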
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index aebfda81120a..04ca75278f02 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -78,7 +78,6 @@ void output_ptreg_defines(void)
void output_task_defines(void)
{
COMMENT("MIPS task_struct offsets.");
- OFFSET(TASK_STATE, task_struct, state);
OFFSET(TASK_THREAD_INFO, task_struct, stack);
OFFSET(TASK_FLAGS, task_struct, flags);
OFFSET(TASK_MM, task_struct, mm);
@@ -98,7 +97,6 @@ void output_thread_info_defines(void)
OFFSET(TI_TP_VALUE, thread_info, tp_value);
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
- OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
OFFSET(TI_REGS, thread_info, regs);
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 995ad9e69ded..32ec67c9ab67 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -195,10 +195,6 @@ int c0_compare_int_usable(void)
unsigned int delta;
unsigned int cnt;
-#ifdef CONFIG_KVM_GUEST
- return 1;
-#endif
-
/*
* IP7 already pending? Try to clear it by acking the timer.
*/
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 89107deb03fc..ac9c8cfb2ba9 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -41,7 +41,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
do {
old32 = load32;
new32 = (load32 & ~mask) | (val << shift);
- load32 = cmpxchg(ptr32, old32, new32);
+ load32 = arch_cmpxchg(ptr32, old32, new32);
} while (load32 != old32);
return (load32 & mask) >> shift;
@@ -97,7 +97,7 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
*/
old32 = (load32 & ~mask) | (old << shift);
new32 = (load32 & ~mask) | (new << shift);
- load32 = cmpxchg(ptr32, old32, new32);
+ load32 = arch_cmpxchg(ptr32, old32, new32);
if (load32 == old32)
return old;
}
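
These helpers emulate 1- and 2-byte cmpxchg on top of the native 32-bit primitive; the switch to arch_cmpxchg() keeps them on the raw, uninstrumented operation now that plain cmpxchg() is the instrumented wrapper. A userspace model of the byte case (little-endian shift shown; the kernel derives the shift from the byte offset and endianness):

    #include <stdint.h>

    /* Sketch of __cmpxchg_small() for one byte: CAS the containing aligned
     * 32-bit word, keeping the other three bytes unchanged. */
    static uint8_t cas_u8(volatile uint32_t *word, unsigned int off,
                          uint8_t old, uint8_t new)
    {
            uint32_t shift = off * 8;
            uint32_t mask = 0xffu << shift;
            uint32_t load = *word;

            for (;;) {
                    uint32_t old32 = (load & ~mask) | ((uint32_t)old << shift);
                    uint32_t new32 = (load & ~mask) | ((uint32_t)new << shift);

                    load = __sync_val_compare_and_swap(word, old32, new32);
                    if (load == old32)
                            return old;      /* our byte was swapped in */
                    if (((load & mask) >> shift) != old)
                            return (load & mask) >> shift;  /* byte changed under us */
                    /* Only the neighbouring bytes changed: rebuild and retry. */
            }
    }
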
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 9a89637b4ecf..630fcb4cb30e 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -26,6 +26,7 @@
#include <asm/elf.h>
#include <asm/pgtable-bits.h>
#include <asm/spram.h>
+#include <asm/traps.h>
#include <linux/uaccess.h>
#include "fpu-probe.h"
@@ -1628,6 +1629,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_BMIPS3300;
__cpu_name[cpu] = "Broadcom BMIPS3300";
set_elf_platform(cpu, "bmips3300");
+ reserve_exception_space(0x400, VECTORSPACING * 64);
break;
case PRID_IMP_BMIPS43XX: {
int rev = c->processor_id & PRID_REV_MASK;
@@ -1638,6 +1640,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "Broadcom BMIPS4380";
set_elf_platform(cpu, "bmips4380");
c->options |= MIPS_CPU_RIXI;
+ reserve_exception_space(0x400, VECTORSPACING * 64);
} else {
c->cputype = CPU_BMIPS4350;
__cpu_name[cpu] = "Broadcom BMIPS4350";
@@ -1654,6 +1657,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "Broadcom BMIPS5000";
set_elf_platform(cpu, "bmips5000");
c->options |= MIPS_CPU_ULRI | MIPS_CPU_RIXI;
+ reserve_exception_space(0x1000, VECTORSPACING * 64);
break;
}
}
@@ -1748,7 +1752,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
}
- c->writecombine = _CACHE_UNCACHED_ACCELERATED;
c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT |
MIPS_ASE_LOONGSON_EXT2);
break;
@@ -1778,7 +1781,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
* register, we correct it here.
*/
c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
- c->writecombine = _CACHE_UNCACHED_ACCELERATED;
c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
@@ -1789,7 +1791,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
set_elf_platform(cpu, "loongson3a");
set_isa(c, MIPS_CPU_ISA_M64R2);
decode_cpucfg(c);
- c->writecombine = _CACHE_UNCACHED_ACCELERATED;
break;
default:
panic("Unknown Loongson Processor ID!");
@@ -1839,6 +1840,11 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
*/
case PRID_COMP_INGENIC_D0:
c->isa_level &= ~MIPS_CPU_ISA_M32R2;
+
+ /* FPU is not properly detected on JZ4760(B). */
+ if (c->processor_id == 0x2ed0024f)
+ c->options |= MIPS_CPU_FPU;
+
fallthrough;
/*
@@ -2133,6 +2139,8 @@ void cpu_probe(void)
if (cpu == 0)
__ua_limit = ~((1ull << cpu_vmbits) - 1);
#endif
+
+ reserve_exception_space(0, 0x1000);
}
void cpu_report(void)
diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
index abdbbe8c5a43..af654771918c 100644
--- a/arch/mips/kernel/cpu-r3k-probe.c
+++ b/arch/mips/kernel/cpu-r3k-probe.c
@@ -21,6 +21,7 @@
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/elf.h>
+#include <asm/traps.h>
#include "fpu-probe.h"
@@ -158,6 +159,8 @@ void cpu_probe(void)
cpu_set_fpu_opts(c);
else
cpu_set_nofpu_opts(c);
+
+ reserve_exception_space(0, 0x400);
}
void cpu_report(void)
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 666b9969c1bd..8c401e42301c 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -90,7 +90,6 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
unsigned int new_code2)
{
int faulted;
- mm_segment_t old_fs;
safe_store_code(new_code1, ip, faulted);
if (unlikely(faulted))
@@ -102,10 +101,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
return -EFAULT;
ip -= 4;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
flush_icache_range(ip, ip + 8);
- set_fs(old_fs);
return 0;
}
@@ -114,7 +110,6 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
unsigned int new_code2)
{
int faulted;
- mm_segment_t old_fs;
ip += 4;
safe_store_code(new_code2, ip, faulted);
@@ -126,10 +121,7 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
if (unlikely(faulted))
return -EFAULT;
- old_fs = get_fs();
- set_fs(KERNEL_DS);
flush_icache_range(ip, ip + 8);
- set_fs(old_fs);
return 0;
}
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 54dfba8fa77c..75bff0f77319 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -403,9 +403,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
if (kcb->kprobe_status & KPROBE_HIT_SS) {
resume_execution(cur, regs, kcb);
regs->cp0_status |= kcb->kprobe_old_SR;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 7efa0d1a4c2b..73c8e7990a97 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -124,7 +124,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
/* kernel thread */
unsigned long status = p->thread.cp0_status;
memset(childregs, 0, sizeof(struct pt_regs));
- ti->addr_limit = KERNEL_DS;
p->thread.reg16 = usp; /* fn */
p->thread.reg17 = kthread_arg;
p->thread.reg29 = childksp;
@@ -145,7 +144,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
childregs->regs[2] = 0; /* Child gets zero as return value */
if (usp)
childregs->regs[29] = usp;
- ti->addr_limit = USER_DS;
p->thread.reg29 = (unsigned long) childregs;
p->thread.reg31 = (unsigned long) ret_from_fork;
@@ -664,7 +662,7 @@ unsigned long get_wchan(struct task_struct *task)
unsigned long ra = 0;
#endif
- if (!task || task == current || task->state == TASK_RUNNING)
+ if (!task || task == current || task_is_running(task))
goto out;
if (!task_stack_page(task))
goto out;
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index 499a5357c09f..56b51de2dc51 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
+#include <linux/panic_notifier.h>
#include <linux/sched/task.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index ac870893ba2d..f3c908abdbb8 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -11,6 +11,8 @@
#include <asm/stackframe.h>
#include <asm/addrspace.h>
+#include <kernel-entry-init.h>
+
LEAF(relocate_new_kernel)
PTR_L a0, arg0
PTR_L a1, arg1
@@ -125,11 +127,8 @@ LEAF(kexec_smp_wait)
1: LONG_L s0, (t0)
bne s0, zero,1b
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
- .set push
- .set noreorder
- synci 0($0)
- .set pop
+#ifdef USE_KEXEC_SMP_WAIT_FINAL
+ kexec_smp_wait_final
#else
sync
#endif
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index b449b68662a9..b1b2e106f711 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -48,10 +48,8 @@ NESTED(handle_sys, PT_SIZE, sp)
* We intentionally keep the kernel stack a little below the top of
* userspace so we don't have to do a slower byte accurate check here.
*/
- lw t5, TI_ADDR_LIMIT($28)
addu t4, t0, 32
- and t5, t4
- bltz t5, bad_stack # -> sp is bad
+ bltz t4, bad_stack # -> sp is bad
/*
* Ok, copy the args from the luser stack to the kernel stack.
@@ -217,9 +215,9 @@ einval: li v0, -ENOSYS
#define sys_sched_getaffinity mipsmt_sys_sched_getaffinity
#endif /* CONFIG_MIPS_MT_FPAFF */
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
#define __SYSCALL(nr, entry) PTR entry
.align 2
.type sys_call_table, @object
EXPORT(sys_call_table)
-#include <asm/syscall_table_32_o32.h>
-#undef __SYSCALL
+#include <asm/syscall_table_o32.h>
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 35d8c86b160e..f650c55a17dc 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -104,5 +104,4 @@ not_n32_scall:
#define __SYSCALL(nr, entry) PTR entry
.type sysn32_call_table, @object
EXPORT(sysn32_call_table)
-#include <asm/syscall_table_64_n32.h>
-#undef __SYSCALL
+#include <asm/syscall_table_n32.h>
diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S
index 5e9c497ce099..5d7bfc65e4d0 100644
--- a/arch/mips/kernel/scall64-n64.S
+++ b/arch/mips/kernel/scall64-n64.S
@@ -113,5 +113,4 @@ illegal_syscall:
.align 3
.type sys_call_table, @object
EXPORT(sys_call_table)
-#include <asm/syscall_table_64_n64.h>
-#undef __SYSCALL
+#include <asm/syscall_table_n64.h>
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 50c9a57e0d3a..cedc8bd88804 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -213,9 +213,9 @@ einval: li v0, -ENOSYS
jr ra
END(sys32_syscall)
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
#define __SYSCALL(nr, entry) PTR entry
.align 3
.type sys32_call_table,@object
EXPORT(sys32_call_table)
-#include <asm/syscall_table_64_o32.h>
-#undef __SYSCALL
+#include <asm/syscall_table_o32.h>
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 279be0153f8b..23a140327a0b 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -43,7 +43,7 @@
#include <asm/prom.h>
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
-const char __section(".appended_dtb") __appended_dtb[0x100000];
+char __section(".appended_dtb") __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 359b176b665f..b6ef5f7312cf 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -134,17 +134,24 @@ static void __init bmips_smp_setup(void)
if (!board_ebase_setup)
board_ebase_setup = &bmips_ebase_setup;
- __cpu_number_map[boot_cpu] = 0;
- __cpu_logical_map[0] = boot_cpu;
-
- for (i = 0; i < max_cpus; i++) {
- if (i != boot_cpu) {
- __cpu_number_map[i] = cpu;
- __cpu_logical_map[cpu] = i;
- cpu++;
+ if (max_cpus > 1) {
+ __cpu_number_map[boot_cpu] = 0;
+ __cpu_logical_map[0] = boot_cpu;
+
+ for (i = 0; i < max_cpus; i++) {
+ if (i != boot_cpu) {
+ __cpu_number_map[i] = cpu;
+ __cpu_logical_map[cpu] = i;
+ cpu++;
+ }
+ set_cpu_possible(i, 1);
+ set_cpu_present(i, 1);
}
- set_cpu_possible(i, 1);
- set_cpu_present(i, 1);
+ } else {
+ __cpu_number_map[0] = boot_cpu;
+ __cpu_logical_map[0] = 0;
+ set_cpu_possible(0, 1);
+ set_cpu_present(0, 1);
}
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index ef86fbad8546..d542fb7af3ba 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -348,7 +348,6 @@ asmlinkage void start_secondary(void)
*/
calibrate_delay();
- preempt_disable();
cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
diff --git a/arch/mips/kernel/spinlock_test.c b/arch/mips/kernel/spinlock_test.c
index ab4e3e1b138d..90f53e041a38 100644
--- a/arch/mips/kernel/spinlock_test.c
+++ b/arch/mips/kernel/spinlock_test.c
@@ -35,7 +35,7 @@ static int ss_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n");
@@ -114,13 +114,13 @@ static int multi_get(void *data, u64 *val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n");
static int __init spinlock_test(void)
{
- debugfs_create_file("spin_single", S_IRUGO, mips_debugfs_dir, NULL,
+ debugfs_create_file_unsafe("spin_single", S_IRUGO, mips_debugfs_dir, NULL,
&fops_ss);
- debugfs_create_file("spin_multi", S_IRUGO, mips_debugfs_dir, NULL,
+ debugfs_create_file_unsafe("spin_multi", S_IRUGO, mips_debugfs_dir, NULL,
&fops_multi);
return 0;
}
diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile
index 51f8b805f2ed..10bf90dc02c0 100644
--- a/arch/mips/kernel/syscalls/Makefile
+++ b/arch/mips/kernel/syscalls/Makefile
@@ -5,85 +5,40 @@ uapi := arch/$(SRCARCH)/include/generated/uapi/asm
_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
-syscalln32 := $(src)/syscall_n32.tbl
-syscalln64 := $(src)/syscall_n64.tbl
-syscallo32 := $(src)/syscall_o32.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
sysnr := $(srctree)/$(src)/syscallnr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abis_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '$(syshdr_offset_$(basetarget))'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --offset __NR_Linux $< $@
quiet_cmd_sysnr = SYSNR $@
cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \
'$(sysnr_abis_$(basetarget))' \
- '$(sysnr_pfx_$(basetarget))' \
- '$(sysnr_offset_$(basetarget))'
+ '$(sysnr_pfx_$(basetarget))'
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
- '$(systbl_abis_$(basetarget))' \
- '$(systbl_abi_$(basetarget))' \
- '$(systbl_offset_$(basetarget))'
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@
-syshdr_offset_unistd_n32 := __NR_Linux
-$(uapi)/unistd_n32.h: $(syscalln32) $(syshdr) FORCE
- $(call if_changed,syshdr)
-
-syshdr_offset_unistd_n64 := __NR_Linux
-$(uapi)/unistd_n64.h: $(syscalln64) $(syshdr) FORCE
- $(call if_changed,syshdr)
-
-syshdr_offset_unistd_o32 := __NR_Linux
-$(uapi)/unistd_o32.h: $(syscallo32) $(syshdr) FORCE
+$(uapi)/unistd_%.h: $(src)/syscall_%.tbl $(syshdr) FORCE
$(call if_changed,syshdr)
sysnr_pfx_unistd_nr_n32 := N32
-sysnr_offset_unistd_nr_n32 := 6000
-$(kapi)/unistd_nr_n32.h: $(syscalln32) $(sysnr) FORCE
- $(call if_changed,sysnr)
-
sysnr_pfx_unistd_nr_n64 := 64
-sysnr_offset_unistd_nr_n64 := 5000
-$(kapi)/unistd_nr_n64.h: $(syscalln64) $(sysnr) FORCE
- $(call if_changed,sysnr)
-
sysnr_pfx_unistd_nr_o32 := O32
-sysnr_offset_unistd_nr_o32 := 4000
-$(kapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr) FORCE
- $(call if_changed,sysnr)
-
-systbl_abi_syscall_table_32_o32 := 32_o32
-systbl_offset_syscall_table_32_o32 := 4000
-$(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl) FORCE
- $(call if_changed,systbl)
-systbl_abi_syscall_table_64_n32 := 64_n32
-systbl_offset_syscall_table_64_n32 := 6000
-$(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl) FORCE
- $(call if_changed,systbl)
-
-systbl_abi_syscall_table_64_n64 := 64_n64
-systbl_offset_syscall_table_64_n64 := 5000
-$(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl) FORCE
- $(call if_changed,systbl)
+$(kapi)/unistd_nr_%.h: $(src)/syscall_%.tbl $(sysnr) FORCE
+ $(call if_changed,sysnr)
-systbl_abi_syscall_table_64_o32 := 64_o32
-systbl_offset_syscall_table_64_o32 := 4000
-$(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl) FORCE
+$(kapi)/syscall_table_%.h: $(src)/syscall_%.tbl $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_n32.h \
unistd_n64.h \
unistd_o32.h
-kapisyshdr-y += syscall_table_32_o32.h \
- syscall_table_64_n32.h \
- syscall_table_64_n64.h \
- syscall_table_64_o32.h \
+kapisyshdr-y += syscall_table_n32.h \
+ syscall_table_n64.h \
+ syscall_table_o32.h \
unistd_nr_n32.h \
unistd_nr_n64.h \
unistd_nr_o32.h
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 8fd8c1790941..c2d2e19abea8 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -381,3 +381,7 @@
440 n32 process_madvise sys_process_madvise
441 n32 epoll_pwait2 compat_sys_epoll_pwait2
442 n32 mount_setattr sys_mount_setattr
+443 n32 quotactl_fd sys_quotactl_fd
+444 n32 landlock_create_ruleset sys_landlock_create_ruleset
+445 n32 landlock_add_rule sys_landlock_add_rule
+446 n32 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index 169f21438065..ac653d08b1ea 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -357,3 +357,7 @@
440 n64 process_madvise sys_process_madvise
441 n64 epoll_pwait2 sys_epoll_pwait2
442 n64 mount_setattr sys_mount_setattr
+443 n64 quotactl_fd sys_quotactl_fd
+444 n64 landlock_create_ruleset sys_landlock_create_ruleset
+445 n64 landlock_add_rule sys_landlock_add_rule
+446 n64 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 090d29ca80ff..253f2cd70b6b 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -430,3 +430,7 @@
440 o32 process_madvise sys_process_madvise
441 o32 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 o32 mount_setattr sys_mount_setattr
+443 o32 quotactl_fd sys_quotactl_fd
+444 o32 landlock_create_ruleset sys_landlock_create_ruleset
+445 o32 landlock_add_rule sys_landlock_add_rule
+446 o32 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/mips/kernel/syscalls/syscallhdr.sh b/arch/mips/kernel/syscalls/syscallhdr.sh
deleted file mode 100644
index 2e241e713a7d..000000000000
--- a/arch/mips/kernel/syscalls/syscallhdr.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- printf "#ifndef %s\n" "${fileguard}"
- printf "#define %s\n" "${fileguard}"
- printf "\n"
-
- nxt=0
- while read nr abi name entry compat ; do
- if [ -z "$offset" ]; then
- printf "#define __NR_%s%s\t%s\n" \
- "${prefix}" "${name}" "${nr}"
- else
- printf "#define __NR_%s%s\t(%s + %s)\n" \
- "${prefix}" "${name}" "${offset}" "${nr}"
- fi
- nxt=$((nr+1))
- done
-
- printf "\n"
- printf "#ifdef __KERNEL__\n"
- printf "#define __NR_syscalls\t%s\n" "${nxt}"
- printf "#endif\n"
- printf "\n"
- printf "#endif /* %s */\n" "${fileguard}"
-) > "$out"
diff --git a/arch/mips/kernel/syscalls/syscallnr.sh b/arch/mips/kernel/syscalls/syscallnr.sh
index 60bbdb3fe03a..c190bbefbfc2 100644
--- a/arch/mips/kernel/syscalls/syscallnr.sh
+++ b/arch/mips/kernel/syscalls/syscallnr.sh
@@ -5,7 +5,6 @@ in="$1"
out="$2"
my_abis=`echo "($3)" | tr ',' '|'`
prefix="$4"
-offset="$5"
fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \
-e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
@@ -20,7 +19,6 @@ grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
nxt=$((nr+1))
done
- printf "#define __NR_%s_Linux\t%s\n" "${prefix}" "${offset}"
printf "#define __NR_%s_Linux_syscalls\t%s\n" "${prefix}" "${nxt}"
printf "\n"
printf "#endif /* %s */" "${fileguard}"
diff --git a/arch/mips/kernel/syscalls/syscalltbl.sh b/arch/mips/kernel/syscalls/syscalltbl.sh
deleted file mode 100644
index 1e2570740c20..000000000000
--- a/arch/mips/kernel/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-my_abi="$4"
-offset="$5"
-
-emit() {
- t_nxt="$1"
- t_nr="$2"
- t_entry="$3"
-
- while [ $t_nxt -lt $t_nr ]; do
- printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}"
- t_nxt=$((t_nxt+1))
- done
- printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}"
-}
-
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- nxt=0
- if [ -z "$offset" ]; then
- offset=0
- fi
-
- while read nr abi name entry compat ; do
- if [ "$my_abi" = "64_o32" ] && [ ! -z "$compat" ]; then
- emit $((nxt+offset)) $((nr+offset)) $compat
- else
- emit $((nxt+offset)) $((nr+offset)) $entry
- fi
- nxt=$((nr+1))
- done
-) > "$out"
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e0352958e2f7..6f07362de5ce 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -72,6 +72,8 @@
#include <asm/mach-loongson64/cpucfg-emul.h>
+#include "access-helper.h"
+
extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
@@ -108,7 +110,8 @@ void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void(*board_cache_error_setup)(void);
-static void show_raw_backtrace(unsigned long reg29, const char *loglvl)
+static void show_raw_backtrace(unsigned long reg29, const char *loglvl,
+ bool user)
{
unsigned long *sp = (unsigned long *)(reg29 & ~3);
unsigned long addr;
@@ -118,9 +121,7 @@ static void show_raw_backtrace(unsigned long reg29, const char *loglvl)
printk("%s\n", loglvl);
#endif
while (!kstack_end(sp)) {
- unsigned long __user *p =
- (unsigned long __user *)(unsigned long)sp++;
- if (__get_user(addr, p)) {
+ if (__get_addr(&addr, sp++, user)) {
printk("%s (Bad stack address)", loglvl);
break;
}
@@ -141,7 +142,7 @@ __setup("raw_show_trace", set_raw_show_trace);
#endif
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
- const char *loglvl)
+ const char *loglvl, bool user)
{
unsigned long sp = regs->regs[29];
unsigned long ra = regs->regs[31];
@@ -151,7 +152,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
task = current;
if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
- show_raw_backtrace(sp, loglvl);
+ show_raw_backtrace(sp, loglvl, user);
return;
}
printk("%sCall Trace:\n", loglvl);
@@ -167,12 +168,12 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
* with at least a bit of error checking ...
*/
static void show_stacktrace(struct task_struct *task,
- const struct pt_regs *regs, const char *loglvl)
+ const struct pt_regs *regs, const char *loglvl, bool user)
{
const int field = 2 * sizeof(unsigned long);
- long stackdata;
+ unsigned long stackdata;
int i;
- unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
+ unsigned long *sp = (unsigned long *)regs->regs[29];
printk("%sStack :", loglvl);
i = 0;
@@ -186,7 +187,7 @@ static void show_stacktrace(struct task_struct *task,
break;
}
- if (__get_user(stackdata, sp++)) {
+ if (__get_addr(&stackdata, sp++, user)) {
pr_cont(" (Bad stack address)");
break;
}
@@ -195,13 +196,12 @@ static void show_stacktrace(struct task_struct *task,
i++;
}
pr_cont("\n");
- show_backtrace(task, regs, loglvl);
+ show_backtrace(task, regs, loglvl, user);
}
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
struct pt_regs regs;
- mm_segment_t old_fs = get_fs();
regs.cp0_status = KSU_KERNEL;
if (sp) {
@@ -217,33 +217,41 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
prepare_frametrace(&regs);
}
}
- /*
- * show_stack() deals exclusively with kernel mode, so be sure to access
- * the stack in the kernel (not user) address space.
- */
- set_fs(KERNEL_DS);
- show_stacktrace(task, &regs, loglvl);
- set_fs(old_fs);
+ show_stacktrace(task, &regs, loglvl, false);
}
-static void show_code(unsigned int __user *pc)
+static void show_code(void *pc, bool user)
{
long i;
- unsigned short __user *pc16 = NULL;
+ unsigned short *pc16 = NULL;
printk("Code:");
if ((unsigned long)pc & 1)
- pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
+ pc16 = (u16 *)((unsigned long)pc & ~1);
+
for(i = -3 ; i < 6 ; i++) {
- unsigned int insn;
- if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
- pr_cont(" (Bad address in epc)\n");
- break;
+ if (pc16) {
+ u16 insn16;
+
+ if (__get_inst16(&insn16, pc16 + i, user))
+ goto bad_address;
+
+ pr_cont("%c%04x%c", (i?' ':'<'), insn16, (i?' ':'>'));
+ } else {
+ u32 insn32;
+
+ if (__get_inst32(&insn32, (u32 *)pc + i, user))
+ goto bad_address;
+
+ pr_cont("%c%08x%c", (i?' ':'<'), insn32, (i?' ':'>'));
}
- pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
}
pr_cont("\n");
+ return;
+
+bad_address:
+ pr_cont(" (Bad address in epc)\n\n");
}
static void __show_regs(const struct pt_regs *regs)
@@ -356,7 +364,6 @@ void show_regs(struct pt_regs *regs)
void show_registers(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
- mm_segment_t old_fs = get_fs();
__show_regs(regs);
print_modules();
@@ -371,13 +378,9 @@ void show_registers(struct pt_regs *regs)
printk("*HwTLS: %0*lx\n", field, tls);
}
- if (!user_mode(regs))
- /* Necessary for getting the correct stack content */
- set_fs(KERNEL_DS);
- show_stacktrace(current, regs, KERN_DEFAULT);
- show_code((unsigned int __user *) regs->cp0_epc);
+ show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
+ show_code((void *)regs->cp0_epc, user_mode(regs));
printk("\n");
- set_fs(old_fs);
}
static DEFINE_RAW_SPINLOCK(die_lock);
@@ -781,7 +784,6 @@ void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
int si_code;
- struct vm_area_struct *vma;
switch (sig) {
case 0:
@@ -797,8 +799,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
case SIGSEGV:
mmap_read_lock(current->mm);
- vma = find_vma(current->mm, (unsigned long)fault_addr);
- if (vma && (vma->vm_start <= (unsigned long)fault_addr))
+ if (vma_lookup(current->mm, (unsigned long)fault_addr))
si_code = SEGV_ACCERR;
else
si_code = SEGV_MAPERR;
@@ -1022,18 +1023,14 @@ asmlinkage void do_bp(struct pt_regs *regs)
unsigned long epc = msk_isa16_mode(exception_epc(regs));
unsigned int opcode, bcode;
enum ctx_state prev_state;
- mm_segment_t seg;
-
- seg = get_fs();
- if (!user_mode(regs))
- set_fs(KERNEL_DS);
+ bool user = user_mode(regs);
prev_state = exception_enter();
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
if (get_isa16_mode(regs->cp0_epc)) {
u16 instr[2];
- if (__get_user(instr[0], (u16 __user *)epc))
+ if (__get_inst16(&instr[0], (u16 *)epc, user))
goto out_sigsegv;
if (!cpu_has_mmips) {
@@ -1044,13 +1041,13 @@ asmlinkage void do_bp(struct pt_regs *regs)
bcode = instr[0] & 0xf;
} else {
/* 32-bit microMIPS BREAK */
- if (__get_user(instr[1], (u16 __user *)(epc + 2)))
+ if (__get_inst16(&instr[1], (u16 *)(epc + 2), user))
goto out_sigsegv;
opcode = (instr[0] << 16) | instr[1];
bcode = (opcode >> 6) & ((1 << 20) - 1);
}
} else {
- if (__get_user(opcode, (unsigned int __user *)epc))
+ if (__get_inst32(&opcode, (u32 *)epc, user))
goto out_sigsegv;
bcode = (opcode >> 6) & ((1 << 20) - 1);
}
@@ -1100,7 +1097,6 @@ asmlinkage void do_bp(struct pt_regs *regs)
do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
out:
- set_fs(seg);
exception_exit(prev_state);
return;
@@ -1114,25 +1110,21 @@ asmlinkage void do_tr(struct pt_regs *regs)
u32 opcode, tcode = 0;
enum ctx_state prev_state;
u16 instr[2];
- mm_segment_t seg;
+ bool user = user_mode(regs);
unsigned long epc = msk_isa16_mode(exception_epc(regs));
- seg = get_fs();
- if (!user_mode(regs))
- set_fs(KERNEL_DS);
-
prev_state = exception_enter();
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
if (get_isa16_mode(regs->cp0_epc)) {
- if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
- __get_user(instr[1], (u16 __user *)(epc + 2)))
+ if (__get_inst16(&instr[0], (u16 *)(epc + 0), user) ||
+ __get_inst16(&instr[1], (u16 *)(epc + 2), user))
goto out_sigsegv;
opcode = (instr[0] << 16) | instr[1];
/* Immediate versions don't provide a code. */
if (!(opcode & OPCODE))
tcode = (opcode >> 12) & ((1 << 4) - 1);
} else {
- if (__get_user(opcode, (u32 __user *)epc))
+ if (__get_inst32(&opcode, (u32 *)epc, user))
goto out_sigsegv;
/* Immediate versions don't provide a code. */
if (!(opcode & OPCODE))
@@ -1142,7 +1134,6 @@ asmlinkage void do_tr(struct pt_regs *regs)
do_trap_or_bp(regs, tcode, 0, "Trap");
out:
- set_fs(seg);
exception_exit(prev_state);
return;
@@ -1591,7 +1582,6 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
{
int multi_match = regs->cp0_status & ST0_TS;
enum ctx_state prev_state;
- mm_segment_t old_fs = get_fs();
prev_state = exception_enter();
show_regs(regs);
@@ -1602,12 +1592,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
dump_tlb_all();
}
- if (!user_mode(regs))
- set_fs(KERNEL_DS);
-
- show_code((unsigned int __user *) regs->cp0_epc);
-
- set_fs(old_fs);
+ show_code((void *)regs->cp0_epc, user_mode(regs));
/*
* Some chips may have other causes of machine check (e.g. SB1
@@ -2009,13 +1994,16 @@ void __noreturn nmi_exception_handler(struct pt_regs *regs)
nmi_exit();
}
-#define VECTORSPACING 0x100 /* for EI/VI mode */
-
unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
+void reserve_exception_space(phys_addr_t addr, unsigned long size)
+{
+ memblock_reserve(addr, size);
+}
+
void __init *set_except_vector(int n, void *addr)
{
unsigned long handler = (unsigned long) addr;
@@ -2367,10 +2355,7 @@ void __init trap_init(void)
if (!cpu_has_mips_r2_r6) {
ebase = CAC_BASE;
- ebase_pa = virt_to_phys((void *)ebase);
vec_size = 0x400;
-
- memblock_reserve(ebase_pa, vec_size);
} else {
if (cpu_has_veic || cpu_has_vint)
vec_size = 0x200 + VECTORSPACING*64;
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 126a5f3f4e4c..df4b708c04a9 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -93,6 +93,8 @@
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
+#include "access-helper.h"
+
enum {
UNALIGNED_ACTION_QUIET,
UNALIGNED_ACTION_SIGNAL,
@@ -107,14 +109,13 @@ static u32 unaligned_action;
extern void show_registers(struct pt_regs *regs);
static void emulate_load_store_insn(struct pt_regs *regs,
- void __user *addr, unsigned int __user *pc)
+ void __user *addr, unsigned int *pc)
{
unsigned long origpc, orig31, value;
union mips_instruction insn;
unsigned int res;
-#ifdef CONFIG_EVA
- mm_segment_t seg;
-#endif
+ bool user = user_mode(regs);
+
origpc = (unsigned long)pc;
orig31 = regs->regs[31];
@@ -123,7 +124,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
/*
* This load never faults.
*/
- __get_user(insn.word, pc);
+ __get_inst32(&insn.word, pc, user);
switch (insn.i_format.opcode) {
/*
@@ -163,7 +164,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (insn.dsp_format.func == lx_op) {
switch (insn.dsp_format.op) {
case lwx_op:
- if (!access_ok(addr, 4))
+ if (user && !access_ok(addr, 4))
goto sigbus;
LoadW(addr, value, res);
if (res)
@@ -172,7 +173,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
regs->regs[insn.dsp_format.rd] = value;
break;
case lhx_op:
- if (!access_ok(addr, 2))
+ if (user && !access_ok(addr, 2))
goto sigbus;
LoadHW(addr, value, res);
if (res)
@@ -191,93 +192,66 @@ static void emulate_load_store_insn(struct pt_regs *regs,
* memory, so we need to "switch" the address limit to
* user space, so that address check can work properly.
*/
- seg = force_uaccess_begin();
switch (insn.spec3_format.func) {
case lhe_op:
- if (!access_ok(addr, 2)) {
- force_uaccess_end(seg);
+ if (!access_ok(addr, 2))
goto sigbus;
- }
LoadHWE(addr, value, res);
- if (res) {
- force_uaccess_end(seg);
+ if (res)
goto fault;
- }
compute_return_epc(regs);
regs->regs[insn.spec3_format.rt] = value;
break;
case lwe_op:
- if (!access_ok(addr, 4)) {
- force_uaccess_end(seg);
+ if (!access_ok(addr, 4))
goto sigbus;
- }
LoadWE(addr, value, res);
- if (res) {
- force_uaccess_end(seg);
+ if (res)
goto fault;
- }
compute_return_epc(regs);
regs->regs[insn.spec3_format.rt] = value;
break;
case lhue_op:
- if (!access_ok(addr, 2)) {
- force_uaccess_end(seg);
+ if (!access_ok(addr, 2))
goto sigbus;
- }
LoadHWUE(addr, value, res);
- if (res) {
- force_uaccess_end(seg);
+ if (res)
goto fault;
- }
compute_return_epc(regs);
regs->regs[insn.spec3_format.rt] = value;
break;
case she_op:
- if (!access_ok(addr, 2)) {
- force_uaccess_end(seg);
+ if (!access_ok(addr, 2))
goto sigbus;
- }
compute_return_epc(regs);
value = regs->regs[insn.spec3_format.rt];
StoreHWE(addr, value, res);
- if (res) {
- force_uaccess_end(seg);
+ if (res)
goto fault;
- }
break;
case swe_op:
- if (!access_ok(addr, 4)) {
- force_uaccess_end(seg);
+ if (!access_ok(addr, 4))
goto sigbus;
- }
compute_return_epc(regs);
value = regs->regs[insn.spec3_format.rt];
StoreWE(addr, value, res);
- if (res) {
- force_uaccess_end(seg);
+ if (res)
goto fault;
- }
break;
default:
- force_uaccess_end(seg);
goto sigill;
}
- force_uaccess_end(seg);
}
#endif
break;
case lh_op:
- if (!access_ok(addr, 2))
+ if (user && !access_ok(addr, 2))
goto sigbus;
- if (IS_ENABLED(CONFIG_EVA)) {
- if (uaccess_kernel())
- LoadHW(addr, value, res);
- else
- LoadHWE(addr, value, res);
- } else {
+ if (IS_ENABLED(CONFIG_EVA) && user)
+ LoadHWE(addr, value, res);
+ else
LoadHW(addr, value, res);
- }
if (res)
goto fault;
@@ -286,17 +260,13 @@ static void emulate_load_store_insn(struct pt_regs *regs,
break;
case lw_op:
- if (!access_ok(addr, 4))
+ if (user && !access_ok(addr, 4))
goto sigbus;
- if (IS_ENABLED(CONFIG_EVA)) {
- if (uaccess_kernel())
- LoadW(addr, value, res);
- else
- LoadWE(addr, value, res);
- } else {
+ if (IS_ENABLED(CONFIG_EVA) && user)
+ LoadWE(addr, value, res);
+ else
LoadW(addr, value, res);
- }
if (res)
goto fault;
@@ -305,17 +275,13 @@ static void emulate_load_store_insn(struct pt_regs *regs,
break;
case lhu_op:
- if (!access_ok(addr, 2))
+ if (user && !access_ok(addr, 2))
goto sigbus;
- if (IS_ENABLED(CONFIG_EVA)) {
- if (uaccess_kernel())
- LoadHWU(addr, value, res);
- else
- LoadHWUE(addr, value, res);
- } else {
+ if (IS_ENABLED(CONFIG_EVA) && user)
+ LoadHWUE(addr, value, res);
+ else
LoadHWU(addr, value, res);
- }
if (res)
goto fault;
@@ -332,7 +298,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
- if (!access_ok(addr, 4))
+ if (user && !access_ok(addr, 4))
goto sigbus;
LoadWU(addr, value, res);
@@ -355,7 +321,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
- if (!access_ok(addr, 8))
+ if (user && !access_ok(addr, 8))
goto sigbus;
LoadDW(addr, value, res);
@@ -370,40 +336,32 @@ static void emulate_load_store_insn(struct pt_regs *regs,
goto sigill;
case sh_op:
- if (!access_ok(addr, 2))
+ if (user && !access_ok(addr, 2))
goto sigbus;
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- if (IS_ENABLED(CONFIG_EVA)) {
- if (uaccess_kernel())
- StoreHW(addr, value, res);
- else
- StoreHWE(addr, value, res);
- } else {
+ if (IS_ENABLED(CONFIG_EVA) && user)
+ StoreHWE(addr, value, res);
+ else
StoreHW(addr, value, res);
- }
if (res)
goto fault;
break;
case sw_op:
- if (!access_ok(addr, 4))
+ if (user && !access_ok(addr, 4))
goto sigbus;
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- if (IS_ENABLED(CONFIG_EVA)) {
- if (uaccess_kernel())
- StoreW(addr, value, res);
- else
- StoreWE(addr, value, res);
- } else {
+ if (IS_ENABLED(CONFIG_EVA) && user)
+ StoreWE(addr, value, res);
+ else
StoreW(addr, value, res);
- }
if (res)
goto fault;
@@ -418,7 +376,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
- if (!access_ok(addr, 8))
+ if (user && !access_ok(addr, 8))
goto sigbus;
compute_return_epc(regs);
@@ -626,6 +584,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs,
unsigned long origpc, contpc;
union mips_instruction insn;
struct mm_decoded_insn mminsn;
+ bool user = user_mode(regs);
origpc = regs->cp0_epc;
orig31 = regs->regs[31];
@@ -689,7 +648,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs,
if (reg == 31)
goto sigbus;
- if (!access_ok(addr, 8))
+ if (user && !access_ok(addr, 8))
goto sigbus;
LoadW(addr, value, res);
@@ -708,7 +667,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs,
if (reg == 31)
goto sigbus;
- if (!access_ok(addr, 8))
+ if (user && !access_ok(addr, 8))
goto sigbus;
value = regs->regs[reg];
@@ -728,7 +687,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs,
if (reg == 31)
goto sigbus;
- if (!access_ok(addr, 16))
+ if (user && !access_ok(addr, 16))
goto sigbus;
LoadDW(addr, value, res);
@@ -751,7 +710,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs,
if (reg == 31)
goto sigbus;
- if (!access_ok(addr, 16))
+ if (user && !access_ok(addr, 16))
goto sigbus;
value = regs->regs[reg];
@@ -774,10 +733,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs,
if ((rvar > 9) || !reg)
goto sigill;
if (reg & 0x10) {
- if (!access_ok(addr, 4 * (rvar + 1)))
+ if (user && !access_ok(addr, 4 * (rvar + 1)))
goto sigbus;
} else {
- if (!access_ok(addr, 4 * rvar))
+ if (user && !access_ok(addr, 4 * rvar))
goto sigbus;
}
if (rvar == 9)
@@ -810,10 +769,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs,
if ((rvar > 9) || !reg)
goto sigill;
if (reg & 0x10) {
- if (!access_ok(addr, 4 * (rvar + 1)))
+ if (user && !access_ok(addr, 4 * (rvar + 1)))
goto sigbus;
} else {
- if (!access_ok(addr, 4 * rvar))
+ if (user && !access_ok(addr, 4 * rvar))
goto sigbus;
}
if (rvar == 9)
@@ -847,10 +806,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs,
if ((rvar > 9) || !reg)
goto sigill;
if (reg & 0x10) {
- if (!access_ok(addr, 8 * (rvar + 1)))
+ if (user && !access_ok(addr, 8 * (rvar + 1)))
goto sigbus;
} else {
- if (!access_ok(addr, 8 * rvar))
+ if (user && !access_ok(addr, 8 * rvar))
goto sigbus;
}
if (rvar == 9)
@@ -888,10 +847,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs,
if ((rvar > 9) || !reg)
goto sigill;
if (reg & 0x10) {
- if (!access_ok(addr, 8 * (rvar + 1)))
+ if (user && !access_ok(addr, 8 * (rvar + 1)))
goto sigbus;
} else {
- if (!access_ok(addr, 8 * rvar))
+ if (user && !access_ok(addr, 8 * rvar))
goto sigbus;
}
if (rvar == 9)
@@ -1010,7 +969,7 @@ fpu_emul:
case mm_lwm16_op:
reg = insn.mm16_m_format.rlist;
rvar = reg + 1;
- if (!access_ok(addr, 4 * rvar))
+ if (user && !access_ok(addr, 4 * rvar))
goto sigbus;
for (i = 16; rvar; rvar--, i++) {
@@ -1030,7 +989,7 @@ fpu_emul:
case mm_swm16_op:
reg = insn.mm16_m_format.rlist;
rvar = reg + 1;
- if (!access_ok(addr, 4 * rvar))
+ if (user && !access_ok(addr, 4 * rvar))
goto sigbus;
for (i = 16; rvar; rvar--, i++) {
@@ -1084,7 +1043,7 @@ fpu_emul:
}
loadHW:
- if (!access_ok(addr, 2))
+ if (user && !access_ok(addr, 2))
goto sigbus;
LoadHW(addr, value, res);
@@ -1094,7 +1053,7 @@ loadHW:
goto success;
loadHWU:
- if (!access_ok(addr, 2))
+ if (user && !access_ok(addr, 2))
goto sigbus;
LoadHWU(addr, value, res);
@@ -1104,7 +1063,7 @@ loadHWU:
goto success;
loadW:
- if (!access_ok(addr, 4))
+ if (user && !access_ok(addr, 4))
goto sigbus;
LoadW(addr, value, res);
@@ -1122,7 +1081,7 @@ loadWU:
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
- if (!access_ok(addr, 4))
+ if (user && !access_ok(addr, 4))
goto sigbus;
LoadWU(addr, value, res);
@@ -1144,7 +1103,7 @@ loadDW:
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
- if (!access_ok(addr, 8))
+ if (user && !access_ok(addr, 8))
goto sigbus;
LoadDW(addr, value, res);
@@ -1158,7 +1117,7 @@ loadDW:
goto sigill;
storeHW:
- if (!access_ok(addr, 2))
+ if (user && !access_ok(addr, 2))
goto sigbus;
value = regs->regs[reg];
@@ -1168,7 +1127,7 @@ storeHW:
goto success;
storeW:
- if (!access_ok(addr, 4))
+ if (user && !access_ok(addr, 4))
goto sigbus;
value = regs->regs[reg];
@@ -1186,7 +1145,7 @@ storeDW:
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
- if (!access_ok(addr, 8))
+ if (user && !access_ok(addr, 8))
goto sigbus;
value = regs->regs[reg];
@@ -1243,6 +1202,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
union mips16e_instruction mips16inst, oldinst;
unsigned int opcode;
int extended = 0;
+ bool user = user_mode(regs);
origpc = regs->cp0_epc;
orig31 = regs->regs[31];
@@ -1344,7 +1304,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
goto sigbus;
case MIPS16e_lh_op:
- if (!access_ok(addr, 2))
+ if (user && !access_ok(addr, 2))
goto sigbus;
LoadHW(addr, value, res);
@@ -1355,7 +1315,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
break;
case MIPS16e_lhu_op:
- if (!access_ok(addr, 2))
+ if (user && !access_ok(addr, 2))
goto sigbus;
LoadHWU(addr, value, res);
@@ -1368,7 +1328,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
case MIPS16e_lw_op:
case MIPS16e_lwpc_op:
case MIPS16e_lwsp_op:
- if (!access_ok(addr, 4))
+ if (user && !access_ok(addr, 4))
goto sigbus;
LoadW(addr, value, res);
@@ -1387,7 +1347,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
- if (!access_ok(addr, 4))
+ if (user && !access_ok(addr, 4))
goto sigbus;
LoadWU(addr, value, res);
@@ -1411,7 +1371,7 @@ loadDW:
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
- if (!access_ok(addr, 8))
+ if (user && !access_ok(addr, 8))
goto sigbus;
LoadDW(addr, value, res);
@@ -1426,7 +1386,7 @@ loadDW:
goto sigill;
case MIPS16e_sh_op:
- if (!access_ok(addr, 2))
+ if (user && !access_ok(addr, 2))
goto sigbus;
MIPS16e_compute_return_epc(regs, &oldinst);
@@ -1439,7 +1399,7 @@ loadDW:
case MIPS16e_sw_op:
case MIPS16e_swsp_op:
case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
- if (!access_ok(addr, 4))
+ if (user && !access_ok(addr, 4))
goto sigbus;
MIPS16e_compute_return_epc(regs, &oldinst);
@@ -1459,7 +1419,7 @@ writeDW:
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
- if (!access_ok(addr, 8))
+ if (user && !access_ok(addr, 8))
goto sigbus;
MIPS16e_compute_return_epc(regs, &oldinst);
@@ -1515,8 +1475,7 @@ sigill:
asmlinkage void do_ade(struct pt_regs *regs)
{
enum ctx_state prev_state;
- unsigned int __user *pc;
- mm_segment_t seg;
+ unsigned int *pc;
prev_state = exception_enter();
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
@@ -1551,24 +1510,14 @@ asmlinkage void do_ade(struct pt_regs *regs)
show_registers(regs);
if (cpu_has_mmips) {
- seg = get_fs();
- if (!user_mode(regs))
- set_fs(KERNEL_DS);
emulate_load_store_microMIPS(regs,
(void __user *)regs->cp0_badvaddr);
- set_fs(seg);
-
return;
}
if (cpu_has_mips16) {
- seg = get_fs();
- if (!user_mode(regs))
- set_fs(KERNEL_DS);
emulate_load_store_MIPS16e(regs,
(void __user *)regs->cp0_badvaddr);
- set_fs(seg);
-
return;
}
@@ -1577,13 +1526,9 @@ asmlinkage void do_ade(struct pt_regs *regs)
if (unaligned_action == UNALIGNED_ACTION_SHOW)
show_registers(regs);
- pc = (unsigned int __user *)exception_epc(regs);
+ pc = (unsigned int *)exception_epc(regs);
- seg = get_fs();
- if (!user_mode(regs))
- set_fs(KERNEL_DS);
emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
- set_fs(seg);
return;
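Note: the recurring rewrite of `!access_ok(addr, n)` to `user && !access_ok(addr, n)` in the unaligned.c hunks above falls out of the set_fs() removal: access_ok() now only validates user pointers, so kernel-mode unaligned fixups skip the check and rely on the exception-table fixups inside the Load*/Store* macros instead. A hedged illustration of the invariant (unaligned_access_ok() is a hypothetical name, not part of this patch):

/* Hypothetical condensation of the repeated check: kernel-mode
 * accesses are always attempted and any fault is caught by the
 * exception-table fixups in LoadW()/StoreW() and friends. */
static inline bool unaligned_access_ok(const void __user *addr,
				       unsigned long size, bool user)
{
	return !user || access_ok(addr, size);
}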
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 7d0b91ad2581..3d0cf471f2fe 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -90,7 +90,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mips_vdso_image *image = current->thread.abi->vdso;
struct mm_struct *mm = current->mm;
- unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn;
+ unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base;
struct vm_area_struct *vma;
int ret;
@@ -158,7 +158,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
/* Map GIC user page. */
if (gic_size) {
- gic_pfn = virt_to_phys(mips_gic_base + MIPS_GIC_USER_OFS) >> PAGE_SHIFT;
+ gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS;
+ gic_pfn = virt_to_phys((void *)gic_base) >> PAGE_SHIFT;
ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
pgprot_noncached(vma->vm_page_prot));
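Note: the vdso.c hunk above is a type fix rather than a behaviour change: mips_gic_base is an __iomem pointer, so doing the MIPS_GIC_USER_OFS arithmetic on it and handing the result straight to virt_to_phys() mixes address spaces. Computing the offset on an unsigned long first makes the pointer math explicit; roughly (per the declarations in the diff):

	gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS;
	gic_pfn  = virt_to_phys((void *)gic_base) >> PAGE_SHIFT;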
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index c1c345be04ff..1f98947fe715 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -145,6 +145,7 @@ SECTIONS
}
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+ STRUCT_ALIGN();
.appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
*(.appended_dtb)
KEEP(*(.appended_dtb))
@@ -172,6 +173,11 @@ SECTIONS
#endif
#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
+ .fill : {
+ FILL(0);
+ BYTE(0);
+ STRUCT_ALIGN();
+ }
__appended_dtb = .;
/* leave space for appended DTB */
. += 0x100000;
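Note: the vmlinux.lds.S changes above harden the appended-DTB path: libfdt expects the blob to be 64-bit aligned, and STRUCT_ALIGN() (32-byte alignment in asm-generic/vmlinux.lds.h) more than satisfies that for both the ELF-appended and raw-appended cases; the .fill section emits a single zero byte so the alignment actually takes effect in the output before __appended_dtb is assigned. The guaranteed invariant, as a sketch (appended_dtb_aligned() is illustrative only):

extern char __appended_dtb[];	/* provided by the linker script */

static inline bool appended_dtb_aligned(void)
{
	/* 8-byte alignment is what libfdt requires of a DTB */
	return ((unsigned long)__appended_dtb & 7) == 0;
}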
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 032b3fca6cbb..a77297480f56 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -30,40 +30,6 @@ config KVM
help
Support for hosting Guest kernels.
-choice
- prompt "Virtualization mode"
- depends on KVM
- default KVM_MIPS_TE
-
-config KVM_MIPS_TE
- bool "Trap & Emulate"
- depends on CPU_MIPS32_R2
- help
- Use trap and emulate to virtualize 32-bit guests in user mode. This
- does not require any special hardware Virtualization support beyond
- standard MIPS32 r2 or later, but it does require the guest kernel
- to be configured with CONFIG_KVM_GUEST=y so that it resides in the
- user address segment.
-
-config KVM_MIPS_VZ
- bool "MIPS Virtualization (VZ) ASE"
- help
- Use the MIPS Virtualization (VZ) ASE to virtualize guests. This
- supports running unmodified guest kernels (with CONFIG_KVM_GUEST=n),
- but requires hardware support.
-
-endchoice
-
-config KVM_MIPS_DYN_TRANS
- bool "KVM/MIPS: Dynamic binary translation to reduce traps"
- depends on KVM_MIPS_TE
- default y
- help
- When running in Trap & Emulate mode patch privileged
- instructions to reduce the number of traps.
-
- If unsure, say Y.
-
config KVM_MIPS_DEBUG_COP0_COUNTERS
bool "Maintain counters for COP0 accesses"
depends on KVM
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 506c4ac0ba1c..c67250a956b8 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -2,14 +2,14 @@
# Makefile for KVM support for MIPS
#
-common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o)
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o binary_stats.o)
EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
- interrupt.o stats.o commpage.o \
+ interrupt.o stats.o \
fpu.o
kvm-objs += hypcall.o
kvm-objs += mmu.o
@@ -17,11 +17,6 @@ ifdef CONFIG_CPU_LOONGSON64
kvm-objs += loongson_ipi.o
endif
-ifdef CONFIG_KVM_MIPS_VZ
kvm-objs += vz.o
-else
-kvm-objs += dyntrans.o
-kvm-objs += trap_emul.o
-endif
obj-$(CONFIG_KVM) += kvm.o
obj-y += callback.o tlb.o
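Note: from here on the patch removes the trap-and-emulate KVM backend wholesale (the Kconfig choice above, plus commpage.*, dyntrans.c, trap_emul.c and the manual CP0/TLB/cache emulation in emulate.c), leaving VZ as the only MIPS KVM mode. Compile-time guards therefore collapse, as the later kvm_mips_write_compare() hunks show:

	/* before: VZ behaviour was conditional */
	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0)
		write_c0_gtoffset(compare - read_c0_count());

	/* after: VZ is the only backend, so the guard is gone */
	if (delta > 0)
		write_c0_gtoffset(compare - read_c0_count());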
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
deleted file mode 100644
index 5812e6145801..000000000000
--- a/arch/mips/kvm/commpage.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * commpage, currently used for Virtual COP0 registers.
- * Mapped into the guest kernel @ KVM_GUEST_COMMPAGE_ADDR.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/memblock.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <asm/mmu_context.h>
-
-#include <linux/kvm_host.h>
-
-#include "commpage.h"
-
-void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
-{
- struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
-
- /* Specific init values for fields */
- vcpu->arch.cop0 = &page->cop0;
-}
diff --git a/arch/mips/kvm/commpage.h b/arch/mips/kvm/commpage.h
deleted file mode 100644
index 08c5fa2bbc0f..000000000000
--- a/arch/mips/kvm/commpage.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: commpage: mapped into guest kernel space
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#ifndef __KVM_MIPS_COMMPAGE_H__
-#define __KVM_MIPS_COMMPAGE_H__
-
-struct kvm_mips_commpage {
- /* COP0 state is mapped into Guest kernel via commpage */
- struct mips_coproc cop0;
-};
-
-#define KVM_MIPS_COMM_EIDI_OFFSET 0x0
-
-extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
-
-#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
deleted file mode 100644
index d77b61b3d6ee..000000000000
--- a/arch/mips/kvm/dyntrans.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/highmem.h>
-#include <linux/kvm_host.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/memblock.h>
-#include <asm/cacheflush.h>
-
-#include "commpage.h"
-
-/**
- * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
- * @vcpu: Virtual CPU.
- * @opc: PC of instruction to replace.
- * @replace: Instruction to write
- */
-static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
- union mips_instruction replace)
-{
- unsigned long vaddr = (unsigned long)opc;
- int err;
-
-retry:
- /* The GVA page table is still active so use the Linux TLB handlers */
- kvm_trap_emul_gva_lockless_begin(vcpu);
- err = put_user(replace.word, opc);
- kvm_trap_emul_gva_lockless_end(vcpu);
-
- if (unlikely(err)) {
- /*
- * We write protect clean pages in GVA page table so normal
- * Linux TLB mod handler doesn't silently dirty the page.
- * Its also possible we raced with a GVA invalidation.
- * Try to force the page to become dirty.
- */
- err = kvm_trap_emul_gva_fault(vcpu, vaddr, true);
- if (unlikely(err)) {
- kvm_info("%s: Address unwriteable: %p\n",
- __func__, opc);
- return -EFAULT;
- }
-
- /*
- * Try again. This will likely trigger a TLB refill, which will
- * fetch the new dirty entry from the GVA page table, which
- * should then succeed.
- */
- goto retry;
- }
- __local_flush_icache_user_range(vaddr, vaddr + 4);
-
- return 0;
-}
-
-int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- union mips_instruction nop_inst = { 0 };
-
- /* Replace the CACHE instruction, with a NOP */
- return kvm_mips_trans_replace(vcpu, opc, nop_inst);
-}
-
-/*
- * Address based CACHE instructions are transformed into synci(s). A little
- * heavy for just D-cache invalidates, but avoids an expensive trap
- */
-int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- union mips_instruction synci_inst = { 0 };
-
- synci_inst.i_format.opcode = bcond_op;
- synci_inst.i_format.rs = inst.i_format.rs;
- synci_inst.i_format.rt = synci_op;
- if (cpu_has_mips_r6)
- synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
- else
- synci_inst.i_format.simmediate = inst.i_format.simmediate;
-
- return kvm_mips_trans_replace(vcpu, opc, synci_inst);
-}
-
-int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- union mips_instruction mfc0_inst = { 0 };
- u32 rd, sel;
-
- rd = inst.c0r_format.rd;
- sel = inst.c0r_format.sel;
-
- if (rd == MIPS_CP0_ERRCTL && sel == 0) {
- mfc0_inst.r_format.opcode = spec_op;
- mfc0_inst.r_format.rd = inst.c0r_format.rt;
- mfc0_inst.r_format.func = add_op;
- } else {
- mfc0_inst.i_format.opcode = lw_op;
- mfc0_inst.i_format.rt = inst.c0r_format.rt;
- mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
- offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
-#ifdef CONFIG_CPU_BIG_ENDIAN
- if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
- mfc0_inst.i_format.simmediate |= 4;
-#endif
- }
-
- return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
-}
-
-int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- union mips_instruction mtc0_inst = { 0 };
- u32 rd, sel;
-
- rd = inst.c0r_format.rd;
- sel = inst.c0r_format.sel;
-
- mtc0_inst.i_format.opcode = sw_op;
- mtc0_inst.i_format.rt = inst.c0r_format.rt;
- mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
- offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
-#ifdef CONFIG_CPU_BIG_ENDIAN
- if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
- mtc0_inst.i_format.simmediate |= 4;
-#endif
-
- return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
-}
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index d70c4f8e14e2..22e745e49b0a 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -30,7 +30,6 @@
#define CONFIG_MIPS_MT
#include "interrupt.h"
-#include "commpage.h"
#include "trace.h"
@@ -276,7 +275,8 @@ int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
*out = vcpu->arch.host_cp0_badinstr;
return 0;
} else {
- return kvm_get_inst(opc, vcpu, out);
+ WARN_ONCE(1, "CPU doesn't have BadInstr register\n");
+ return -EINVAL;
}
}
@@ -297,7 +297,8 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
*out = vcpu->arch.host_cp0_badinstrp;
return 0;
} else {
- return kvm_get_inst(opc, vcpu, out);
+ WARN_ONCE(1, "CPU doesn't have BadInstrp register\n");
+ return -EINVAL;
}
}
@@ -721,7 +722,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
* preemption until the new value is written to prevent restore of a
* GTOffset corresponding to the old CP0_Compare value.
*/
- if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) {
+ if (delta > 0) {
preempt_disable();
write_c0_gtoffset(compare - read_c0_count());
back_to_back_c0_hazard();
@@ -734,7 +735,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
if (ack)
kvm_mips_callbacks->dequeue_timer_int(vcpu);
- else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ))
+ else
/*
* With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
* preserve guest CP0_Cause.TI if we don't want to ack it.
@@ -743,15 +744,13 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
kvm_write_c0_guest_compare(cop0, compare);
- if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
- if (delta > 0)
- preempt_enable();
+ if (delta > 0)
+ preempt_enable();
- back_to_back_c0_hazard();
+ back_to_back_c0_hazard();
- if (!ack && cause & CAUSEF_TI)
- kvm_write_c0_guest_cause(cop0, cause);
- }
+ if (!ack && cause & CAUSEF_TI)
+ kvm_write_c0_guest_cause(cop0, cause);
/* resume_hrtimer() takes care of timer interrupts > count */
if (!dc)
@@ -762,7 +761,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
* until after the new CP0_Compare is written, otherwise new guest
* CP0_Count could hit new guest CP0_Compare.
*/
- if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0)
+ if (delta <= 0)
write_c0_gtoffset(compare - read_c0_count());
}
@@ -943,29 +942,6 @@ enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
return HRTIMER_RESTART;
}
-enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- enum emulation_result er = EMULATE_DONE;
-
- if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
- kvm_clear_c0_guest_status(cop0, ST0_ERL);
- vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
- } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
- kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
- kvm_read_c0_guest_epc(cop0));
- kvm_clear_c0_guest_status(cop0, ST0_EXL);
- vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
-
- } else {
- kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
- vcpu->arch.pc);
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
@@ -991,609 +967,6 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
-static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu,
- unsigned long entryhi)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
- int cpu, i;
- u32 nasid = entryhi & KVM_ENTRYHI_ASID;
-
- if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) {
- trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) &
- KVM_ENTRYHI_ASID, nasid);
-
- /*
- * Flush entries from the GVA page tables.
- * Guest user page table will get flushed lazily on re-entry to
- * guest user if the guest ASID actually changes.
- */
- kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN);
-
- /*
- * Regenerate/invalidate kernel MMU context.
- * The user MMU context will be regenerated lazily on re-entry
- * to guest user if the guest ASID actually changes.
- */
- preempt_disable();
- cpu = smp_processor_id();
- get_new_mmu_context(kern_mm);
- for_each_possible_cpu(i)
- if (i != cpu)
- set_cpu_context(i, kern_mm, 0);
- preempt_enable();
- }
- kvm_write_c0_guest_entryhi(cop0, entryhi);
-}
-
-enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_mips_tlb *tlb;
- unsigned long pc = vcpu->arch.pc;
- int index;
-
- index = kvm_read_c0_guest_index(cop0);
- if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
- /* UNDEFINED */
- kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index);
- index &= KVM_MIPS_GUEST_TLB_SIZE - 1;
- }
-
- tlb = &vcpu->arch.guest_tlb[index];
- kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask);
- kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]);
- kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]);
- kvm_mips_change_entryhi(vcpu, tlb->tlb_hi);
-
- return EMULATE_DONE;
-}
-
-/**
- * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
- * @vcpu: VCPU with changed mappings.
- * @tlb: TLB entry being removed.
- *
- * This is called to indicate a single change in guest MMU mappings, so that we
- * can arrange TLB flushes on this and other CPUs.
- */
-static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
- struct kvm_mips_tlb *tlb)
-{
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
- struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
- int cpu, i;
- bool user;
-
- /* No need to flush for entries which are already invalid */
- if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
- return;
- /* Don't touch host kernel page tables or TLB mappings */
- if ((unsigned long)tlb->tlb_hi > 0x7fffffff)
- return;
- /* User address space doesn't need flushing for KSeg2/3 changes */
- user = tlb->tlb_hi < KVM_GUEST_KSEG0;
-
- preempt_disable();
-
- /* Invalidate page table entries */
- kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user);
-
- /*
- * Probe the shadow host TLB for the entry being overwritten, if one
- * matches, invalidate it
- */
- kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true);
-
- /* Invalidate the whole ASID on other CPUs */
- cpu = smp_processor_id();
- for_each_possible_cpu(i) {
- if (i == cpu)
- continue;
- if (user)
- set_cpu_context(i, user_mm, 0);
- set_cpu_context(i, kern_mm, 0);
- }
-
- preempt_enable();
-}
-
-/* Write Guest TLB Entry @ Index */
-enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- int index = kvm_read_c0_guest_index(cop0);
- struct kvm_mips_tlb *tlb = NULL;
- unsigned long pc = vcpu->arch.pc;
-
- if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
- kvm_debug("%s: illegal index: %d\n", __func__, index);
- kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
- pc, index, kvm_read_c0_guest_entryhi(cop0),
- kvm_read_c0_guest_entrylo0(cop0),
- kvm_read_c0_guest_entrylo1(cop0),
- kvm_read_c0_guest_pagemask(cop0));
- index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
- }
-
- tlb = &vcpu->arch.guest_tlb[index];
-
- kvm_mips_invalidate_guest_tlb(vcpu, tlb);
-
- tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
- tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
- tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
- tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
-
- kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
- pc, index, kvm_read_c0_guest_entryhi(cop0),
- kvm_read_c0_guest_entrylo0(cop0),
- kvm_read_c0_guest_entrylo1(cop0),
- kvm_read_c0_guest_pagemask(cop0));
-
- return EMULATE_DONE;
-}
-
-/* Write Guest TLB Entry @ Random Index */
-enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_mips_tlb *tlb = NULL;
- unsigned long pc = vcpu->arch.pc;
- int index;
-
- index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE);
- tlb = &vcpu->arch.guest_tlb[index];
-
- kvm_mips_invalidate_guest_tlb(vcpu, tlb);
-
- tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
- tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
- tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
- tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
-
- kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
- pc, index, kvm_read_c0_guest_entryhi(cop0),
- kvm_read_c0_guest_entrylo0(cop0),
- kvm_read_c0_guest_entrylo1(cop0));
-
- return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- long entryhi = kvm_read_c0_guest_entryhi(cop0);
- unsigned long pc = vcpu->arch.pc;
- int index = -1;
-
- index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
-
- kvm_write_c0_guest_index(cop0, index);
-
- kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
- index);
-
- return EMULATE_DONE;
-}
-
-/**
- * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
- * @vcpu: Virtual CPU.
- *
- * Finds the mask of bits which are writable in the guest's Config1 CP0
- * register, by userland (currently read-only to the guest).
- */
-unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
-{
- unsigned int mask = 0;
-
- /* Permit FPU to be present if FPU is supported */
- if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
- mask |= MIPS_CONF1_FP;
-
- return mask;
-}
-
-/**
- * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
- * @vcpu: Virtual CPU.
- *
- * Finds the mask of bits which are writable in the guest's Config3 CP0
- * register, by userland (currently read-only to the guest).
- */
-unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
-{
- /* Config4 and ULRI are optional */
- unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;
-
- /* Permit MSA to be present if MSA is supported */
- if (kvm_mips_guest_can_have_msa(&vcpu->arch))
- mask |= MIPS_CONF3_MSA;
-
- return mask;
-}
-
-/**
- * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
- * @vcpu: Virtual CPU.
- *
- * Finds the mask of bits which are writable in the guest's Config4 CP0
- * register, by userland (currently read-only to the guest).
- */
-unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
-{
- /* Config5 is optional */
- unsigned int mask = MIPS_CONF_M;
-
- /* KScrExist */
- mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT;
-
- return mask;
-}
-
-/**
- * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
- * @vcpu: Virtual CPU.
- *
- * Finds the mask of bits which are writable in the guest's Config5 CP0
- * register, by the guest itself.
- */
-unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
-{
- unsigned int mask = 0;
-
- /* Permit MSAEn changes if MSA supported and enabled */
- if (kvm_mips_guest_has_msa(&vcpu->arch))
- mask |= MIPS_CONF5_MSAEN;
-
- /*
- * Permit guest FPU mode changes if FPU is enabled and the relevant
- * feature exists according to FIR register.
- */
- if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
- if (cpu_has_fre)
- mask |= MIPS_CONF5_FRE;
- /* We don't support UFR or UFE */
- }
-
- return mask;
-}
-
-enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
- u32 *opc, u32 cause,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- enum emulation_result er = EMULATE_DONE;
- u32 rt, rd, sel;
- unsigned long curr_pc;
-
- /*
- * Update PC and hold onto current PC in case there is
- * an error and we want to rollback the PC
- */
- curr_pc = vcpu->arch.pc;
- er = update_pc(vcpu, cause);
- if (er == EMULATE_FAIL)
- return er;
-
- if (inst.co_format.co) {
- switch (inst.co_format.func) {
- case tlbr_op: /* Read indexed TLB entry */
- er = kvm_mips_emul_tlbr(vcpu);
- break;
- case tlbwi_op: /* Write indexed */
- er = kvm_mips_emul_tlbwi(vcpu);
- break;
- case tlbwr_op: /* Write random */
- er = kvm_mips_emul_tlbwr(vcpu);
- break;
- case tlbp_op: /* TLB Probe */
- er = kvm_mips_emul_tlbp(vcpu);
- break;
- case rfe_op:
- kvm_err("!!!COP0_RFE!!!\n");
- break;
- case eret_op:
- er = kvm_mips_emul_eret(vcpu);
- goto dont_update_pc;
- case wait_op:
- er = kvm_mips_emul_wait(vcpu);
- break;
- case hypcall_op:
- er = kvm_mips_emul_hypcall(vcpu, inst);
- break;
- }
- } else {
- rt = inst.c0r_format.rt;
- rd = inst.c0r_format.rd;
- sel = inst.c0r_format.sel;
-
- switch (inst.c0r_format.rs) {
- case mfc_op:
-#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
- cop0->stat[rd][sel]++;
-#endif
- /* Get reg */
- if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
- vcpu->arch.gprs[rt] =
- (s32)kvm_mips_read_count(vcpu);
- } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
- vcpu->arch.gprs[rt] = 0x0;
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- kvm_mips_trans_mfc0(inst, opc, vcpu);
-#endif
- } else {
- vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- kvm_mips_trans_mfc0(inst, opc, vcpu);
-#endif
- }
-
- trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
- KVM_TRACE_COP0(rd, sel),
- vcpu->arch.gprs[rt]);
- break;
-
- case dmfc_op:
- vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
-
- trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
- KVM_TRACE_COP0(rd, sel),
- vcpu->arch.gprs[rt]);
- break;
-
- case mtc_op:
-#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
- cop0->stat[rd][sel]++;
-#endif
- trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
- KVM_TRACE_COP0(rd, sel),
- vcpu->arch.gprs[rt]);
-
- if ((rd == MIPS_CP0_TLB_INDEX)
- && (vcpu->arch.gprs[rt] >=
- KVM_MIPS_GUEST_TLB_SIZE)) {
- kvm_err("Invalid TLB Index: %ld",
- vcpu->arch.gprs[rt]);
- er = EMULATE_FAIL;
- break;
- }
- if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
- /*
- * Preserve core number, and keep the exception
- * base in guest KSeg0.
- */
- kvm_change_c0_guest_ebase(cop0, 0x1ffff000,
- vcpu->arch.gprs[rt]);
- } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
- kvm_mips_change_entryhi(vcpu,
- vcpu->arch.gprs[rt]);
- }
- /* Are we writing to COUNT */
- else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
- kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
- goto done;
- } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
- /* If we are writing to COMPARE */
- /* Clear pending timer interrupt, if any */
- kvm_mips_write_compare(vcpu,
- vcpu->arch.gprs[rt],
- true);
- } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
- unsigned int old_val, val, change;
-
- old_val = kvm_read_c0_guest_status(cop0);
- val = vcpu->arch.gprs[rt];
- change = val ^ old_val;
-
- /* Make sure that the NMI bit is never set */
- val &= ~ST0_NMI;
-
- /*
- * Don't allow CU1 or FR to be set unless FPU
- * capability enabled and exists in guest
- * configuration.
- */
- if (!kvm_mips_guest_has_fpu(&vcpu->arch))
- val &= ~(ST0_CU1 | ST0_FR);
-
- /*
- * Also don't allow FR to be set if host doesn't
- * support it.
- */
- if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
- val &= ~ST0_FR;
-
-
- /* Handle changes in FPU mode */
- preempt_disable();
-
- /*
- * FPU and Vector register state is made
- * UNPREDICTABLE by a change of FR, so don't
- * even bother saving it.
- */
- if (change & ST0_FR)
- kvm_drop_fpu(vcpu);
-
- /*
- * If MSA state is already live, it is undefined
- * how it interacts with FR=0 FPU state, and we
- * don't want to hit reserved instruction
- * exceptions trying to save the MSA state later
- * when CU=1 && FR=1, so play it safe and save
- * it first.
- */
- if (change & ST0_CU1 && !(val & ST0_FR) &&
- vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
- kvm_lose_fpu(vcpu);
-
- /*
- * Propagate CU1 (FPU enable) changes
- * immediately if the FPU context is already
- * loaded. When disabling we leave the context
- * loaded so it can be quickly enabled again in
- * the near future.
- */
- if (change & ST0_CU1 &&
- vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
- change_c0_status(ST0_CU1, val);
-
- preempt_enable();
-
- kvm_write_c0_guest_status(cop0, val);
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- /*
- * If FPU present, we need CU1/FR bits to take
- * effect fairly soon.
- */
- if (!kvm_mips_guest_has_fpu(&vcpu->arch))
- kvm_mips_trans_mtc0(inst, opc, vcpu);
-#endif
- } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
- unsigned int old_val, val, change, wrmask;
-
- old_val = kvm_read_c0_guest_config5(cop0);
- val = vcpu->arch.gprs[rt];
-
- /* Only a few bits are writable in Config5 */
- wrmask = kvm_mips_config5_wrmask(vcpu);
- change = (val ^ old_val) & wrmask;
- val = old_val ^ change;
-
-
- /* Handle changes in FPU/MSA modes */
- preempt_disable();
-
- /*
- * Propagate FRE changes immediately if the FPU
- * context is already loaded.
- */
- if (change & MIPS_CONF5_FRE &&
- vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
- change_c0_config5(MIPS_CONF5_FRE, val);
-
- /*
- * Propagate MSAEn changes immediately if the
- * MSA context is already loaded. When disabling
- * we leave the context loaded so it can be
- * quickly enabled again in the near future.
- */
- if (change & MIPS_CONF5_MSAEN &&
- vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
- change_c0_config5(MIPS_CONF5_MSAEN,
- val);
-
- preempt_enable();
-
- kvm_write_c0_guest_config5(cop0, val);
- } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
- u32 old_cause, new_cause;
-
- old_cause = kvm_read_c0_guest_cause(cop0);
- new_cause = vcpu->arch.gprs[rt];
- /* Update R/W bits */
- kvm_change_c0_guest_cause(cop0, 0x08800300,
- new_cause);
- /* DC bit enabling/disabling timer? */
- if ((old_cause ^ new_cause) & CAUSEF_DC) {
- if (new_cause & CAUSEF_DC)
- kvm_mips_count_disable_cause(vcpu);
- else
- kvm_mips_count_enable_cause(vcpu);
- }
- } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
- u32 mask = MIPS_HWRENA_CPUNUM |
- MIPS_HWRENA_SYNCISTEP |
- MIPS_HWRENA_CC |
- MIPS_HWRENA_CCRES;
-
- if (kvm_read_c0_guest_config3(cop0) &
- MIPS_CONF3_ULRI)
- mask |= MIPS_HWRENA_ULR;
- cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
- } else {
- cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- kvm_mips_trans_mtc0(inst, opc, vcpu);
-#endif
- }
- break;
-
- case dmtc_op:
- kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
- vcpu->arch.pc, rt, rd, sel);
- trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
- KVM_TRACE_COP0(rd, sel),
- vcpu->arch.gprs[rt]);
- er = EMULATE_FAIL;
- break;
-
- case mfmc0_op:
-#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
- cop0->stat[MIPS_CP0_STATUS][0]++;
-#endif
- if (rt != 0)
- vcpu->arch.gprs[rt] =
- kvm_read_c0_guest_status(cop0);
- /* EI */
- if (inst.mfmc0_format.sc) {
- kvm_debug("[%#lx] mfmc0_op: EI\n",
- vcpu->arch.pc);
- kvm_set_c0_guest_status(cop0, ST0_IE);
- } else {
- kvm_debug("[%#lx] mfmc0_op: DI\n",
- vcpu->arch.pc);
- kvm_clear_c0_guest_status(cop0, ST0_IE);
- }
-
- break;
-
- case wrpgpr_op:
- {
- u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
- u32 pss =
- (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
- /*
- * We don't support any shadow register sets, so
- * SRSCtl[PSS] == SRSCtl[CSS] = 0
- */
- if (css || pss) {
- er = EMULATE_FAIL;
- break;
- }
- kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
- vcpu->arch.gprs[rt]);
- vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
- }
- break;
- default:
- kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
- vcpu->arch.pc, inst.c0r_format.rs);
- er = EMULATE_FAIL;
- break;
- }
- }
-
-done:
- /* Rollback PC only if emulation was unsuccessful */
- if (er == EMULATE_FAIL)
- vcpu->arch.pc = curr_pc;
-
-dont_update_pc:
- /*
- * This is for special instructions whose emulation
- * updates the PC, so do not overwrite the PC under
- * any circumstances
- */
-
- return er;
-}
-
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
u32 cause,
struct kvm_vcpu *vcpu)
@@ -1623,7 +996,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
goto out_fail;
switch (inst.i_format.opcode) {
-#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
+#if defined(CONFIG_64BIT)
case sd_op:
run->mmio.len = 8;
*(u64 *)data = vcpu->arch.gprs[rt];
@@ -1721,7 +1094,7 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
vcpu->arch.gprs[rt], *(u32 *)data);
break;
-#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
+#if defined(CONFIG_64BIT)
case sdl_op:
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr) & (~0x7);
@@ -1928,7 +1301,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
vcpu->mmio_needed = 2; /* signed */
switch (op) {
-#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
+#if defined(CONFIG_64BIT)
case ld_op:
run->mmio.len = 8;
break;
@@ -2003,7 +1376,7 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
}
break;
-#if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ)
+#if defined(CONFIG_64BIT)
case ldl_op:
run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa(
vcpu->arch.host_cp0_badvaddr) & (~0x7);
@@ -2135,815 +1508,6 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
return EMULATE_DO_MMIO;
}
-#ifndef CONFIG_KVM_MIPS_VZ
-static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
- unsigned long curr_pc,
- unsigned long addr,
- struct kvm_vcpu *vcpu,
- u32 cause)
-{
- int err;
-
- for (;;) {
- /* Carefully attempt the cache operation */
- kvm_trap_emul_gva_lockless_begin(vcpu);
- err = fn(addr);
- kvm_trap_emul_gva_lockless_end(vcpu);
-
- if (likely(!err))
- return EMULATE_DONE;
-
- /*
- * Try to handle the fault and retry, maybe we just raced with a
- * GVA invalidation.
- */
- switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) {
- case KVM_MIPS_GVA:
- case KVM_MIPS_GPA:
- /* bad virtual or physical address */
- return EMULATE_FAIL;
- case KVM_MIPS_TLB:
- /* no matching guest TLB */
- vcpu->arch.host_cp0_badvaddr = addr;
- vcpu->arch.pc = curr_pc;
- kvm_mips_emulate_tlbmiss_ld(cause, NULL, vcpu);
- return EMULATE_EXCEPT;
- case KVM_MIPS_TLBINV:
- /* invalid matching guest TLB */
- vcpu->arch.host_cp0_badvaddr = addr;
- vcpu->arch.pc = curr_pc;
- kvm_mips_emulate_tlbinv_ld(cause, NULL, vcpu);
- return EMULATE_EXCEPT;
- default:
- break;
- }
- }
-}
-
-enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
- u32 *opc, u32 cause,
- struct kvm_vcpu *vcpu)
-{
- enum emulation_result er = EMULATE_DONE;
- u32 cache, op_inst, op, base;
- s16 offset;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- unsigned long va;
- unsigned long curr_pc;
-
- /*
- * Update PC and hold onto current PC in case there is
- * an error and we want to rollback the PC
- */
- curr_pc = vcpu->arch.pc;
- er = update_pc(vcpu, cause);
- if (er == EMULATE_FAIL)
- return er;
-
- base = inst.i_format.rs;
- op_inst = inst.i_format.rt;
- if (cpu_has_mips_r6)
- offset = inst.spec3_format.simmediate;
- else
- offset = inst.i_format.simmediate;
- cache = op_inst & CacheOp_Cache;
- op = op_inst & CacheOp_Op;
-
- va = arch->gprs[base] + offset;
-
- kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
- cache, op, base, arch->gprs[base], offset);
-
- /*
- * Treat INDEX_INV as a nop, basically issued by Linux on startup to
- * invalidate the caches entirely by stepping through all the
- * ways/indexes
- */
- if (op == Index_Writeback_Inv) {
- kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
- vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
- arch->gprs[base], offset);
-
- if (cache == Cache_D) {
-#ifdef CONFIG_CPU_R4K_CACHE_TLB
- r4k_blast_dcache();
-#else
- switch (boot_cpu_type()) {
- case CPU_CAVIUM_OCTEON3:
- /* locally flush icache */
- local_flush_icache_range(0, 0);
- break;
- default:
- __flush_cache_all();
- break;
- }
-#endif
- } else if (cache == Cache_I) {
-#ifdef CONFIG_CPU_R4K_CACHE_TLB
- r4k_blast_icache();
-#else
- switch (boot_cpu_type()) {
- case CPU_CAVIUM_OCTEON3:
- /* locally flush icache */
- local_flush_icache_range(0, 0);
- break;
- default:
- flush_icache_all();
- break;
- }
-#endif
- } else {
- kvm_err("%s: unsupported CACHE INDEX operation\n",
- __func__);
- return EMULATE_FAIL;
- }
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- kvm_mips_trans_cache_index(inst, opc, vcpu);
-#endif
- goto done;
- }
-
- /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
- if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
- /*
- * Perform the dcache part of icache synchronisation on the
- * guest's behalf.
- */
- er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
- curr_pc, va, vcpu, cause);
- if (er != EMULATE_DONE)
- goto done;
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- /*
- * Replace the CACHE instruction, with a SYNCI, not the same,
- * but avoids a trap
- */
- kvm_mips_trans_cache_va(inst, opc, vcpu);
-#endif
- } else if (op_inst == Hit_Invalidate_I) {
- /* Perform the icache synchronisation on the guest's behalf */
- er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
- curr_pc, va, vcpu, cause);
- if (er != EMULATE_DONE)
- goto done;
- er = kvm_mips_guest_cache_op(protected_flush_icache_line,
- curr_pc, va, vcpu, cause);
- if (er != EMULATE_DONE)
- goto done;
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
- /* Replace the CACHE instruction, with a SYNCI */
- kvm_mips_trans_cache_va(inst, opc, vcpu);
-#endif
- } else {
- kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
- cache, op, base, arch->gprs[base], offset);
- er = EMULATE_FAIL;
- }
-
-done:
- /* Rollback PC only if emulation was unsuccessful */
- if (er == EMULATE_FAIL)
- vcpu->arch.pc = curr_pc;
- /* Guest exception needs guest to resume */
- if (er == EMULATE_EXCEPT)
- er = EMULATE_DONE;
-
- return er;
-}
-
-enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- union mips_instruction inst;
- enum emulation_result er = EMULATE_DONE;
- int err;
-
- /* Fetch the instruction. */
- if (cause & CAUSEF_BD)
- opc += 1;
- err = kvm_get_badinstr(opc, vcpu, &inst.word);
- if (err)
- return EMULATE_FAIL;
-
- switch (inst.r_format.opcode) {
- case cop0_op:
- er = kvm_mips_emulate_CP0(inst, opc, cause, vcpu);
- break;
-
-#ifndef CONFIG_CPU_MIPSR6
- case cache_op:
- ++vcpu->stat.cache_exits;
- trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
- er = kvm_mips_emulate_cache(inst, opc, cause, vcpu);
- break;
-#else
- case spec3_op:
- switch (inst.spec3_format.func) {
- case cache6_op:
- ++vcpu->stat.cache_exits;
- trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
- er = kvm_mips_emulate_cache(inst, opc, cause,
- vcpu);
- break;
- default:
- goto unknown;
- }
- break;
-unknown:
-#endif
-
- default:
- kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
- inst.word);
- kvm_arch_vcpu_dump_regs(vcpu);
- er = EMULATE_FAIL;
- break;
- }
-
- return er;
-}
-#endif /* CONFIG_KVM_MIPS_VZ */
-
-/**
- * kvm_mips_guest_exception_base() - Find guest exception vector base address.
- *
- * Returns: The base address of the current guest exception vector, taking
- * both Guest.CP0_Status.BEV and Guest.CP0_EBase into account.
- */
-long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
-
- if (kvm_read_c0_guest_status(cop0) & ST0_BEV)
- return KVM_GUEST_CKSEG1ADDR(0x1fc00200);
- else
- return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE;
-}
-
-enum emulation_result kvm_mips_emulate_syscall(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_SYS << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- } else {
- kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
- arch->pc);
-
- /* set pc to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
-
- } else {
- kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
- arch->pc);
-
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
- }
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_TLBL << CAUSEB_EXCCODE));
-
- /* setup badvaddr, context and entryhi registers for the guest */
- kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
- /* XXXKYMA: is the context register used by linux??? */
- kvm_write_c0_guest_entryhi(cop0, entryhi);
-
- return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- unsigned long entryhi =
- (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
- arch->pc);
- } else {
- kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
- arch->pc);
- }
-
- /* set pc to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_TLBL << CAUSEB_EXCCODE));
-
- /* setup badvaddr, context and entryhi registers for the guest */
- kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
- /* XXXKYMA: is the context register used by linux??? */
- kvm_write_c0_guest_entryhi(cop0, entryhi);
-
- return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
- arch->pc);
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0;
- } else {
- kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
- arch->pc);
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
- }
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_TLBS << CAUSEB_EXCCODE));
-
- /* setup badvaddr, context and entryhi registers for the guest */
- kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
- /* XXXKYMA: is the context register used by linux??? */
- kvm_write_c0_guest_entryhi(cop0, entryhi);
-
- return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
- arch->pc);
- } else {
- kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
- arch->pc);
- }
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_TLBS << CAUSEB_EXCCODE));
-
- /* setup badvaddr, context and entryhi registers for the guest */
- kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
- /* XXXKYMA: is the context register used by linux??? */
- kvm_write_c0_guest_entryhi(cop0, entryhi);
-
- return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
- struct kvm_vcpu_arch *arch = &vcpu->arch;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
- arch->pc);
- } else {
- kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
- arch->pc);
- }
-
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_MOD << CAUSEB_EXCCODE));
-
- /* setup badvaddr, context and entryhi registers for the guest */
- kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
- /* XXXKYMA: is the context register used by linux??? */
- kvm_write_c0_guest_entryhi(cop0, entryhi);
-
- return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- }
-
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_CPU << CAUSEB_EXCCODE));
- kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
-
- return EMULATE_DONE;
-}
-
-enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_RI << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- } else {
- kvm_err("Trying to deliver RI when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_BP << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- } else {
- kvm_err("Trying to deliver BP when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_TR << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- } else {
- kvm_err("Trying to deliver TRAP when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- } else {
- kvm_err("Trying to deliver MSAFPE when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_FPE << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- } else {
- kvm_err("Trying to deliver FPE when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (EXCCODE_MSADIS << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
-
- } else {
- kvm_err("Trying to deliver MSADIS when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
- unsigned long curr_pc;
- union mips_instruction inst;
- int err;
-
-	/*
-	 * Update the PC, holding onto the current PC in case there is
-	 * an error and we want to roll it back.
-	 */
- curr_pc = vcpu->arch.pc;
- er = update_pc(vcpu, cause);
- if (er == EMULATE_FAIL)
- return er;
-
- /* Fetch the instruction. */
- if (cause & CAUSEF_BD)
- opc += 1;
- err = kvm_get_badinstr(opc, vcpu, &inst.word);
- if (err) {
- kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err);
- return EMULATE_FAIL;
- }
-
- if (inst.r_format.opcode == spec3_op &&
- inst.r_format.func == rdhwr_op &&
- inst.r_format.rs == 0 &&
- (inst.r_format.re >> 3) == 0) {
- int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
- int rd = inst.r_format.rd;
- int rt = inst.r_format.rt;
- int sel = inst.r_format.re & 0x7;
-
- /* If usermode, check RDHWR rd is allowed by guest HWREna */
- if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
- kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
- rd, opc);
- goto emulate_ri;
- }
- switch (rd) {
- case MIPS_HWR_CPUNUM: /* CPU number */
- arch->gprs[rt] = vcpu->vcpu_id;
- break;
- case MIPS_HWR_SYNCISTEP: /* SYNCI length */
- arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
- current_cpu_data.icache.linesz);
- break;
- case MIPS_HWR_CC: /* Read count register */
- arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
- break;
- case MIPS_HWR_CCRES: /* Count register resolution */
- switch (current_cpu_data.cputype) {
- case CPU_20KC:
- case CPU_25KF:
- arch->gprs[rt] = 1;
- break;
- default:
- arch->gprs[rt] = 2;
- }
- break;
- case MIPS_HWR_ULR: /* Read UserLocal register */
- arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
- break;
-
- default:
- kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
- goto emulate_ri;
- }
-
- trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
- vcpu->arch.gprs[rt]);
- } else {
- kvm_debug("Emulate RI not supported @ %p: %#x\n",
- opc, inst.word);
- goto emulate_ri;
- }
-
- return EMULATE_DONE;
-
-emulate_ri:
-	/*
-	 * Roll back the PC (if in a branch delay slot the PC already points
-	 * to the branch target) and pass the RI exception to the guest OS.
-	 */
- vcpu->arch.pc = curr_pc;
- return kvm_mips_emulate_ri_exc(cause, opc, vcpu);
-}
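
The opcode test above picks out RDHWR (SPECIAL3 opcode, rdhwr function, rs == 0, upper re bits clear) before dispatching on rd. A standalone decoder for the same R-format fields; the helper name is illustrative, and the sample word is the conventional encoding of rdhwr $3, $29 (UserLocal):

#include <stdint.h>
#include <stdio.h>

#define SPEC3_OP 0x1f
#define RDHWR_FN 0x3b

static int decode_rdhwr(uint32_t word, int *rt, int *rd, int *sel)
{
	uint32_t opcode = word >> 26;
	uint32_t rs     = (word >> 21) & 0x1f;
	uint32_t re     = (word >> 6)  & 0x1f;	/* sa/re field */
	uint32_t func   = word & 0x3f;

	if (opcode != SPEC3_OP || func != RDHWR_FN || rs != 0 || (re >> 3))
		return -1;

	*rt  = (word >> 16) & 0x1f;
	*rd  = (word >> 11) & 0x1f;
	*sel = re & 0x7;
	return 0;
}

int main(void)
{
	int rt, rd, sel;

	if (decode_rdhwr(0x7c03e83b, &rt, &rd, &sel) == 0)
		printf("rt=%d rd=%d sel=%d\n", rt, rd, sel);
	return 0;
}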
-
enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
@@ -3086,207 +1650,3 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
done:
return er;
}
-
-static enum emulation_result kvm_mips_emulate_exc(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- enum emulation_result er = EMULATE_DONE;
-
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_change_c0_guest_cause(cop0, (0xff),
- (exccode << CAUSEB_EXCCODE));
-
- /* Set PC to the exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180;
- kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
-
- kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
- exccode, kvm_read_c0_guest_epc(cop0),
- kvm_read_c0_guest_badvaddr(cop0));
- } else {
- kvm_err("Trying to deliver EXC when EXL is already set\n");
- er = EMULATE_FAIL;
- }
-
- return er;
-}
-
-enum emulation_result kvm_mips_check_privilege(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu)
-{
- enum emulation_result er = EMULATE_DONE;
- u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-
- int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
-
- if (usermode) {
- switch (exccode) {
- case EXCCODE_INT:
- case EXCCODE_SYS:
- case EXCCODE_BP:
- case EXCCODE_RI:
- case EXCCODE_TR:
- case EXCCODE_MSAFPE:
- case EXCCODE_FPE:
- case EXCCODE_MSADIS:
- break;
-
- case EXCCODE_CPU:
- if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
- er = EMULATE_PRIV_FAIL;
- break;
-
- case EXCCODE_MOD:
- break;
-
- case EXCCODE_TLBL:
-			/*
-			 * If we are accessing guest kernel space, send an
-			 * address error exception to the guest
-			 */
- if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
- kvm_debug("%s: LD MISS @ %#lx\n", __func__,
- badvaddr);
- cause &= ~0xff;
- cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
- er = EMULATE_PRIV_FAIL;
- }
- break;
-
- case EXCCODE_TLBS:
-			/*
-			 * If we are accessing guest kernel space, send an
-			 * address error exception to the guest
-			 */
- if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
- kvm_debug("%s: ST MISS @ %#lx\n", __func__,
- badvaddr);
- cause &= ~0xff;
- cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
- er = EMULATE_PRIV_FAIL;
- }
- break;
-
- case EXCCODE_ADES:
- kvm_debug("%s: address error ST @ %#lx\n", __func__,
- badvaddr);
- if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
- cause &= ~0xff;
- cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
- }
- er = EMULATE_PRIV_FAIL;
- break;
- case EXCCODE_ADEL:
- kvm_debug("%s: address error LD @ %#lx\n", __func__,
- badvaddr);
- if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
- cause &= ~0xff;
- cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
- }
- er = EMULATE_PRIV_FAIL;
- break;
- default:
- er = EMULATE_PRIV_FAIL;
- break;
- }
- }
-
- if (er == EMULATE_PRIV_FAIL)
- kvm_mips_emulate_exc(cause, opc, vcpu);
-
- return er;
-}
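
The EXCCODE_TLBL/TLBS and ADEL/ADES branches above all rewrite the low Cause bits in place before reflecting the exception. The same transform as a pure function, a sketch only:

/* Mirrors "cause &= ~0xff; cause |= code << CAUSEB_EXCCODE" above. */
static uint32_t remap_exccode(uint32_t cause, unsigned int new_code)
{
	cause &= ~0xffu;
	return cause | (new_code << 2);	/* CAUSEB_EXCCODE == 2 */
}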
-
-/*
- * User Address (UA) fault. This can happen if:
- * (1) the TLB entry is not present/valid in both the guest and the shadow
- *     host TLB; in this case we pass the fault on to the guest kernel and
- *     let it handle it.
- * (2) the TLB entry is present in the guest TLB but not in the shadow host
- *     TLB; in this case we inject the entry from the guest TLB into the
- *     shadow host TLB.
- */
-enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
- u32 *opc,
- struct kvm_vcpu *vcpu,
- bool write_fault)
-{
- enum emulation_result er = EMULATE_DONE;
- u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
- unsigned long va = vcpu->arch.host_cp0_badvaddr;
- int index;
-
- kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
- vcpu->arch.host_cp0_badvaddr);
-
- /*
- * KVM would not have got the exception if this entry was valid in the
- * shadow host TLB. Check the Guest TLB, if the entry is not there then
- * send the guest an exception. The guest exc handler should then inject
- * an entry into the guest TLB.
- */
- index = kvm_mips_guest_tlb_lookup(vcpu,
- (va & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
- KVM_ENTRYHI_ASID));
- if (index < 0) {
- if (exccode == EXCCODE_TLBL) {
- er = kvm_mips_emulate_tlbmiss_ld(cause, opc, vcpu);
- } else if (exccode == EXCCODE_TLBS) {
- er = kvm_mips_emulate_tlbmiss_st(cause, opc, vcpu);
- } else {
- kvm_err("%s: invalid exc code: %d\n", __func__,
- exccode);
- er = EMULATE_FAIL;
- }
- } else {
- struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
-
- /*
- * Check if the entry is valid, if not then setup a TLB invalid
- * exception to the guest
- */
- if (!TLB_IS_VALID(*tlb, va)) {
- if (exccode == EXCCODE_TLBL) {
- er = kvm_mips_emulate_tlbinv_ld(cause, opc,
- vcpu);
- } else if (exccode == EXCCODE_TLBS) {
- er = kvm_mips_emulate_tlbinv_st(cause, opc,
- vcpu);
- } else {
- kvm_err("%s: invalid exc code: %d\n", __func__,
- exccode);
- er = EMULATE_FAIL;
- }
- } else {
- kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
- tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
- /*
- * OK we have a Guest TLB entry, now inject it into the
- * shadow host TLB
- */
- if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va,
- write_fault)) {
- kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
- __func__, va, index, vcpu,
- read_c0_entryhi());
- er = EMULATE_FAIL;
- }
- }
- }
-
- return er;
-}
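
The decision structure above reduces to three outcomes keyed off the guest TLB lookup. A compact restatement, with illustrative names:

enum miss_action { REFLECT_REFILL, REFLECT_INVALID, INJECT_SHADOW };

/* Pure-function form of the classification made above; model only. */
static enum miss_action classify_miss(int guest_index, int entry_valid)
{
	if (guest_index < 0)
		return REFLECT_REFILL;	/* guest TLB has no entry */
	if (!entry_valid)
		return REFLECT_INVALID;	/* entry present but V bit clear */
	return INJECT_SHADOW;		/* replay into the shadow host TLB */
}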
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index 832475bf2055..8131fb2bdf97 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -305,7 +305,6 @@ static void *kvm_mips_build_enter_guest(void *addr)
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
UASM_i_MTC0(&p, T0, C0_EPC);
-#ifdef CONFIG_KVM_MIPS_VZ
/* Save normal linux process pgd (VZ guarantees pgd_reg is set) */
if (cpu_has_ldpte)
UASM_i_MFC0(&p, K0, C0_PWBASE);
@@ -367,21 +366,6 @@ static void *kvm_mips_build_enter_guest(void *addr)
/* Set the root ASID for the Guest */
UASM_i_ADDIU(&p, T1, S0,
offsetof(struct kvm, arch.gpa_mm.context.asid));
-#else
- /* Set the ASID for the Guest Kernel or User */
- UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
- UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
- T0);
- uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
- uasm_i_xori(&p, T0, T0, KSU_USER);
- uasm_il_bnez(&p, &r, T0, label_kernel_asid);
- UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
- guest_kernel_mm.context.asid));
- /* else user */
- UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch,
- guest_user_mm.context.asid));
- uasm_l_kernel_asid(&l, p);
-#endif
/* t1: contains the base of the ASID array, need to get the cpu id */
/* smp_processor_id */
@@ -406,24 +390,9 @@ static void *kvm_mips_build_enter_guest(void *addr)
uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
-#ifndef CONFIG_KVM_MIPS_VZ
- /*
- * Set up KVM T&E GVA pgd.
- * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD():
- * - call tlbmiss_handler_setup_pgd(mm->pgd)
- * - but skips write into CP0_PWBase for now
- */
- UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) -
- (int)offsetof(struct mm_struct, context.asid), T1);
-
- UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd);
- uasm_i_jalr(&p, RA, T9);
- uasm_i_mtc0(&p, K0, C0_ENTRYHI);
-#else
/* Set up KVM VZ root ASID (!guestid) */
uasm_i_mtc0(&p, K0, C0_ENTRYHI);
skip_asid_restore:
-#endif
uasm_i_ehb(&p);
/* Disable RDHWR access */
@@ -720,7 +689,6 @@ void *kvm_mips_build_exit(void *addr)
uasm_l_msa_1(&l, p);
}
-#ifdef CONFIG_KVM_MIPS_VZ
/* Restore host ASID */
if (!cpu_has_guestid) {
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi),
@@ -764,7 +732,6 @@ void *kvm_mips_build_exit(void *addr)
MIPS_GCTL1_RID_WIDTH);
uasm_i_mtc0(&p, T0, C0_GUESTCTL1);
}
-#endif
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
index d28c2c9c343e..0277942279ea 100644
--- a/arch/mips/kvm/interrupt.c
+++ b/arch/mips/kvm/interrupt.c
@@ -21,119 +21,6 @@
#include "interrupt.h"
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
-{
- set_bit(priority, &vcpu->arch.pending_exceptions);
-}
-
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
-{
- clear_bit(priority, &vcpu->arch.pending_exceptions);
-}
-
-void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
-{
-	/*
-	 * Cause bits to reflect the pending timer interrupt;
-	 * the EXC code will be set when we are actually
-	 * delivering the interrupt:
-	 */
- kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
-
- /* Queue up an INT exception for the core */
- kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
-}
-
-void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
-{
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
- kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
-}
-
-void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
- struct kvm_mips_interrupt *irq)
-{
- int intr = (int)irq->irq;
-
-	/*
-	 * Cause bits to reflect the pending IO interrupt;
-	 * the EXC code will be set when we are actually
-	 * delivering the interrupt:
-	 */
- kvm_set_c0_guest_cause(vcpu->arch.cop0, 1 << (intr + 8));
- kvm_mips_queue_irq(vcpu, kvm_irq_to_priority(intr));
-}
-
-void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
- struct kvm_mips_interrupt *irq)
-{
- int intr = (int)irq->irq;
-
- kvm_clear_c0_guest_cause(vcpu->arch.cop0, 1 << (-intr + 8));
- kvm_mips_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
-}
-
-/* Deliver the interrupt of the corresponding priority, if possible. */
-int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- u32 cause)
-{
- int allowed = 0;
- u32 exccode, ie;
-
- struct kvm_vcpu_arch *arch = &vcpu->arch;
- struct mips_coproc *cop0 = vcpu->arch.cop0;
-
- if (priority == MIPS_EXC_MAX)
- return 0;
-
- ie = 1 << (kvm_priority_to_irq[priority] + 8);
- if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
- && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
- && (kvm_read_c0_guest_status(cop0) & ie)) {
- allowed = 1;
- exccode = EXCCODE_INT;
- }
-
-	/* Are we allowed to deliver the interrupt? */
- if (allowed) {
- if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
- /* save old pc */
- kvm_write_c0_guest_epc(cop0, arch->pc);
- kvm_set_c0_guest_status(cop0, ST0_EXL);
-
- if (cause & CAUSEF_BD)
- kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
- else
- kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
- kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
-
- } else
- kvm_err("Trying to deliver interrupt when EXL is already set\n");
-
- kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
- (exccode << CAUSEB_EXCCODE));
-
- /* XXXSL Set PC to the interrupt exception entry point */
- arch->pc = kvm_mips_guest_exception_base(vcpu);
- if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
- arch->pc += 0x200;
- else
- arch->pc += 0x180;
-
- clear_bit(priority, &vcpu->arch.pending_exceptions);
- }
-
- return allowed;
-}
-
-int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- u32 cause)
-{
- return 1;
-}
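
The queue/dequeue callbacks above are thin wrappers over a per-vcpu bitmask, drained lowest-bit-first by the delivery loop in kvm_mips_deliver_interrupts(). A userspace sketch of that bookkeeping, assuming arbitrary example priorities:

#include <stdio.h>

static unsigned long pending;

static void queue_irq(unsigned int priority)   { pending |=  1ul << priority; }
static void dequeue_irq(unsigned int priority) { pending &= ~(1ul << priority); }

int main(void)
{
	queue_irq(2);			/* e.g. the timer interrupt */
	queue_irq(5);			/* e.g. an IO line */

	while (pending) {
		/* __builtin_ctzl plays the role of __ffs() above */
		unsigned int prio = (unsigned int)__builtin_ctzl(pending);

		printf("deliver priority %u\n", prio);
		dequeue_irq(prio);
	}
	return 0;
}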
-
void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
{
unsigned long *pending = &vcpu->arch.pending_exceptions;
@@ -145,10 +32,7 @@ void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
priority = __ffs(*pending_clr);
while (priority <= MIPS_EXC_MAX) {
- if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
- if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
- break;
- }
+ kvm_mips_callbacks->irq_clear(vcpu, priority, cause);
priority = find_next_bit(pending_clr,
BITS_PER_BYTE * sizeof(*pending_clr),
@@ -157,10 +41,7 @@ void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
priority = __ffs(*pending);
while (priority <= MIPS_EXC_MAX) {
- if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
- if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
- break;
- }
+ kvm_mips_callbacks->irq_deliver(vcpu, priority, cause);
priority = find_next_bit(pending,
BITS_PER_BYTE * sizeof(*pending),
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index c3e878ca3e07..e529ea2bb34b 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -31,29 +31,9 @@
#define C_TI (_ULCAST_(1) << 30)
-#ifdef CONFIG_KVM_MIPS_VZ
-#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (1)
-#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (1)
-#else
-#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
-#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
-#endif
-
extern u32 *kvm_priority_to_irq;
u32 kvm_irq_to_priority(u32 irq);
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
-void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
-void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
-void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
- struct kvm_mips_interrupt *irq);
-void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
- struct kvm_mips_interrupt *irq);
-int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- u32 cause);
-int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- u32 cause);
void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause);
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 58a8812e2fa5..af9dd029a4e1 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -30,7 +30,6 @@
#include <linux/kvm_host.h>
#include "interrupt.h"
-#include "commpage.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -39,45 +38,63 @@
#define VECTORSPACING 0x100 /* for EI/VI mode */
#endif
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("wait", wait_exits),
- VCPU_STAT("cache", cache_exits),
- VCPU_STAT("signal", signal_exits),
- VCPU_STAT("interrupt", int_exits),
- VCPU_STAT("cop_unusable", cop_unusable_exits),
- VCPU_STAT("tlbmod", tlbmod_exits),
- VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits),
- VCPU_STAT("tlbmiss_st", tlbmiss_st_exits),
- VCPU_STAT("addrerr_st", addrerr_st_exits),
- VCPU_STAT("addrerr_ld", addrerr_ld_exits),
- VCPU_STAT("syscall", syscall_exits),
- VCPU_STAT("resvd_inst", resvd_inst_exits),
- VCPU_STAT("break_inst", break_inst_exits),
- VCPU_STAT("trap_inst", trap_inst_exits),
- VCPU_STAT("msa_fpe", msa_fpe_exits),
- VCPU_STAT("fpe", fpe_exits),
- VCPU_STAT("msa_disabled", msa_disabled_exits),
- VCPU_STAT("flush_dcache", flush_dcache_exits),
-#ifdef CONFIG_KVM_MIPS_VZ
- VCPU_STAT("vz_gpsi", vz_gpsi_exits),
- VCPU_STAT("vz_gsfc", vz_gsfc_exits),
- VCPU_STAT("vz_hc", vz_hc_exits),
- VCPU_STAT("vz_grr", vz_grr_exits),
- VCPU_STAT("vz_gva", vz_gva_exits),
- VCPU_STAT("vz_ghfc", vz_ghfc_exits),
- VCPU_STAT("vz_gpa", vz_gpa_exits),
- VCPU_STAT("vz_resvd", vz_resvd_exits),
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS()
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, wait_exits),
+ STATS_DESC_COUNTER(VCPU, cache_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, int_exits),
+ STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
+ STATS_DESC_COUNTER(VCPU, tlbmod_exits),
+ STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
+ STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
+ STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
+ STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
+ STATS_DESC_COUNTER(VCPU, syscall_exits),
+ STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
+ STATS_DESC_COUNTER(VCPU, break_inst_exits),
+ STATS_DESC_COUNTER(VCPU, trap_inst_exits),
+ STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
+ STATS_DESC_COUNTER(VCPU, fpe_exits),
+ STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
+ STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
+ STATS_DESC_COUNTER(VCPU, vz_hc_exits),
+ STATS_DESC_COUNTER(VCPU, vz_grr_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gva_exits),
+ STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
+ STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
+ STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
#ifdef CONFIG_CPU_LOONGSON64
- VCPU_STAT("vz_cpucfg", vz_cpucfg_exits),
+ STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
#endif
-#endif
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- {NULL}
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
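
The initializers above describe a self-describing binary stats file: a fixed header whose offsets locate the id string, the descriptor array, and the data array in turn. A simplified model of how those offsets chain together; the real struct kvm_stats_header has additional fields:

#include <stdint.h>

struct stats_hdr {
	uint32_t name_size, num_desc;
	uint32_t id_off, desc_off, data_off;
};

static struct stats_hdr make_stats_hdr(uint32_t name_size, uint32_t num_desc,
				       uint32_t desc_size)
{
	struct stats_hdr h;

	h.name_size = name_size;
	h.num_desc = num_desc;
	h.id_off = sizeof(h);			/* id string follows header */
	h.desc_off = h.id_off + name_size;	/* then the descriptors */
	h.data_off = h.desc_off + num_desc * desc_size;	/* then the data */
	return h;
}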
bool kvm_trace_guest_mode_change;
@@ -139,11 +156,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
switch (type) {
case KVM_VM_MIPS_AUTO:
break;
-#ifdef CONFIG_KVM_MIPS_VZ
case KVM_VM_MIPS_VZ:
-#else
- case KVM_VM_MIPS_TE:
-#endif
break;
default:
/* Unsupported KVM type */
@@ -204,9 +217,7 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
/* Flush whole GPA */
kvm_mips_flush_gpa_pt(kvm, 0, ~0);
-
- /* Let implementation do the rest */
- kvm_mips_callbacks->flush_shadow_all(kvm);
+ kvm_flush_remote_tlbs(kvm);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
@@ -221,8 +232,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
/* Flush slot from GPA */
kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
slot->base_gfn + slot->npages - 1);
- /* Let implementation do the rest */
- kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
+ kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
spin_unlock(&kvm->mmu_lock);
}
@@ -262,9 +272,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
/* Write protect GPA page table entries */
needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
new->base_gfn + new->npages - 1);
- /* Let implementation do the rest */
if (needs_flush)
- kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
+ kvm_arch_flush_remote_tlbs_memslot(kvm, new);
spin_unlock(&kvm->mmu_lock);
}
}
@@ -361,7 +370,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
refill_start = gebase;
- if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
+ if (IS_ENABLED(CONFIG_64BIT))
refill_start += 0x080;
refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);
@@ -397,20 +406,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
flush_icache_range((unsigned long)gebase,
(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
- /*
- * Allocate comm page for guest kernel, a TLB will be reserved for
- * mapping GVA @ 0xFFFF8000 to this page
- */
- vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
-
- if (!vcpu->arch.kseg0_commpage) {
- err = -ENOMEM;
- goto out_free_gebase;
- }
-
- kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
- kvm_mips_commpage_init(vcpu);
-
/* Init */
vcpu->arch.last_sched_cpu = -1;
vcpu->arch.last_exec_cpu = -1;
@@ -418,12 +413,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
/* Initial guest state */
err = kvm_mips_callbacks->vcpu_setup(vcpu);
if (err)
- goto out_free_commpage;
+ goto out_free_gebase;
return 0;
-out_free_commpage:
- kfree(vcpu->arch.kseg0_commpage);
out_free_gebase:
kfree(gebase);
out_uninit_vcpu:
@@ -439,7 +432,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvm_mmu_free_memory_caches(vcpu);
kfree(vcpu->arch.guest_ebase);
- kfree(vcpu->arch.kseg0_commpage);
kvm_mips_callbacks->vcpu_uninit(vcpu);
}
@@ -996,11 +988,16 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
}
+int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+{
+ kvm_mips_callbacks->prepare_flush_shadow(kvm);
+ return 1;
+}
+
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
+ const struct kvm_memory_slot *memslot)
{
- /* Let implementation handle TLB/GVA invalidation */
- kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
+ kvm_flush_remote_tlbs(kvm);
}
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
@@ -1212,10 +1209,6 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
- /* re-enable HTW before enabling interrupts */
- if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
- htw_start();
-
/* Set a default exit reason */
run->exit_reason = KVM_EXIT_UNKNOWN;
run->ready_for_interrupt_injection = 1;
@@ -1232,22 +1225,6 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
cause, opc, run, vcpu);
trace_kvm_exit(vcpu, exccode);
- if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
- /*
- * Do a privilege check, if in UM most of these exit conditions
- * end up causing an exception to be delivered to the Guest
- * Kernel
- */
- er = kvm_mips_check_privilege(cause, opc, vcpu);
- if (er == EMULATE_PRIV_FAIL) {
- goto skip_emul;
- } else if (er == EMULATE_FAIL) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- goto skip_emul;
- }
- }
-
switch (exccode) {
case EXCCODE_INT:
kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
@@ -1357,7 +1334,6 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
}
-skip_emul:
local_irq_disable();
if (ret == RESUME_GUEST)
@@ -1406,11 +1382,6 @@ skip_emul:
read_c0_config5() & MIPS_CONF5_MSAEN)
__kvm_restore_msacsr(&vcpu->arch);
}
-
- /* Disable HTW before returning to guest or host */
- if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ))
- htw_stop();
-
return ret;
}
@@ -1429,10 +1400,6 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
* FR=0 FPU state, and we don't want to hit reserved instruction
* exceptions trying to save the MSA state later when CU=1 && FR=1, so
* play it safe and save it first.
- *
- * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
- * get called when guest CU1 is set, however we can't trust the guest
- * not to clobber the status register directly via the commpage.
*/
if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
@@ -1553,11 +1520,6 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
preempt_disable();
if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
- if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
- set_c0_config5(MIPS_CONF5_MSAEN);
- enable_fpu_hazard();
- }
-
__kvm_save_msa(&vcpu->arch);
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
@@ -1569,11 +1531,6 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
}
vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
- if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
- set_c0_status(ST0_CU1);
- enable_fpu_hazard();
- }
-
__kvm_save_fpu(&vcpu->arch);
vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 3dabeda82458..6d1f68cf4edf 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -439,85 +439,34 @@ static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
end_gfn << PAGE_SHIFT);
}
-static int handle_hva_to_gpa(struct kvm *kvm,
- unsigned long start,
- unsigned long end,
- int (*handler)(struct kvm *kvm, gfn_t gfn,
- gpa_t gfn_end,
- struct kvm_memory_slot *memslot,
- void *data),
- void *data)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- int ret = 0;
-
- slots = kvm_memslots(kvm);
-
- /* we only care about the pages that the guest sees */
- kvm_for_each_memslot(memslot, slots) {
- unsigned long hva_start, hva_end;
- gfn_t gfn, gfn_end;
-
- hva_start = max(start, memslot->userspace_addr);
- hva_end = min(end, memslot->userspace_addr +
- (memslot->npages << PAGE_SHIFT));
- if (hva_start >= hva_end)
- continue;
-
- /*
- * {gfn(page) | page intersects with [hva_start, hva_end)} =
- * {gfn_start, gfn_start+1, ..., gfn_end-1}.
- */
- gfn = hva_to_gfn_memslot(hva_start, memslot);
- gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
- ret |= handler(kvm, gfn, gfn_end, memslot, data);
- }
-
- return ret;
-}
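
The removed walk above clipped an HVA range against each memslot and converted the endpoints to GFNs, rounding the end up so a partially covered last page is included. A standalone sketch of that conversion, with illustrative slot values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ul << PAGE_SHIFT)

struct memslot { uint64_t base_gfn, npages; unsigned long userspace_addr; };

/* Equivalent of hva_to_gfn_memslot() as used in the removed walk. */
static uint64_t hva_to_gfn(unsigned long hva, const struct memslot *slot)
{
	return slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
}

int main(void)
{
	struct memslot slot = { .base_gfn = 0x100, .npages = 16,
				.userspace_addr = 0x7f0000000000ul };
	unsigned long start = slot.userspace_addr + 0x1800;	/* mid-page */
	unsigned long end   = start + 3 * PAGE_SIZE;

	uint64_t gfn     = hva_to_gfn(start, &slot);
	uint64_t gfn_end = hva_to_gfn(end + PAGE_SIZE - 1, &slot);

	printf("gfn range [%llx, %llx)\n",
	       (unsigned long long)gfn, (unsigned long long)gfn_end);
	return 0;
}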
-
-
-static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
- struct kvm_memory_slot *memslot, void *data)
-{
- kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
+ kvm_mips_flush_gpa_pt(kvm, range->start, range->end);
return 1;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
- unsigned flags)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
-
- kvm_mips_callbacks->flush_shadow_all(kvm);
- return 0;
-}
-
-static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
- struct kvm_memory_slot *memslot, void *data)
-{
- gpa_t gpa = gfn << PAGE_SHIFT;
- pte_t hva_pte = *(pte_t *)data;
+ gpa_t gpa = range->start << PAGE_SHIFT;
+ pte_t hva_pte = range->pte;
pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
pte_t old_pte;
if (!gpa_pte)
- return 0;
+ return false;
/* Mapping may need adjusting depending on memslot flags */
old_pte = *gpa_pte;
- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
+ if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
hva_pte = pte_mkclean(hva_pte);
- else if (memslot->flags & KVM_MEM_READONLY)
+ else if (range->slot->flags & KVM_MEM_READONLY)
hva_pte = pte_wrprotect(hva_pte);
set_pte(gpa_pte, hva_pte);
/* Replacing an absent or old page doesn't need flushes */
if (!pte_present(old_pte) || !pte_young(old_pte))
- return 0;
+ return false;
/* Pages swapped, aged, moved, or cleaned require flushes */
return !pte_present(hva_pte) ||
@@ -526,27 +475,14 @@ static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
(pte_dirty(old_pte) && !pte_dirty(hva_pte));
}
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- unsigned long end = hva + PAGE_SIZE;
- int ret;
-
- ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
- if (ret)
- kvm_mips_callbacks->flush_shadow_all(kvm);
- return 0;
+ return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end);
}
-static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
- struct kvm_memory_slot *memslot, void *data)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
-}
-
-static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
- struct kvm_memory_slot *memslot, void *data)
-{
- gpa_t gpa = gfn << PAGE_SHIFT;
+ gpa_t gpa = range->start << PAGE_SHIFT;
pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
if (!gpa_pte)
@@ -554,16 +490,6 @@ static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
return pte_young(*gpa_pte);
}
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
-{
- return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
-}
-
/**
* _kvm_mips_map_page_fast() - Fast path GPA fault handler.
* @vcpu: VCPU pointer.
@@ -756,209 +682,6 @@ out:
return err;
}
-static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
- unsigned long addr)
-{
- struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
- pgd_t *pgdp;
- int ret;
-
- /* We need a minimum of cached pages ready for page table creation */
- ret = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES);
- if (ret)
- return NULL;
-
- if (KVM_GUEST_KERNEL_MODE(vcpu))
- pgdp = vcpu->arch.guest_kernel_mm.pgd;
- else
- pgdp = vcpu->arch.guest_user_mm.pgd;
-
- return kvm_mips_walk_pgd(pgdp, memcache, addr);
-}
-
-void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
- bool user)
-{
- pgd_t *pgdp;
- pte_t *ptep;
-
- addr &= PAGE_MASK << 1;
-
- pgdp = vcpu->arch.guest_kernel_mm.pgd;
- ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
- if (ptep) {
- ptep[0] = pfn_pte(0, __pgprot(0));
- ptep[1] = pfn_pte(0, __pgprot(0));
- }
-
- if (user) {
- pgdp = vcpu->arch.guest_user_mm.pgd;
- ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
- if (ptep) {
- ptep[0] = pfn_pte(0, __pgprot(0));
- ptep[1] = pfn_pte(0, __pgprot(0));
- }
- }
-}
-
-/*
- * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
- * Flush a range of guest physical address space from the VM's GPA page tables.
- */
-
-static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
- unsigned long end_gva)
-{
- int i_min = pte_index(start_gva);
- int i_max = pte_index(end_gva);
- bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
- int i;
-
- /*
- * There's no freeing to do, so there's no point clearing individual
- * entries unless only part of the last level page table needs flushing.
- */
- if (safe_to_remove)
- return true;
-
- for (i = i_min; i <= i_max; ++i) {
- if (!pte_present(pte[i]))
- continue;
-
- set_pte(pte + i, __pte(0));
- }
- return false;
-}
-
-static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
- unsigned long end_gva)
-{
- pte_t *pte;
- unsigned long end = ~0ul;
- int i_min = pmd_index(start_gva);
- int i_max = pmd_index(end_gva);
- bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
- int i;
-
- for (i = i_min; i <= i_max; ++i, start_gva = 0) {
- if (!pmd_present(pmd[i]))
- continue;
-
- pte = pte_offset_kernel(pmd + i, 0);
- if (i == i_max)
- end = end_gva;
-
- if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
- pmd_clear(pmd + i);
- pte_free_kernel(NULL, pte);
- } else {
- safe_to_remove = false;
- }
- }
- return safe_to_remove;
-}
-
-static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
- unsigned long end_gva)
-{
- pmd_t *pmd;
- unsigned long end = ~0ul;
- int i_min = pud_index(start_gva);
- int i_max = pud_index(end_gva);
- bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
- int i;
-
- for (i = i_min; i <= i_max; ++i, start_gva = 0) {
- if (!pud_present(pud[i]))
- continue;
-
- pmd = pmd_offset(pud + i, 0);
- if (i == i_max)
- end = end_gva;
-
- if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
- pud_clear(pud + i);
- pmd_free(NULL, pmd);
- } else {
- safe_to_remove = false;
- }
- }
- return safe_to_remove;
-}
-
-static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
- unsigned long end_gva)
-{
- p4d_t *p4d;
- pud_t *pud;
- unsigned long end = ~0ul;
- int i_min = pgd_index(start_gva);
- int i_max = pgd_index(end_gva);
- bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
- int i;
-
- for (i = i_min; i <= i_max; ++i, start_gva = 0) {
- if (!pgd_present(pgd[i]))
- continue;
-
- p4d = p4d_offset(pgd, 0);
- pud = pud_offset(p4d + i, 0);
- if (i == i_max)
- end = end_gva;
-
- if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
- pgd_clear(pgd + i);
- pud_free(NULL, pud);
- } else {
- safe_to_remove = false;
- }
- }
- return safe_to_remove;
-}
-
-void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
-{
- if (flags & KMF_GPA) {
- /* all of guest virtual address space could be affected */
- if (flags & KMF_KERN)
- /* useg, kseg0, seg2/3 */
- kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);
- else
- /* useg */
- kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
- } else {
- /* useg */
- kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
-
- /* kseg2/3 */
- if (flags & KMF_KERN)
- kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
- }
-}
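
Each level of the removed flush above follows the same rule: if the flushed index range covers the whole table, report it freeable to the caller; otherwise clear only the covered entries and keep the table. One level of that logic, flattened into a sketch:

#include <stdint.h>

/* Returns 1 when the caller may free the whole table; model only. */
static int flush_level(uint64_t *entries, int i_min, int i_max, int nr_ptrs)
{
	int i;

	if (i_min == 0 && i_max == nr_ptrs - 1)
		return 1;		/* full span: free at the caller */

	for (i = i_min; i <= i_max; i++)
		entries[i] = 0;		/* partial span: clear in place */
	return 0;
}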
-
-static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte)
-{
- /*
- * Don't leak writeable but clean entries from GPA page tables. We don't
- * want the normal Linux tlbmod handler to handle dirtying when KVM
- * accesses guest memory.
- */
- if (!pte_dirty(pte))
- pte = pte_wrprotect(pte);
-
- return pte;
-}
-
-static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
-{
- /* Guest EntryLo overrides host EntryLo */
- if (!(entrylo & ENTRYLO_D))
- pte = pte_mkclean(pte);
-
- return kvm_mips_gpa_pte_to_gva_unmapped(pte);
-}
-
-#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu,
bool write_fault)
@@ -972,125 +695,6 @@ int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
/* Invalidate this entry in the TLB */
return kvm_vz_host_tlb_inv(vcpu, badvaddr);
}
-#endif
-
-/* XXXKYMA: Must be called with interrupts disabled */
-int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
- struct kvm_vcpu *vcpu,
- bool write_fault)
-{
- unsigned long gpa;
- pte_t pte_gpa[2], *ptep_gva;
- int idx;
-
- if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
- kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
- kvm_mips_dump_host_tlbs();
- return -1;
- }
-
- /* Get the GPA page table entry */
- gpa = KVM_GUEST_CPHYSADDR(badvaddr);
- idx = (badvaddr >> PAGE_SHIFT) & 1;
- if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],
- &pte_gpa[!idx]) < 0)
- return -1;
-
- /* Get the GVA page table entry */
- ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE);
- if (!ptep_gva) {
- kvm_err("No ptep for gva %lx\n", badvaddr);
- return -1;
- }
-
- /* Copy a pair of entries from GPA page table to GVA page table */
- ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]);
- ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]);
-
- /* Invalidate this entry in the TLB, guest kernel ASID only */
- kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
- return 0;
-}
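
The pte_gpa[2] pairs above exist because a MIPS TLB entry maps two consecutive virtual pages; bit PAGE_SHIFT of the address selects the even or odd half. As a one-liner, assuming 4 KiB pages:

/* Mirrors "(badvaddr >> PAGE_SHIFT) & 1" above; sketch only. */
static int tlb_lo_index(unsigned long addr)
{
	return (addr >> 12) & 1;	/* PAGE_SHIFT == 12 */
}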
-
-int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
- struct kvm_mips_tlb *tlb,
- unsigned long gva,
- bool write_fault)
-{
- struct kvm *kvm = vcpu->kvm;
- long tlb_lo[2];
- pte_t pte_gpa[2], *ptep_buddy, *ptep_gva;
- unsigned int idx = TLB_LO_IDX(*tlb, gva);
- bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);
-
- tlb_lo[0] = tlb->tlb_lo[0];
- tlb_lo[1] = tlb->tlb_lo[1];
-
- /*
- * The commpage address must not be mapped to anything else if the guest
- * TLB contains entries nearby, or commpage accesses will break.
- */
- if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
- tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0;
-
- /* Get the GPA page table entry */
- if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]),
- write_fault, &pte_gpa[idx], NULL) < 0)
- return -1;
-
- /* And its GVA buddy's GPA page table entry if it also exists */
- pte_gpa[!idx] = pfn_pte(0, __pgprot(0));
- if (tlb_lo[!idx] & ENTRYLO_V) {
- spin_lock(&kvm->mmu_lock);
- ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
- mips3_tlbpfn_to_paddr(tlb_lo[!idx]));
- if (ptep_buddy)
- pte_gpa[!idx] = *ptep_buddy;
- spin_unlock(&kvm->mmu_lock);
- }
-
- /* Get the GVA page table entry pair */
- ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
- if (!ptep_gva) {
- kvm_err("No ptep for gva %lx\n", gva);
- return -1;
- }
-
- /* Copy a pair of entries from GPA page table to GVA page table */
- ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]);
- ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]);
-
- /* Invalidate this entry in the TLB, current guest mode ASID only */
- kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);
-
- kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
- tlb->tlb_lo[0], tlb->tlb_lo[1]);
-
- return 0;
-}
-
-int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
- struct kvm_vcpu *vcpu)
-{
- kvm_pfn_t pfn;
- pte_t *ptep;
- pgprot_t prot;
-
- ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr);
- if (!ptep) {
- kvm_err("No ptep for commpage %lx\n", badvaddr);
- return -1;
- }
-
- pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
- /* Also set valid and dirty, so refill handler doesn't have to */
- prot = vm_get_page_prot(VM_READ|VM_WRITE|VM_SHARED);
- *ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, prot)));
-
- /* Invalidate this entry in the TLB, guest kernel ASID only */
- kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
- return 0;
-}
/**
* kvm_mips_migrate_count() - Migrate timer.
@@ -1153,86 +757,3 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
local_irq_restore(flags);
}
-
-/**
- * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
- * @vcpu: Virtual CPU.
- * @gva: Guest virtual address to be accessed.
- * @write: True if write attempted (must be dirtied and made writable).
- *
- * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
- * dirtying the page if @write so that guest instructions can be modified.
- *
- * Returns: KVM_MIPS_MAPPED on success.
- * KVM_MIPS_GVA if bad guest virtual address.
- * KVM_MIPS_GPA if bad guest physical address.
- * KVM_MIPS_TLB if guest TLB not present.
- * KVM_MIPS_TLBINV if guest TLB present but not valid.
- * KVM_MIPS_TLBMOD if guest TLB read only.
- */
-enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
- unsigned long gva,
- bool write)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_mips_tlb *tlb;
- int index;
-
- if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
- if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
- return KVM_MIPS_GPA;
- } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
- KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
- /* Address should be in the guest TLB */
- index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
- if (index < 0)
- return KVM_MIPS_TLB;
- tlb = &vcpu->arch.guest_tlb[index];
-
- /* Entry should be valid, and dirty for writes */
- if (!TLB_IS_VALID(*tlb, gva))
- return KVM_MIPS_TLBINV;
- if (write && !TLB_IS_DIRTY(*tlb, gva))
- return KVM_MIPS_TLBMOD;
-
- if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
- return KVM_MIPS_GPA;
- } else {
- return KVM_MIPS_GVA;
- }
-
- return KVM_MIPS_MAPPED;
-}
-
-int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
-{
- int err;
-
- if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
- "Expect BadInstr/BadInstrP registers to be used with VZ\n"))
- return -EINVAL;
-
-retry:
- kvm_trap_emul_gva_lockless_begin(vcpu);
- err = get_user(*out, opc);
- kvm_trap_emul_gva_lockless_end(vcpu);
-
- if (unlikely(err)) {
- /*
- * Try to handle the fault, maybe we just raced with a GVA
- * invalidation.
- */
- err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc,
- false);
- if (unlikely(err)) {
- kvm_err("%s: illegal address: %p\n",
- __func__, opc);
- return -EFAULT;
- }
-
- /* Hopefully it'll work now */
- goto retry;
- }
- return 0;
-}
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index 1c1fbce3f566..a3b50d5e3b25 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -30,10 +30,6 @@
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT
-#define KVM_GUEST_PC_TLB 0
-#define KVM_GUEST_SP_TLB 1
-
-#ifdef CONFIG_KVM_MIPS_VZ
unsigned long GUESTID_MASK;
EXPORT_SYMBOL_GPL(GUESTID_MASK);
unsigned long GUESTID_FIRST_VERSION;
@@ -50,91 +46,6 @@ static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu)
else
return cpu_asid(smp_processor_id(), gpa_mm);
}
-#endif
-
-static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
-{
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
- int cpu = smp_processor_id();
-
- return cpu_asid(cpu, kern_mm);
-}
-
-static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
-{
- struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
- int cpu = smp_processor_id();
-
- return cpu_asid(cpu, user_mm);
-}
-
-/* Structure defining a TLB entry data set. */
-
-void kvm_mips_dump_host_tlbs(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- kvm_info("HOST TLBs:\n");
- dump_tlb_regs();
- pr_info("\n");
- dump_tlb_all();
-
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
-
-void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- struct kvm_mips_tlb tlb;
- int i;
-
- kvm_info("Guest TLBs:\n");
- kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
-
- for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
- tlb = vcpu->arch.guest_tlb[i];
- kvm_info("TLB%c%3d Hi 0x%08lx ",
- (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
- ? ' ' : '*',
- i, tlb.tlb_hi);
- kvm_info("Lo0=0x%09llx %c%c attr %lx ",
- (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
- (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
- (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
- (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
- kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
- (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
- (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
- (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
- (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
- tlb.tlb_mask);
- }
-}
-EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
-
-int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
-{
- int i;
- int index = -1;
- struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
-
- for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
- if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
- TLB_HI_ASID_HIT(tlb[i], entryhi)) {
- index = i;
- break;
- }
- }
-
- kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
- __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]);
-
- return index;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
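
The lookup above compares each guest TLB entry against a composed EntryHi value. The per-entry match amounts to the predicate below; the mask values assume 4 KiB pages and, like the type names, are for illustration only:

#include <stdint.h>

#define MODEL_VPN2_MASK 0xffffe000ul
#define MODEL_ASID_MASK 0x000000fful

struct model_tlbe {
	unsigned long hi;	/* VPN2 | ASID */
	int global;		/* G bit (from both EntryLo halves) */
};

static int tlb_hit(const struct model_tlbe *e, unsigned long entryhi)
{
	if ((e->hi & MODEL_VPN2_MASK) != (entryhi & MODEL_VPN2_MASK))
		return 0;
	return e->global ||
	       (e->hi & MODEL_ASID_MASK) == (entryhi & MODEL_ASID_MASK);
}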
static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
@@ -147,8 +58,7 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
tlb_probe_hazard();
idx = read_c0_index();
- if (idx >= current_cpu_data.tlbsize)
- BUG();
+ BUG_ON(idx >= current_cpu_data.tlbsize);
if (idx >= 0) {
write_c0_entryhi(UNIQUE_ENTRYHI(idx));
@@ -163,54 +73,6 @@ static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
return idx;
}
-int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
- bool user, bool kernel)
-{
-	/*
-	 * Initialize idx_user and idx_kernel to work around a bogus
-	 * maybe-initialized warning when using GCC 6.
-	 */
- int idx_user = 0, idx_kernel = 0;
- unsigned long flags, old_entryhi;
-
- local_irq_save(flags);
-
- old_entryhi = read_c0_entryhi();
-
- if (user)
- idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
- kvm_mips_get_user_asid(vcpu));
- if (kernel)
- idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
- kvm_mips_get_kernel_asid(vcpu));
-
- write_c0_entryhi(old_entryhi);
- mtc0_tlbw_hazard();
-
- local_irq_restore(flags);
-
- /*
- * We don't want to get reserved instruction exceptions for missing tlb
- * entries.
- */
- if (cpu_has_vtag_icache)
- flush_icache_all();
-
- if (user && idx_user >= 0)
- kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
- __func__, (va & VPN2_MASK) |
- kvm_mips_get_user_asid(vcpu), idx_user);
- if (kernel && idx_kernel >= 0)
- kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
- __func__, (va & VPN2_MASK) |
- kvm_mips_get_kernel_asid(vcpu), idx_kernel);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
-
-#ifdef CONFIG_KVM_MIPS_VZ
-
/* GuestID management */
/**
@@ -661,40 +523,3 @@ void kvm_loongson_clear_guest_ftlb(void)
}
EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb);
#endif
-
-#endif
-
-/**
- * kvm_mips_suspend_mm() - Suspend the active mm.
- * @cpu: The CPU we're running on.
- *
- * Suspend the active_mm, ready for a switch to a KVM guest virtual address
- * space. This is left active for the duration of guest context, including time
- * with interrupts enabled, so we need to be careful not to confuse e.g. cache
- * management IPIs.
- *
- * kvm_mips_resume_mm() should be called before context switching to a different
- * process so we don't need to worry about reference counting.
- *
- * This needs to be in static kernel code to avoid exporting init_mm.
- */
-void kvm_mips_suspend_mm(int cpu)
-{
- cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
- current->active_mm = &init_mm;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);
-
-/**
- * kvm_mips_resume_mm() - Resume the current process mm.
- * @cpu: The CPU we're running on.
- *
- * Resume the mm of the current process, after a switch back from a KVM guest
- * virtual address space (see kvm_mips_suspend_mm()).
- */
-void kvm_mips_resume_mm(int cpu)
-{
- cpumask_set_cpu(cpu, mm_cpumask(current->mm));
- current->active_mm = current->mm;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
deleted file mode 100644
index 0788c00d7e94..000000000000
--- a/arch/mips/kvm/trap_emul.c
+++ /dev/null
@@ -1,1306 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/kvm_host.h>
-#include <linux/log2.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <asm/mmu_context.h>
-#include <asm/pgalloc.h>
-
-#include "interrupt.h"
-
-static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
-{
- gpa_t gpa;
- gva_t kseg = KSEGX(gva);
- gva_t gkseg = KVM_GUEST_KSEGX(gva);
-
- if ((kseg == CKSEG0) || (kseg == CKSEG1))
- gpa = CPHYSADDR(gva);
- else if (gkseg == KVM_GUEST_KSEG0)
- gpa = KVM_GUEST_CPHYSADDR(gva);
- else {
- kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
- kvm_mips_dump_host_tlbs();
- gpa = KVM_INVALID_ADDR;
- }
-
- kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
-
- return gpa;
-}
-
-static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
-{
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- u32 inst = 0;
-
- /*
- * Fetch the instruction.
- */
- if (cause & CAUSEF_BD)
- opc += 1;
- kvm_get_badinstr(opc, vcpu, &inst);
-
- kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
- exccode, opc, inst, badvaddr,
- kvm_read_c0_guest_status(vcpu->arch.cop0));
- kvm_arch_vcpu_dump_regs(vcpu);
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return RESUME_HOST;
-}
-
-static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
- /* FPU Unusable */
- if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
- (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
- /*
- * Unusable/no FPU in guest:
- * deliver guest COP1 Unusable Exception
- */
- er = kvm_mips_emulate_fpu_exc(cause, opc, vcpu);
- } else {
- /* Restore FPU state */
- kvm_own_fpu(vcpu);
- er = EMULATE_DONE;
- }
- } else {
- er = kvm_mips_emulate_inst(cause, opc, vcpu);
- }
-
- switch (er) {
- case EMULATE_DONE:
- ret = RESUME_GUEST;
- break;
-
- case EMULATE_FAIL:
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- break;
-
- case EMULATE_WAIT:
- vcpu->run->exit_reason = KVM_EXIT_INTR;
- ret = RESUME_HOST;
- break;
-
- case EMULATE_HYPERCALL:
- ret = kvm_mips_handle_hypcall(vcpu);
- break;
-
- default:
- BUG();
- }
- return ret;
-}
-
-static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
-{
- enum emulation_result er;
- union mips_instruction inst;
- int err;
-
- /* A code fetch fault doesn't count as an MMIO */
- if (kvm_is_ifetch_fault(&vcpu->arch)) {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return RESUME_HOST;
- }
-
- /* Fetch the instruction. */
- if (cause & CAUSEF_BD)
- opc += 1;
- err = kvm_get_badinstr(opc, vcpu, &inst.word);
- if (err) {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return RESUME_HOST;
- }
-
- /* Emulate the load */
- er = kvm_mips_emulate_load(inst, cause, vcpu);
- if (er == EMULATE_FAIL) {
- kvm_err("Emulate load from MMIO space failed\n");
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- } else {
- vcpu->run->exit_reason = KVM_EXIT_MMIO;
- }
- return RESUME_HOST;
-}
-
-static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
-{
- enum emulation_result er;
- union mips_instruction inst;
- int err;
-
- /* Fetch the instruction. */
- if (cause & CAUSEF_BD)
- opc += 1;
- err = kvm_get_badinstr(opc, vcpu, &inst.word);
- if (err) {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return RESUME_HOST;
- }
-
- /* Emulate the store */
- er = kvm_mips_emulate_store(inst, cause, vcpu);
- if (er == EMULATE_FAIL) {
- kvm_err("Emulate store to MMIO space failed\n");
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- } else {
- vcpu->run->exit_reason = KVM_EXIT_MMIO;
- }
- return RESUME_HOST;
-}
-
-static int kvm_mips_bad_access(u32 cause, u32 *opc,
- struct kvm_vcpu *vcpu, bool store)
-{
- if (store)
- return kvm_mips_bad_store(cause, opc, vcpu);
- else
- return kvm_mips_bad_load(cause, opc, vcpu);
-}
-
-static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- u32 cause = vcpu->arch.host_cp0_cause;
- struct kvm_mips_tlb *tlb;
- unsigned long entryhi;
- int index;
-
- if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
- || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
- /*
- * First find the mapping in the guest TLB. If the failure to
- * write was due to the guest TLB, it should be up to the guest
- * to handle it.
- */
- entryhi = (badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
- index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
-
- /*
- * These should never happen.
- * They would indicate stale host TLB entries.
- */
- if (unlikely(index < 0)) {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return RESUME_HOST;
- }
- tlb = vcpu->arch.guest_tlb + index;
- if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- return RESUME_HOST;
- }
-
- /*
- * Guest entry not dirty? That would explain the TLB modified
- * exception. Relay that on to the guest so it can handle it.
- */
- if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
- kvm_mips_emulate_tlbmod(cause, opc, vcpu);
- return RESUME_GUEST;
- }
-
- if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
- true))
- /* Not writable, needs handling as MMIO */
- return kvm_mips_bad_store(cause, opc, vcpu);
- return RESUME_GUEST;
- } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
- if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
- /* Not writable, needs handling as MMIO */
- return kvm_mips_bad_store(cause, opc, vcpu);
- return RESUME_GUEST;
- } else {
- /* host kernel addresses are all handled as MMIO */
- return kvm_mips_bad_store(cause, opc, vcpu);
- }
-}
-
-static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
-{
- struct kvm_run *run = vcpu->run;
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
- && KVM_GUEST_KERNEL_MODE(vcpu)) {
- if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
- || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
- kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
- store ? "ST" : "LD", cause, opc, badvaddr);
-
- /*
- * User Address (UA) fault, this could happen if
- * (1) TLB entry not present/valid in both Guest and shadow host
- * TLBs, in this case we pass on the fault to the guest
- * kernel and let it handle it.
- * (2) TLB entry is present in the Guest TLB but not in the
- * shadow, in this case we inject the TLB from the Guest TLB
- * into the shadow host TLB
- */
-
- er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, store);
- if (er == EMULATE_DONE)
- ret = RESUME_GUEST;
- else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
- /*
- * All KSEG0 faults are handled by KVM, as the guest kernel does
- * not expect to ever get them
- */
- if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
- ret = kvm_mips_bad_access(cause, opc, vcpu, store);
- } else if (KVM_GUEST_KERNEL_MODE(vcpu)
- && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
- /*
- * With EVA we may get a TLB exception instead of an address
- * error when the guest performs MMIO to KSeg1 addresses.
- */
- ret = kvm_mips_bad_access(cause, opc, vcpu, store);
- } else {
- kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
- store ? "ST" : "LD", cause, opc, badvaddr);
- kvm_mips_dump_host_tlbs();
- kvm_arch_vcpu_dump_regs(vcpu);
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
-{
- return kvm_trap_emul_handle_tlb_miss(vcpu, true);
-}
-
-static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
-{
- return kvm_trap_emul_handle_tlb_miss(vcpu, false);
-}
-
-static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
-{
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- u32 cause = vcpu->arch.host_cp0_cause;
- int ret = RESUME_GUEST;
-
- if (KVM_GUEST_KERNEL_MODE(vcpu)
- && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
- ret = kvm_mips_bad_store(cause, opc, vcpu);
- } else {
- kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
- cause, opc, badvaddr);
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
-{
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- u32 cause = vcpu->arch.host_cp0_cause;
- int ret = RESUME_GUEST;
-
- if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
- ret = kvm_mips_bad_load(cause, opc, vcpu);
- } else {
- kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
- cause, opc, badvaddr);
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
-{
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- er = kvm_mips_emulate_syscall(cause, opc, vcpu);
- if (er == EMULATE_DONE)
- ret = RESUME_GUEST;
- else {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
-{
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- er = kvm_mips_handle_ri(cause, opc, vcpu);
- if (er == EMULATE_DONE)
- ret = RESUME_GUEST;
- else {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
-{
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- er = kvm_mips_emulate_bp_exc(cause, opc, vcpu);
- if (er == EMULATE_DONE)
- ret = RESUME_GUEST;
- else {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
-{
- u32 __user *opc = (u32 __user *)vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- er = kvm_mips_emulate_trap_exc(cause, opc, vcpu);
- if (er == EMULATE_DONE) {
- ret = RESUME_GUEST;
- } else {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
-{
- u32 __user *opc = (u32 __user *)vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- er = kvm_mips_emulate_msafpe_exc(cause, opc, vcpu);
- if (er == EMULATE_DONE) {
- ret = RESUME_GUEST;
- } else {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
-{
- u32 __user *opc = (u32 __user *)vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- er = kvm_mips_emulate_fpe_exc(cause, opc, vcpu);
- if (er == EMULATE_DONE) {
- ret = RESUME_GUEST;
- } else {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-/**
- * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
- * @vcpu: Virtual CPU context.
- *
- * Handle when the guest attempts to use MSA when it is disabled.
- */
-static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- u32 __user *opc = (u32 __user *) vcpu->arch.pc;
- u32 cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
- (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
- /*
- * No MSA in guest, or FPU enabled and not in FR=1 mode,
- * guest reserved instruction exception
- */
- er = kvm_mips_emulate_ri_exc(cause, opc, vcpu);
- } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
- /* MSA disabled by guest, guest MSA disabled exception */
- er = kvm_mips_emulate_msadis_exc(cause, opc, vcpu);
- } else {
- /* Restore MSA/FPU state */
- kvm_own_msa(vcpu);
- er = EMULATE_DONE;
- }
-
- switch (er) {
- case EMULATE_DONE:
- ret = RESUME_GUEST;
- break;
-
- case EMULATE_FAIL:
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- break;
-
- default:
- BUG();
- }
- return ret;
-}
-
-static int kvm_trap_emul_hardware_enable(void)
-{
- return 0;
-}
-
-static void kvm_trap_emul_hardware_disable(void)
-{
-}
-
-static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
-{
- int r;
-
- switch (ext) {
- case KVM_CAP_MIPS_TE:
- r = 1;
- break;
- case KVM_CAP_IOEVENTFD:
- r = 1;
- break;
- default:
- r = 0;
- break;
- }
-
- return r;
-}
-
-static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
-{
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
- struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
-
- /*
- * Allocate GVA -> HPA page tables.
- * MIPS doesn't use the mm_struct pointer argument.
- */
- kern_mm->pgd = pgd_alloc(kern_mm);
- if (!kern_mm->pgd)
- return -ENOMEM;
-
- user_mm->pgd = pgd_alloc(user_mm);
- if (!user_mm->pgd) {
- pgd_free(kern_mm, kern_mm->pgd);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
-{
- /* Don't free host kernel page tables copied from init_mm.pgd */
- const unsigned long end = 0x80000000;
- unsigned long pgd_va, pud_va, pmd_va;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int i, j, k;
-
- for (i = 0; i < USER_PTRS_PER_PGD; i++) {
- if (pgd_none(pgd[i]))
- continue;
-
- pgd_va = (unsigned long)i << PGDIR_SHIFT;
- if (pgd_va >= end)
- break;
- p4d = p4d_offset(pgd, 0);
- pud = pud_offset(p4d + i, 0);
- for (j = 0; j < PTRS_PER_PUD; j++) {
- if (pud_none(pud[j]))
- continue;
-
- pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
- if (pud_va >= end)
- break;
- pmd = pmd_offset(pud + j, 0);
- for (k = 0; k < PTRS_PER_PMD; k++) {
- if (pmd_none(pmd[k]))
- continue;
-
- pmd_va = pud_va | (k << PMD_SHIFT);
- if (pmd_va >= end)
- break;
- pte = pte_offset_kernel(pmd + k, 0);
- pte_free_kernel(NULL, pte);
- }
- pmd_free(NULL, pmd);
- }
- pud_free(NULL, pud);
- }
- pgd_free(NULL, pgd);
-}
-
-static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
- kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
- kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
-}
-
-static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- u32 config, config1;
- int vcpu_id = vcpu->vcpu_id;
-
- /* Start off the timer at 100 MHz */
- kvm_mips_init_count(vcpu, 100*1000*1000);
-
- /*
- * Arch specific stuff, set up config registers properly so that the
- * guest will come up as expected
- */
-#ifndef CONFIG_CPU_MIPSR6
- /* r2-r5, simulate a MIPS 24kc */
- kvm_write_c0_guest_prid(cop0, 0x00019300);
-#else
- /* r6+, simulate a generic QEMU machine */
- kvm_write_c0_guest_prid(cop0, 0x00010000);
-#endif
- /*
- * Have config1, Cacheable, noncoherent, write-back, write allocate.
- * Endianness, arch revision & virtually tagged icache should match
- * host.
- */
- config = read_c0_config() & MIPS_CONF_AR;
- config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
-#ifdef CONFIG_CPU_BIG_ENDIAN
- config |= CONF_BE;
-#endif
- if (cpu_has_vtag_icache)
- config |= MIPS_CONF_VI;
- kvm_write_c0_guest_config(cop0, config);
-
- /* Read the cache characteristics from the host Config1 Register */
- config1 = (read_c0_config1() & ~0x7f);
-
- /* DCache line size not correctly reported in Config1 on Octeon CPUs */
- if (cpu_dcache_line_size()) {
- config1 &= ~MIPS_CONF1_DL;
- config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
- MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
- }
-
- /* Set up MMU size */
- config1 &= ~(0x3f << 25);
- config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
-
- /* We unset some bits that we aren't emulating */
- config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
- MIPS_CONF1_WR | MIPS_CONF1_CA);
- kvm_write_c0_guest_config1(cop0, config1);
-
- /* Have config3, no tertiary/secondary caches implemented */
- kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
- /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */
-
- /* Have config4, UserLocal */
- kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);
-
- /* Have config5 */
- kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);
-
- /* No config6 */
- kvm_write_c0_guest_config5(cop0, 0);
-
- /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
- kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
-
- /* Status */
- kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);
-
- /*
- * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
- */
- kvm_write_c0_guest_intctl(cop0, 0xFC000000);
-
- /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
- kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
- (vcpu_id & MIPS_EBASE_CPUNUM));
-
- /* Put PC at guest reset vector */
- vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);
-
- return 0;
-}
-
-static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
-{
- /* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
- kvm_flush_remote_tlbs(kvm);
-}
-
-static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *slot)
-{
- kvm_trap_emul_flush_shadow_all(kvm);
-}
-
-static u64 kvm_trap_emul_get_one_regs[] = {
- KVM_REG_MIPS_CP0_INDEX,
- KVM_REG_MIPS_CP0_ENTRYLO0,
- KVM_REG_MIPS_CP0_ENTRYLO1,
- KVM_REG_MIPS_CP0_CONTEXT,
- KVM_REG_MIPS_CP0_USERLOCAL,
- KVM_REG_MIPS_CP0_PAGEMASK,
- KVM_REG_MIPS_CP0_WIRED,
- KVM_REG_MIPS_CP0_HWRENA,
- KVM_REG_MIPS_CP0_BADVADDR,
- KVM_REG_MIPS_CP0_COUNT,
- KVM_REG_MIPS_CP0_ENTRYHI,
- KVM_REG_MIPS_CP0_COMPARE,
- KVM_REG_MIPS_CP0_STATUS,
- KVM_REG_MIPS_CP0_INTCTL,
- KVM_REG_MIPS_CP0_CAUSE,
- KVM_REG_MIPS_CP0_EPC,
- KVM_REG_MIPS_CP0_PRID,
- KVM_REG_MIPS_CP0_EBASE,
- KVM_REG_MIPS_CP0_CONFIG,
- KVM_REG_MIPS_CP0_CONFIG1,
- KVM_REG_MIPS_CP0_CONFIG2,
- KVM_REG_MIPS_CP0_CONFIG3,
- KVM_REG_MIPS_CP0_CONFIG4,
- KVM_REG_MIPS_CP0_CONFIG5,
- KVM_REG_MIPS_CP0_CONFIG7,
- KVM_REG_MIPS_CP0_ERROREPC,
- KVM_REG_MIPS_CP0_KSCRATCH1,
- KVM_REG_MIPS_CP0_KSCRATCH2,
- KVM_REG_MIPS_CP0_KSCRATCH3,
- KVM_REG_MIPS_CP0_KSCRATCH4,
- KVM_REG_MIPS_CP0_KSCRATCH5,
- KVM_REG_MIPS_CP0_KSCRATCH6,
-
- KVM_REG_MIPS_COUNT_CTL,
- KVM_REG_MIPS_COUNT_RESUME,
- KVM_REG_MIPS_COUNT_HZ,
-};
-
-static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
-{
- return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
-}
-
-static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
- u64 __user *indices)
-{
- if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
- sizeof(kvm_trap_emul_get_one_regs)))
- return -EFAULT;
- indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);
-
- return 0;
-}
-
-static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg,
- s64 *v)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
-
- switch (reg->id) {
- case KVM_REG_MIPS_CP0_INDEX:
- *v = (long)kvm_read_c0_guest_index(cop0);
- break;
- case KVM_REG_MIPS_CP0_ENTRYLO0:
- *v = kvm_read_c0_guest_entrylo0(cop0);
- break;
- case KVM_REG_MIPS_CP0_ENTRYLO1:
- *v = kvm_read_c0_guest_entrylo1(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONTEXT:
- *v = (long)kvm_read_c0_guest_context(cop0);
- break;
- case KVM_REG_MIPS_CP0_USERLOCAL:
- *v = (long)kvm_read_c0_guest_userlocal(cop0);
- break;
- case KVM_REG_MIPS_CP0_PAGEMASK:
- *v = (long)kvm_read_c0_guest_pagemask(cop0);
- break;
- case KVM_REG_MIPS_CP0_WIRED:
- *v = (long)kvm_read_c0_guest_wired(cop0);
- break;
- case KVM_REG_MIPS_CP0_HWRENA:
- *v = (long)kvm_read_c0_guest_hwrena(cop0);
- break;
- case KVM_REG_MIPS_CP0_BADVADDR:
- *v = (long)kvm_read_c0_guest_badvaddr(cop0);
- break;
- case KVM_REG_MIPS_CP0_ENTRYHI:
- *v = (long)kvm_read_c0_guest_entryhi(cop0);
- break;
- case KVM_REG_MIPS_CP0_COMPARE:
- *v = (long)kvm_read_c0_guest_compare(cop0);
- break;
- case KVM_REG_MIPS_CP0_STATUS:
- *v = (long)kvm_read_c0_guest_status(cop0);
- break;
- case KVM_REG_MIPS_CP0_INTCTL:
- *v = (long)kvm_read_c0_guest_intctl(cop0);
- break;
- case KVM_REG_MIPS_CP0_CAUSE:
- *v = (long)kvm_read_c0_guest_cause(cop0);
- break;
- case KVM_REG_MIPS_CP0_EPC:
- *v = (long)kvm_read_c0_guest_epc(cop0);
- break;
- case KVM_REG_MIPS_CP0_PRID:
- *v = (long)kvm_read_c0_guest_prid(cop0);
- break;
- case KVM_REG_MIPS_CP0_EBASE:
- *v = (long)kvm_read_c0_guest_ebase(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONFIG:
- *v = (long)kvm_read_c0_guest_config(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONFIG1:
- *v = (long)kvm_read_c0_guest_config1(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONFIG2:
- *v = (long)kvm_read_c0_guest_config2(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONFIG3:
- *v = (long)kvm_read_c0_guest_config3(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONFIG4:
- *v = (long)kvm_read_c0_guest_config4(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONFIG5:
- *v = (long)kvm_read_c0_guest_config5(cop0);
- break;
- case KVM_REG_MIPS_CP0_CONFIG7:
- *v = (long)kvm_read_c0_guest_config7(cop0);
- break;
- case KVM_REG_MIPS_CP0_COUNT:
- *v = kvm_mips_read_count(vcpu);
- break;
- case KVM_REG_MIPS_COUNT_CTL:
- *v = vcpu->arch.count_ctl;
- break;
- case KVM_REG_MIPS_COUNT_RESUME:
- *v = ktime_to_ns(vcpu->arch.count_resume);
- break;
- case KVM_REG_MIPS_COUNT_HZ:
- *v = vcpu->arch.count_hz;
- break;
- case KVM_REG_MIPS_CP0_ERROREPC:
- *v = (long)kvm_read_c0_guest_errorepc(cop0);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH1:
- *v = (long)kvm_read_c0_guest_kscratch1(cop0);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH2:
- *v = (long)kvm_read_c0_guest_kscratch2(cop0);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH3:
- *v = (long)kvm_read_c0_guest_kscratch3(cop0);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH4:
- *v = (long)kvm_read_c0_guest_kscratch4(cop0);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH5:
- *v = (long)kvm_read_c0_guest_kscratch5(cop0);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH6:
- *v = (long)kvm_read_c0_guest_kscratch6(cop0);
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
- const struct kvm_one_reg *reg,
- s64 v)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- int ret = 0;
- unsigned int cur, change;
-
- switch (reg->id) {
- case KVM_REG_MIPS_CP0_INDEX:
- kvm_write_c0_guest_index(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_ENTRYLO0:
- kvm_write_c0_guest_entrylo0(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_ENTRYLO1:
- kvm_write_c0_guest_entrylo1(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_CONTEXT:
- kvm_write_c0_guest_context(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_USERLOCAL:
- kvm_write_c0_guest_userlocal(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_PAGEMASK:
- kvm_write_c0_guest_pagemask(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_WIRED:
- kvm_write_c0_guest_wired(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_HWRENA:
- kvm_write_c0_guest_hwrena(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_BADVADDR:
- kvm_write_c0_guest_badvaddr(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_ENTRYHI:
- kvm_write_c0_guest_entryhi(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_STATUS:
- kvm_write_c0_guest_status(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_INTCTL:
- /* No VInt, so no VS, read-only for now */
- break;
- case KVM_REG_MIPS_CP0_EPC:
- kvm_write_c0_guest_epc(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_PRID:
- kvm_write_c0_guest_prid(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_EBASE:
- /*
- * Allow core number to be written, but the exception base must
- * remain in guest KSeg0.
- */
- kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
- v);
- break;
- case KVM_REG_MIPS_CP0_COUNT:
- kvm_mips_write_count(vcpu, v);
- break;
- case KVM_REG_MIPS_CP0_COMPARE:
- kvm_mips_write_compare(vcpu, v, false);
- break;
- case KVM_REG_MIPS_CP0_CAUSE:
- /*
- * If the timer is stopped or started (DC bit) it must look
- * atomic with changes to the interrupt pending bits (TI, IRQ5).
- * A timer interrupt should not happen in between.
- */
- if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
- if (v & CAUSEF_DC) {
- /* disable timer first */
- kvm_mips_count_disable_cause(vcpu);
- kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
- v);
- } else {
- /* enable timer last */
- kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
- v);
- kvm_mips_count_enable_cause(vcpu);
- }
- } else {
- kvm_write_c0_guest_cause(cop0, v);
- }
- break;
- case KVM_REG_MIPS_CP0_CONFIG:
- /* read-only for now */
- break;
- case KVM_REG_MIPS_CP0_CONFIG1:
- cur = kvm_read_c0_guest_config1(cop0);
- change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
- if (change) {
- v = cur ^ change;
- kvm_write_c0_guest_config1(cop0, v);
- }
- break;
- case KVM_REG_MIPS_CP0_CONFIG2:
- /* read-only for now */
- break;
- case KVM_REG_MIPS_CP0_CONFIG3:
- cur = kvm_read_c0_guest_config3(cop0);
- change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
- if (change) {
- v = cur ^ change;
- kvm_write_c0_guest_config3(cop0, v);
- }
- break;
- case KVM_REG_MIPS_CP0_CONFIG4:
- cur = kvm_read_c0_guest_config4(cop0);
- change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
- if (change) {
- v = cur ^ change;
- kvm_write_c0_guest_config4(cop0, v);
- }
- break;
- case KVM_REG_MIPS_CP0_CONFIG5:
- cur = kvm_read_c0_guest_config5(cop0);
- change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
- if (change) {
- v = cur ^ change;
- kvm_write_c0_guest_config5(cop0, v);
- }
- break;
- case KVM_REG_MIPS_CP0_CONFIG7:
- /* writes ignored */
- break;
- case KVM_REG_MIPS_COUNT_CTL:
- ret = kvm_mips_set_count_ctl(vcpu, v);
- break;
- case KVM_REG_MIPS_COUNT_RESUME:
- ret = kvm_mips_set_count_resume(vcpu, v);
- break;
- case KVM_REG_MIPS_COUNT_HZ:
- ret = kvm_mips_set_count_hz(vcpu, v);
- break;
- case KVM_REG_MIPS_CP0_ERROREPC:
- kvm_write_c0_guest_errorepc(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH1:
- kvm_write_c0_guest_kscratch1(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH2:
- kvm_write_c0_guest_kscratch2(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH3:
- kvm_write_c0_guest_kscratch3(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH4:
- kvm_write_c0_guest_kscratch4(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH5:
- kvm_write_c0_guest_kscratch5(cop0, v);
- break;
- case KVM_REG_MIPS_CP0_KSCRATCH6:
- kvm_write_c0_guest_kscratch6(cop0, v);
- break;
- default:
- return -EINVAL;
- }
- return ret;
-}
-
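All of the Config1-Config5 cases in kvm_trap_emul_set_one_reg() above share one read-modify-write idiom: XOR out which bits the caller wants to flip, mask to the per-register writable set, and apply only those. Condensed:

	cur    = kvm_read_c0_guest_config1(cop0);
	change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu); /* writable bits that differ */
	if (change)
		kvm_write_c0_guest_config1(cop0, cur ^ change); /* flip only those */
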
-static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
- struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
- struct mm_struct *mm;
-
- /*
- * Were we in guest context? If so, restore the appropriate ASID based
- * on the mode of the Guest (Kernel/User).
- */
- if (current->flags & PF_VCPU) {
- mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
- check_switch_mmu_context(mm);
- kvm_mips_suspend_mm(cpu);
- ehb();
- }
-
- return 0;
-}
-
-static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
-{
- kvm_lose_fpu(vcpu);
-
- if (current->flags & PF_VCPU) {
- /* Restore normal Linux process memory map */
- check_switch_mmu_context(current->mm);
- kvm_mips_resume_mm(cpu);
- ehb();
- }
-
- return 0;
-}
-
-static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
- bool reload_asid)
-{
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
- struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
- struct mm_struct *mm;
- int i;
-
- if (likely(!kvm_request_pending(vcpu)))
- return;
-
- if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
- /*
- * Both kernel & user GVA mappings must be invalidated. The
- * caller is just about to check whether the ASID is stale
- * anyway so no need to reload it here.
- */
- kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
- kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
- for_each_possible_cpu(i) {
- set_cpu_context(i, kern_mm, 0);
- set_cpu_context(i, user_mm, 0);
- }
-
- /* Generate new ASID for current mode */
- if (reload_asid) {
- mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
- get_new_mmu_context(mm);
- htw_stop();
- write_c0_entryhi(cpu_asid(cpu, mm));
- TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
- htw_start();
- }
- }
-}
-
-/**
- * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
- * @vcpu: VCPU pointer.
- *
- * Call before a GVA space access outside of guest mode, to ensure that
- * asynchronous TLB flush requests are handled or delayed until completion of
- * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
- *
- * Should be called with IRQs already enabled.
- */
-void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
-{
- /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
- WARN_ON_ONCE(irqs_disabled());
-
- /*
- * The caller is about to access the GVA space, so we set the mode to
- * force TLB flush requests to send an IPI, and also disable IRQs to
- * delay IPI handling until kvm_trap_emul_gva_lockless_end().
- */
- local_irq_disable();
-
- /*
- * Make sure the read of VCPU requests is not reordered ahead of the
- * write to vcpu->mode, or we could miss a TLB flush request while
- * the requester sees the VCPU as outside of guest mode and not needing
- * an IPI.
- */
- smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
-
- /*
- * If a TLB flush has been requested (potentially while
- * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
- * before accessing the GVA space, and be sure to reload the ASID if
- * necessary as it'll be immediately used.
- *
- * TLB flush requests after this check will trigger an IPI due to the
- * mode change above, which will be delayed due to IRQs disabled.
- */
- kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
-}
-
-/**
- * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
- * @vcpu: VCPU pointer.
- *
- * Called after a GVA space access outside of guest mode. Should have a matching
- * call to kvm_trap_emul_gva_lockless_begin().
- */
-void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
-{
- /*
- * Make sure the write to vcpu->mode is not reordered in front of GVA
- * accesses, or a TLB flush requester may not think it necessary to send
- * an IPI.
- */
- smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
-
- /*
- * Now that the access to GVA space is complete, it's safe for pending
- * TLB flush request IPIs to be handled (which indicates completion).
- */
- local_irq_enable();
-}
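
The begin/end pair above is a small store-ordering protocol against concurrent TLB-flush requesters, condensed here for reference (all primitives as used in the deleted code):

	local_irq_disable();			/* hold off flush IPIs */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
						/* full barrier: publish mode
						   before reading requests */
	kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
	/* ... lockless GVA page table access ... */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
						/* order GVA accesses before
						   leaving the read section */
	local_irq_enable();			/* pending flush IPIs run now */
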
-
-static void kvm_trap_emul_vcpu_reenter(struct kvm_vcpu *vcpu)
-{
- struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
- struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
- struct mm_struct *mm;
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- int i, cpu = smp_processor_id();
- unsigned int gasid;
-
- /*
- * No need to reload ASID, IRQs are disabled already so there's no rush,
- * and we'll check if we need to regenerate below anyway before
- * re-entering the guest.
- */
- kvm_trap_emul_check_requests(vcpu, cpu, false);
-
- if (KVM_GUEST_KERNEL_MODE(vcpu)) {
- mm = kern_mm;
- } else {
- mm = user_mm;
-
- /*
- * Lazy host ASID regeneration / PT flush for guest user mode.
- * If the guest ASID has changed since the last guest usermode
- * execution, invalidate the stale TLB entries and flush GVA PT
- * entries too.
- */
- gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
- if (gasid != vcpu->arch.last_user_gasid) {
- kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
- for_each_possible_cpu(i)
- set_cpu_context(i, user_mm, 0);
- vcpu->arch.last_user_gasid = gasid;
- }
- }
-
- /*
- * Check if ASID is stale. This may happen due to a TLB flush request or
- * a lazy user MM invalidation.
- */
- check_mmu_context(mm);
-}
-
-static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
-{
- int cpu = smp_processor_id();
- int r;
-
- /* Check if we have any exceptions/interrupts pending */
- kvm_mips_deliver_interrupts(vcpu,
- kvm_read_c0_guest_cause(vcpu->arch.cop0));
-
- kvm_trap_emul_vcpu_reenter(vcpu);
-
- /*
- * We use user accessors to access guest memory, but we don't want to
- * invoke Linux page faulting.
- */
- pagefault_disable();
-
- /* Disable hardware page table walking while in guest */
- htw_stop();
-
- /*
- * While in guest context we're in the guest's address space, not the
- * host process address space, so we need to be careful not to confuse
- * e.g. cache management IPIs.
- */
- kvm_mips_suspend_mm(cpu);
-
- r = vcpu->arch.vcpu_run(vcpu);
-
- /* We may have migrated while handling guest exits */
- cpu = smp_processor_id();
-
- /* Restore normal Linux process memory map */
- check_switch_mmu_context(current->mm);
- kvm_mips_resume_mm(cpu);
-
- htw_start();
-
- pagefault_enable();
-
- return r;
-}
-
-static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
- /* exit handlers */
- .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
- .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
- .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
- .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
- .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
- .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
- .handle_syscall = kvm_trap_emul_handle_syscall,
- .handle_res_inst = kvm_trap_emul_handle_res_inst,
- .handle_break = kvm_trap_emul_handle_break,
- .handle_trap = kvm_trap_emul_handle_trap,
- .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
- .handle_fpe = kvm_trap_emul_handle_fpe,
- .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
- .handle_guest_exit = kvm_trap_emul_no_handler,
-
- .hardware_enable = kvm_trap_emul_hardware_enable,
- .hardware_disable = kvm_trap_emul_hardware_disable,
- .check_extension = kvm_trap_emul_check_extension,
- .vcpu_init = kvm_trap_emul_vcpu_init,
- .vcpu_uninit = kvm_trap_emul_vcpu_uninit,
- .vcpu_setup = kvm_trap_emul_vcpu_setup,
- .flush_shadow_all = kvm_trap_emul_flush_shadow_all,
- .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
- .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
- .queue_timer_int = kvm_mips_queue_timer_int_cb,
- .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
- .queue_io_int = kvm_mips_queue_io_int_cb,
- .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
- .irq_deliver = kvm_mips_irq_deliver_cb,
- .irq_clear = kvm_mips_irq_clear_cb,
- .num_regs = kvm_trap_emul_num_regs,
- .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
- .get_one_reg = kvm_trap_emul_get_one_reg,
- .set_one_reg = kvm_trap_emul_set_one_reg,
- .vcpu_load = kvm_trap_emul_vcpu_load,
- .vcpu_put = kvm_trap_emul_vcpu_put,
- .vcpu_run = kvm_trap_emul_vcpu_run,
- .vcpu_reenter = kvm_trap_emul_vcpu_reenter,
-};
-
-int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
-{
- *install_callbacks = &kvm_trap_emul_callbacks;
- return 0;
-}
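
With trap_emul.c gone, kvm_mips_emulation_init() has a single possible backend; the structure is a plain ops table through which the core KVM/MIPS code dispatches. A self-contained sketch of the pattern (names here are illustrative, not kernel API):

	struct vcpu;				/* opaque to core code */

	struct mips_ops {
		int  (*vcpu_run)(struct vcpu *v);
		void (*vcpu_reenter)(struct vcpu *v);
	};

	static const struct mips_ops *ops;	/* installed once at init */

	static int core_run(struct vcpu *v)
	{
		ops->vcpu_reenter(v);
		return ops->vcpu_run(v);	/* e.g. the VZ implementation */
	}
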
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 2ffbe9264a31..43cad10b877d 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -292,9 +292,8 @@ static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
switch (priority) {
case MIPS_EXC_INT_TIMER:
/*
- * Call to kvm_write_c0_guest_compare() clears Cause.TI in
- * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
- * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
+ * Explicitly clear irq associated with Cause.IP[IPTI]
+ * if GuestCtl2 virtual interrupt register not
* supported or if not using GuestCtl2 Hardware Clear.
*/
if (cpu_has_guestctl2) {
@@ -3211,32 +3210,22 @@ static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
return 0;
}
-static void kvm_vz_flush_shadow_all(struct kvm *kvm)
+static void kvm_vz_prepare_flush_shadow(struct kvm *kvm)
{
- if (cpu_has_guestid) {
- /* Flush GuestID for each VCPU individually */
- kvm_flush_remote_tlbs(kvm);
- } else {
+ if (!cpu_has_guestid) {
/*
* For each CPU there is a single GPA ASID used by all VCPUs in
* the VM, so it doesn't make sense for the VCPUs to handle
* invalidation of these ASIDs individually.
*
* Instead mark all CPUs as needing ASID invalidation in
- * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
+ * asid_flush_mask, and kvm_flush_remote_tlbs(kvm) will
* kick any running VCPUs so they check asid_flush_mask.
*/
cpumask_setall(&kvm->arch.asid_flush_mask);
- kvm_flush_remote_tlbs(kvm);
}
}
-static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *slot)
-{
- kvm_vz_flush_shadow_all(kvm);
-}
-
static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
{
int cpu = smp_processor_id();
@@ -3292,8 +3281,7 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = {
.vcpu_init = kvm_vz_vcpu_init,
.vcpu_uninit = kvm_vz_vcpu_uninit,
.vcpu_setup = kvm_vz_vcpu_setup,
- .flush_shadow_all = kvm_vz_flush_shadow_all,
- .flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
+ .prepare_flush_shadow = kvm_vz_prepare_flush_shadow,
.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
.queue_timer_int = kvm_vz_queue_timer_int_cb,
.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index aeb1b989cd4e..63dccb2ed08b 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -12,6 +12,7 @@
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <lantiq_soc.h>
#include <xway_dma.h>
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 88065ee433cd..e19fb98b5d38 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -661,8 +661,14 @@ LEAF(memcpy) /* a0=dst a1=src a2=len */
EXPORT_SYMBOL(memcpy)
move v0, dst /* return value */
.L__memcpy:
-FEXPORT(__copy_user)
-EXPORT_SYMBOL(__copy_user)
+#ifndef CONFIG_EVA
+FEXPORT(__raw_copy_from_user)
+EXPORT_SYMBOL(__raw_copy_from_user)
+FEXPORT(__raw_copy_to_user)
+EXPORT_SYMBOL(__raw_copy_to_user)
+FEXPORT(__raw_copy_in_user)
+EXPORT_SYMBOL(__raw_copy_in_user)
+#endif
/* Legacy Mode, user <-> user */
__BUILD_COPY_USER LEGACY_MODE USEROP USEROP
@@ -681,10 +687,10 @@ EXPORT_SYMBOL(__copy_user)
* __copy_from_user (EVA)
*/
-LEAF(__copy_from_user_eva)
-EXPORT_SYMBOL(__copy_from_user_eva)
+LEAF(__raw_copy_from_user)
+EXPORT_SYMBOL(__raw_copy_from_user)
__BUILD_COPY_USER EVA_MODE USEROP KERNELOP
-END(__copy_from_user_eva)
+END(__raw_copy_from_user)
@@ -692,18 +698,18 @@ END(__copy_from_user_eva)
* __copy_to_user (EVA)
*/
-LEAF(__copy_to_user_eva)
-EXPORT_SYMBOL(__copy_to_user_eva)
+LEAF(__raw_copy_to_user)
+EXPORT_SYMBOL(__raw_copy_to_user)
__BUILD_COPY_USER EVA_MODE KERNELOP USEROP
-END(__copy_to_user_eva)
+END(__raw_copy_to_user)
/*
* __copy_in_user (EVA)
*/
-LEAF(__copy_in_user_eva)
-EXPORT_SYMBOL(__copy_in_user_eva)
+LEAF(__raw_copy_in_user)
+EXPORT_SYMBOL(__raw_copy_in_user)
__BUILD_COPY_USER EVA_MODE USEROP USEROP
-END(__copy_in_user_eva)
+END(__raw_copy_in_user)
#endif
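
The rename above reflects the set_fs() removal: instead of one __copy_user plus *_eva variants selected at run time, there are three fixed entry points, which on !CONFIG_EVA all alias the single LEGACY_MODE body. A hedged sketch of the C-side wrapper shape this enables (assumed, not part of this hunk):

	/* Sketch, assuming the uaccess wrappers call the raw entry points
	 * directly now that there is no user/kernel address-space switch.
	 */
	static inline unsigned long
	raw_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		return __raw_copy_from_user(to, (__force const void *)from, n);
	}
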
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index d5449e8a3dfc..b0baa3c79fad 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -314,9 +314,6 @@ EXPORT_SYMBOL(memset)
#ifndef CONFIG_EVA
FEXPORT(__bzero)
EXPORT_SYMBOL(__bzero)
-#else
-FEXPORT(__bzero_kernel)
-EXPORT_SYMBOL(__bzero_kernel)
#endif
__BUILD_BZERO LEGACY_MODE
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index de03838b343b..a9b72eacfc0b 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -37,7 +37,7 @@
*/
notrace void arch_local_irq_disable(void)
{
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -53,7 +53,7 @@ notrace void arch_local_irq_disable(void)
: /* no inputs */
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
}
EXPORT_SYMBOL(arch_local_irq_disable);
@@ -61,7 +61,7 @@ notrace unsigned long arch_local_irq_save(void)
{
unsigned long flags;
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -78,7 +78,7 @@ notrace unsigned long arch_local_irq_save(void)
: /* no inputs */
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
return flags;
}
@@ -88,7 +88,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
- preempt_disable();
+ preempt_disable_notrace();
__asm__ __volatile__(
" .set push \n"
@@ -106,7 +106,7 @@ notrace void arch_local_irq_restore(unsigned long flags)
: "0" (flags)
: "memory");
- preempt_enable();
+ preempt_enable_notrace();
}
EXPORT_SYMBOL(arch_local_irq_restore);
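
These IRQ helpers are themselves called from function-tracing paths, so plain preempt_disable(), which is traceable and can recurse into the tracer, must be avoided; the _notrace variants keep the preempt count without emitting trace events. The general idiom:

	/* Sketch: a notrace helper must also use non-traceable callees. */
	notrace void my_notrace_helper(void)
	{
		preempt_disable_notrace();
		/* ... work that must not re-enter the tracer ... */
		preempt_enable_notrace();
	}
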
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index acdff66bd5d2..556acf684d7b 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -29,19 +29,17 @@
* it happens, at most a few bytes of the exception handlers will be copied.
*/
- .macro __BUILD_STRNCPY_ASM func
-LEAF(__strncpy_from_\func\()_asm)
- LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
- and v0, a1
- bnez v0, .Lfault\@
-
+LEAF(__strncpy_from_user_asm)
move t0, zero
move v1, a1
-.ifeqs "\func","kernel"
-1: EX(lbu, v0, (v1), .Lfault\@)
-.else
-1: EX(lbue, v0, (v1), .Lfault\@)
-.endif
+#ifdef CONFIG_EVA
+ .set push
+ .set eva
+1: EX(lbue, v0, (v1), .Lfault)
+ .set pop
+#else
+1: EX(lbu, v0, (v1), .Lfault)
+#endif
PTR_ADDIU v1, 1
R10KCBARRIER(0(ra))
sb v0, (a0)
@@ -51,35 +49,17 @@ LEAF(__strncpy_from_\func\()_asm)
bne t0, a2, 1b
2: PTR_ADDU v0, a1, t0
xor v0, a1
- bltz v0, .Lfault\@
+ bltz v0, .Lfault
move v0, t0
jr ra # return n
- END(__strncpy_from_\func\()_asm)
+ END(__strncpy_from_user_asm)
-.Lfault\@:
+.Lfault:
li v0, -EFAULT
jr ra
.section __ex_table,"a"
- PTR 1b, .Lfault\@
+ PTR 1b, .Lfault
.previous
- .endm
-
-#ifndef CONFIG_EVA
- /* Set aliases */
- .global __strncpy_from_user_asm
- .set __strncpy_from_user_asm, __strncpy_from_kernel_asm
-EXPORT_SYMBOL(__strncpy_from_user_asm)
-#endif
-
-__BUILD_STRNCPY_ASM kernel
-EXPORT_SYMBOL(__strncpy_from_kernel_asm)
-
-#ifdef CONFIG_EVA
- .set push
- .set eva
-__BUILD_STRNCPY_ASM user
- .set pop
-EXPORT_SYMBOL(__strncpy_from_user_asm)
-#endif
+ EXPORT_SYMBOL(__strncpy_from_user_asm)
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index e1bacf5a3abe..92b63f20ec05 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -26,12 +26,7 @@
* bytes. There's nothing secret there. On 64-bit accessing beyond
* the maximum is a tad hairier ...
*/
- .macro __BUILD_STRNLEN_ASM func
-LEAF(__strnlen_\func\()_asm)
- LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
- and v0, a0
- bnez v0, .Lfault\@
-
+LEAF(__strnlen_user_asm)
move v0, a0
PTR_ADDU a1, a0 # stop pointer
1:
@@ -40,11 +35,14 @@ LEAF(__strnlen_\func\()_asm)
li AT, 1
#endif
beq v0, a1, 1f # limit reached?
-.ifeqs "\func", "kernel"
- EX(lb, t0, (v0), .Lfault\@)
-.else
- EX(lbe, t0, (v0), .Lfault\@)
-.endif
+#ifdef CONFIG_EVA
+ .set push
+ .set eva
+ EX(lbe, t0, (v0), .Lfault)
+ .set pop
+#else
+ EX(lb, t0, (v0), .Lfault)
+#endif
.set noreorder
bnez t0, 1b
1:
@@ -57,28 +55,10 @@ LEAF(__strnlen_\func\()_asm)
.set reorder
PTR_SUBU v0, a0
jr ra
- END(__strnlen_\func\()_asm)
+ END(__strnlen_user_asm)
-.Lfault\@:
+.Lfault:
move v0, zero
jr ra
- .endm
-
-#ifndef CONFIG_EVA
- /* Set aliases */
- .global __strnlen_user_asm
- .set __strnlen_user_asm, __strnlen_kernel_asm
-EXPORT_SYMBOL(__strnlen_user_asm)
-#endif
-
-__BUILD_STRNLEN_ASM kernel
-EXPORT_SYMBOL(__strnlen_kernel_asm)
-
-#ifdef CONFIG_EVA
- .set push
- .set eva
-__BUILD_STRNLEN_ASM user
- .set pop
-EXPORT_SYMBOL(__strnlen_user_asm)
-#endif
+ EXPORT_SYMBOL(__strnlen_user_asm)
diff --git a/arch/mips/loongson64/Makefile b/arch/mips/loongson64/Makefile
index cc76944b1a9d..e806280bbb85 100644
--- a/arch/mips/loongson64/Makefile
+++ b/arch/mips/loongson64/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for Loongson-3 family machines
#
-obj-$(CONFIG_MACH_LOONGSON64) += cop2-ex.o platform.o dma.o \
+obj-$(CONFIG_MACH_LOONGSON64) += cop2-ex.o dma.o \
setup.o init.o env.o time.o reset.o \
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/mips/loongson64/env.c b/arch/mips/loongson64/env.c
index 51a5d050a94c..c961e2999f15 100644
--- a/arch/mips/loongson64/env.c
+++ b/arch/mips/loongson64/env.c
@@ -43,7 +43,18 @@ const char *get_system_type(void)
return "Generic Loongson64 System";
}
-void __init prom_init_env(void)
+
+void __init prom_dtb_init_env(void)
+{
+ if ((fw_arg2 < CKSEG0 || fw_arg2 > CKSEG1)
+ && (fw_arg2 < XKPHYS || fw_arg2 > XKSEG))
+
+ loongson_fdt_blob = __dtb_loongson64_2core_2k1000_begin;
+ else
+ loongson_fdt_blob = (void *)fw_arg2;
+}
+
+void __init prom_lefi_init_env(void)
{
struct boot_params *boot_p;
struct loongson_params *loongson_p;
@@ -51,7 +62,7 @@ void __init prom_init_env(void)
struct efi_cpuinfo_loongson *ecpu;
struct irq_source_routing_table *eirq_source;
u32 id;
- u16 vendor, device;
+ u16 vendor;
/* firmware arguments are initialized in head.S */
boot_p = (struct boot_params *)fw_arg2;
@@ -95,7 +106,6 @@ void __init prom_init_env(void)
loongson_freqctrl[1] = 0x900010001fe001d0;
loongson_freqctrl[2] = 0x900020001fe001d0;
loongson_freqctrl[3] = 0x900030001fe001d0;
- loongson_sysconf.ht_control_base = 0x90000EFDFB000000;
loongson_sysconf.workarounds = WORKAROUND_CPUFREQ;
break;
case Legacy_3B:
@@ -118,7 +128,6 @@ void __init prom_init_env(void)
loongson_freqctrl[1] = 0x900020001fe001d0;
loongson_freqctrl[2] = 0x900040001fe001d0;
loongson_freqctrl[3] = 0x900060001fe001d0;
- loongson_sysconf.ht_control_base = 0x90001EFDFB000000;
loongson_sysconf.workarounds = WORKAROUND_CPUHOTPLUG;
break;
default:
@@ -136,9 +145,6 @@ void __init prom_init_env(void)
loongson_sysconf.cores_per_node - 1) /
loongson_sysconf.cores_per_node;
- loongson_sysconf.pci_mem_start_addr = eirq_source->pci_mem_start_addr;
- loongson_sysconf.pci_mem_end_addr = eirq_source->pci_mem_end_addr;
- loongson_sysconf.pci_io_base = eirq_source->pci_io_start_addr;
loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits;
if (loongson_sysconf.dma_mask_bits < 32 ||
loongson_sysconf.dma_mask_bits > 64)
@@ -153,29 +159,13 @@ void __init prom_init_env(void)
loongson_sysconf.poweroff_addr, loongson_sysconf.restart_addr,
loongson_sysconf.vgabios_addr);
- memset(loongson_sysconf.ecname, 0, 32);
- if (esys->has_ec)
- memcpy(loongson_sysconf.ecname, esys->ec_name, 32);
loongson_sysconf.workarounds |= esys->workarounds;
- loongson_sysconf.nr_uarts = esys->nr_uarts;
- if (esys->nr_uarts < 1 || esys->nr_uarts > MAX_UARTS)
- loongson_sysconf.nr_uarts = 1;
- memcpy(loongson_sysconf.uarts, esys->uarts,
- sizeof(struct uart_device) * loongson_sysconf.nr_uarts);
-
- loongson_sysconf.nr_sensors = esys->nr_sensors;
- if (loongson_sysconf.nr_sensors > MAX_SENSORS)
- loongson_sysconf.nr_sensors = 0;
- if (loongson_sysconf.nr_sensors)
- memcpy(loongson_sysconf.sensors, esys->sensors,
- sizeof(struct sensor_device) * loongson_sysconf.nr_sensors);
pr_info("CpuClock = %u\n", cpu_clock_freq);
/* Read the ID of PCI host bridge to detect bridge type */
id = readl(HOST_BRIDGE_CONFIG_ADDR);
vendor = id & 0xffff;
- device = (id >> 16) & 0xffff;
switch (vendor) {
case PCI_VENDOR_ID_LOONGSON:
diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
index cfa788bca871..76e0a9636a0e 100644
--- a/arch/mips/loongson64/init.c
+++ b/arch/mips/loongson64/init.c
@@ -52,6 +52,10 @@ void __init szmem(unsigned int node)
static unsigned long num_physpages;
u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;
+ /* Otherwise the memory map comes from the DTB */
+ if (loongson_sysconf.fw_interface != LOONGSON_LEFI)
+ return;
+
/* Parse memory information and activate */
for (i = 0; i < loongson_memmap->nr_map; i++) {
node_id = loongson_memmap->map[i].node_id;
@@ -94,12 +98,20 @@ static void __init prom_init_memory(void)
void __init prom_init(void)
{
fw_init_cmdline();
- prom_init_env();
+
+ if (fw_arg2 == 0 || (fdt_magic(fw_arg2) == FDT_MAGIC)) {
+ loongson_sysconf.fw_interface = LOONGSON_DTB;
+ prom_dtb_init_env();
+ } else {
+ loongson_sysconf.fw_interface = LOONGSON_LEFI;
+ prom_lefi_init_env();
+ }
/* init base address of io space */
set_io_port_base(PCI_IOBASE);
- loongson_sysconf.early_config();
+ if (loongson_sysconf.early_config)
+ loongson_sysconf.early_config();
#ifdef CONFIG_NUMA
prom_init_numa_memory();
@@ -108,7 +120,10 @@ void __init prom_init(void)
#endif
/* Hardcode to CPU UART 0 */
- setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE + 0x1e0), 0, 1024);
+ if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
+ setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE), 0, 1024);
+ else
+ setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE + 0x1e0), 0, 1024);
register_smp_ops(&loongson3_smp_ops);
board_nmi_handler_setup = mips_nmi_setup;
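
prom_init() now sniffs fw_arg2 to pick the firmware interface: LEFI passes a boot_params pointer, DTB firmware passes a flattened device tree blob (or nothing, falling back to a built-in DTB). The detection condensed from the hunk above (the helper name is illustrative):

	/* A DTB argument is either absent or starts with FDT_MAGIC (0xd00dfeed). */
	static bool fw_arg_is_dtb(unsigned long arg)
	{
		return arg == 0 || fdt_magic(arg) == FDT_MAGIC;
	}
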
@@ -126,7 +141,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
return -ENOMEM;
range->fwnode = fwnode;
- range->size = size;
+ range->size = size = round_up(size, PAGE_SIZE);
range->hw_start = hw_start;
range->flags = LOGIC_PIO_CPU_MMIO;
diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
index a8f57bf01285..e8e3e48c5333 100644
--- a/arch/mips/loongson64/numa.c
+++ b/arch/mips/loongson64/numa.c
@@ -27,7 +27,6 @@
#include <boot_param.h>
#include <loongson.h>
-static struct pglist_data prealloc__node_data[MAX_NUMNODES];
unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
EXPORT_SYMBOL(__node_distances);
struct pglist_data *__node_data[MAX_NUMNODES];
@@ -84,8 +83,12 @@ static void __init init_topology_matrix(void)
static void __init node_mem_init(unsigned int node)
{
+ struct pglist_data *nd;
unsigned long node_addrspace_offset;
unsigned long start_pfn, end_pfn;
+ unsigned long nd_pa;
+ int tnid;
+ const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
node_addrspace_offset = nid_to_addrbase(node);
pr_info("Node%d's addrspace_offset is 0x%lx\n",
@@ -95,8 +98,16 @@ static void __init node_mem_init(unsigned int node)
pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
node, start_pfn, end_pfn);
- __node_data[node] = prealloc__node_data + node;
-
+ nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, node);
+ if (!nd_pa)
+ panic("Cannot allocate %zu bytes for node %d data\n",
+ nd_size, node);
+ nd = __va(nd_pa);
+ memset(nd, 0, sizeof(struct pglist_data));
+ tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
+ if (tnid != node)
+ pr_info("NODE_DATA(%d) on node %d\n", node, tnid);
+ __node_data[node] = nd;
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
@@ -118,6 +129,9 @@ static void __init node_mem_init(unsigned int node)
if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
memblock_reserve((node_addrspace_offset | 0xfe000000),
32 << 20);
+
+ /* Reserve pfn range 0~node[0]->node_start_pfn */
+ memblock_reserve(0, PAGE_SIZE * start_pfn);
}
}
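
The static prealloc__node_data array is replaced with the pattern most NUMA architectures use: allocate each node's pglist_data from memblock on that node, and report if it landed elsewhere. The core of the hunk, condensed:

	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	phys_addr_t nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, node);

	if (!nd_pa)
		panic("Cannot allocate %zu bytes for node %d data\n", nd_size, node);
	__node_data[node] = memset(__va(nd_pa), 0, sizeof(struct pglist_data));
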
@@ -167,7 +181,6 @@ void __init mem_init(void)
high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
memblock_free_all();
setup_zero_pages(); /* This comes from node 0 */
- mem_init_print_info(NULL);
}
/* All PCI device belongs to logical Node-0 */
diff --git a/arch/mips/loongson64/platform.c b/arch/mips/loongson64/platform.c
deleted file mode 100644
index 9674ae1361a8..000000000000
--- a/arch/mips/loongson64/platform.c
+++ /dev/null
@@ -1,42 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2009 Lemote Inc.
- * Author: Wu Zhangjin, wuzhangjin@gmail.com
- * Xiang Yu, xiangy@lemote.com
- * Chen Huacai, chenhc@lemote.com
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <asm/bootinfo.h>
-#include <boot_param.h>
-#include <loongson_hwmon.h>
-#include <workarounds.h>
-
-static int __init loongson3_platform_init(void)
-{
- int i;
- struct platform_device *pdev;
-
- if (loongson_sysconf.ecname[0] != '\0')
- platform_device_register_simple(loongson_sysconf.ecname, -1, NULL, 0);
-
- for (i = 0; i < loongson_sysconf.nr_sensors; i++) {
- if (loongson_sysconf.sensors[i].type > SENSOR_FAN)
- continue;
-
- pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
- if (!pdev)
- return -ENOMEM;
-
- pdev->name = loongson_sysconf.sensors[i].name;
- pdev->id = loongson_sysconf.sensors[i].id;
- pdev->dev.platform_data = &loongson_sysconf.sensors[i];
- platform_device_register(pdev);
- }
-
- return 0;
-}
-
-arch_initcall(loongson3_platform_init);
diff --git a/arch/mips/loongson64/reset.c b/arch/mips/loongson64/reset.c
index 3bb8a1ed9348..758d5d26aaaa 100644
--- a/arch/mips/loongson64/reset.c
+++ b/arch/mips/loongson64/reset.c
@@ -6,9 +6,14 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Zhangjin Wu, wuzhangjin@gmail.com
*/
+#include <linux/cpu.h>
+#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/kexec.h>
#include <linux/pm.h>
+#include <linux/slab.h>
+#include <asm/bootinfo.h>
#include <asm/idle.h>
#include <asm/reboot.h>
@@ -47,12 +52,123 @@ static void loongson_halt(void)
}
}
+#ifdef CONFIG_KEXEC
+
+/* 0x80000000~0x80200000 is safe */
+#define MAX_ARGS 64
+#define KEXEC_CTRL_CODE 0xFFFFFFFF80100000UL
+#define KEXEC_ARGV_ADDR 0xFFFFFFFF80108000UL
+#define KEXEC_ARGV_SIZE COMMAND_LINE_SIZE
+#define KEXEC_ENVP_SIZE 4800
+
+static int kexec_argc;
+static int kdump_argc;
+static void *kexec_argv;
+static void *kdump_argv;
+static void *kexec_envp;
+
+static int loongson_kexec_prepare(struct kimage *image)
+{
+ int i, argc = 0;
+ unsigned int *argv;
+ char *str, *ptr, *bootloader = "kexec";
+
+ /* argv at offset 0, argv[] at offset KEXEC_ARGV_SIZE/2 */
+ if (image->type == KEXEC_TYPE_DEFAULT)
+ argv = (unsigned int *)kexec_argv;
+ else
+ argv = (unsigned int *)kdump_argv;
+
+ argv[argc++] = (unsigned int)(KEXEC_ARGV_ADDR + KEXEC_ARGV_SIZE/2);
+
+ for (i = 0; i < image->nr_segments; i++) {
+ if (!strncmp(bootloader, (char *)image->segment[i].buf,
+ strlen(bootloader))) {
+ /*
+ * convert command line string to array
+ * of parameters (as the bootloader does).
+ */
+ int offt;
+ str = (char *)argv + KEXEC_ARGV_SIZE/2;
+ memcpy(str, image->segment[i].buf, KEXEC_ARGV_SIZE/2);
+ ptr = strchr(str, ' ');
+
+ while (ptr && (argc < MAX_ARGS)) {
+ *ptr = '\0';
+ if (ptr[1] != ' ') {
+ offt = (int)(ptr - str + 1);
+ argv[argc] = KEXEC_ARGV_ADDR + KEXEC_ARGV_SIZE/2 + offt;
+ argc++;
+ }
+ ptr = strchr(ptr + 1, ' ');
+ }
+ break;
+ }
+ }
+
+ if (image->type == KEXEC_TYPE_DEFAULT)
+ kexec_argc = argc;
+ else
+ kdump_argc = argc;
+
+ /* kexec/kdump need a safe page to save reboot_code_buffer */
+ image->control_code_page = virt_to_page((void *)KEXEC_CTRL_CODE);
+
+ return 0;
+}
+
+static void loongson_kexec_shutdown(void)
+{
+#ifdef CONFIG_SMP
+ int cpu;
+
+ /* Bring offline CPUs up so every CPU ends up in reboot_code_buffer */
+ for_each_possible_cpu(cpu)
+ if (!cpu_online(cpu))
+ cpu_device_up(get_cpu_device(cpu));
+
+ secondary_kexec_args[0] = TO_UNCAC(0x3ff01000);
+#endif
+ kexec_args[0] = kexec_argc;
+ kexec_args[1] = fw_arg1;
+ kexec_args[2] = fw_arg2;
+ memcpy((void *)fw_arg1, kexec_argv, KEXEC_ARGV_SIZE);
+ memcpy((void *)fw_arg2, kexec_envp, KEXEC_ENVP_SIZE);
+}
+
+static void loongson_crash_shutdown(struct pt_regs *regs)
+{
+ default_machine_crash_shutdown(regs);
+ kexec_args[0] = kdump_argc;
+ kexec_args[1] = fw_arg1;
+ kexec_args[2] = fw_arg2;
+#ifdef CONFIG_SMP
+ secondary_kexec_args[0] = TO_UNCAC(0x3ff01000);
+#endif
+ memcpy((void *)fw_arg1, kdump_argv, KEXEC_ARGV_SIZE);
+ memcpy((void *)fw_arg2, kexec_envp, KEXEC_ENVP_SIZE);
+}
+
+#endif
+
static int __init mips_reboot_setup(void)
{
_machine_restart = loongson_restart;
_machine_halt = loongson_halt;
pm_power_off = loongson_poweroff;
+#ifdef CONFIG_KEXEC
+ kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL);
+ kdump_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL);
+ kexec_envp = kmalloc(KEXEC_ENVP_SIZE, GFP_KERNEL);
+ fw_arg1 = KEXEC_ARGV_ADDR;
+ memcpy(kexec_envp, (void *)fw_arg2, KEXEC_ENVP_SIZE);
+
+ _machine_kexec_prepare = loongson_kexec_prepare;
+ _machine_kexec_shutdown = loongson_kexec_shutdown;
+ _machine_crash_shutdown = loongson_crash_shutdown;
+#endif
+
return 0;
}
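The argv marshalling above mirrors what the Loongson firmware does at boot: each token of the "kexec ..." command line is NUL-terminated in place and a fixed address inside the KEXEC_ARGV_ADDR window is recorded for it, with runs of spaces skipped. A minimal user-space model of the same splitting logic (plain pointers stand in for the fixed physical addresses):

	#include <stdio.h>
	#include <string.h>

	#define MAX_ARGS 64

	/* Split a command line in place, as loongson_kexec_prepare()
	 * does; consecutive spaces do not produce empty arguments. */
	static int split_cmdline(char *str, char *argv[])
	{
		int argc = 0;
		char *ptr = strchr(str, ' ');

		argv[argc++] = str;
		while (ptr && argc < MAX_ARGS) {
			*ptr = '\0';
			if (ptr[1] != ' ')
				argv[argc++] = ptr + 1;
			ptr = strchr(ptr + 1, ' ');
		}
		return argc;
	}

	int main(void)
	{
		char cmdline[] = "kexec root=/dev/sda1  console=ttyS0,115200";
		char *argv[MAX_ARGS];
		int i, argc = split_cmdline(cmdline, argv);

		for (i = 0; i < argc; i++)
			printf("argv[%d] = %s\n", i, argv[i]);
		return 0;
	}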
diff --git a/arch/mips/loongson64/smp.c b/arch/mips/loongson64/smp.c
index 6acde65f601b..09ebe84a17fe 100644
--- a/arch/mips/loongson64/smp.c
+++ b/arch/mips/loongson64/smp.c
@@ -51,11 +51,11 @@ static uint32_t core0_c0count[NR_CPUS];
__wbflush(); \
} while (0)
-u32 (*ipi_read_clear)(int cpu);
-void (*ipi_write_action)(int cpu, u32 action);
-void (*ipi_write_enable)(int cpu);
-void (*ipi_clear_buf)(int cpu);
-void (*ipi_write_buf)(int cpu, struct task_struct *idle);
+static u32 (*ipi_read_clear)(int cpu);
+static void (*ipi_write_action)(int cpu, u32 action);
+static void (*ipi_write_enable)(int cpu);
+static void (*ipi_clear_buf)(int cpu);
+static void (*ipi_write_buf)(int cpu, struct task_struct *idle);
/* send mail via Mail_Send register for 3A4000+ CPU */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
diff --git a/arch/mips/loongson64/time.c b/arch/mips/loongson64/time.c
index 91e842b58365..f6d2c1e30570 100644
--- a/arch/mips/loongson64/time.c
+++ b/arch/mips/loongson64/time.c
@@ -11,9 +11,33 @@
#include <asm/hpet.h>
#include <loongson.h>
+#include <linux/clk.h>
+#include <linux/of_clk.h>
void __init plat_time_init(void)
{
+ struct clk *clk;
+ struct device_node *np;
+
+ if (loongson_sysconf.fw_interface == LOONGSON_DTB) {
+ of_clk_init(NULL);
+
+ np = of_get_cpu_node(0, NULL);
+ if (!np) {
+ pr_err("Failed to get CPU node\n");
+ return;
+ }
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk));
+ return;
+ }
+
+ cpu_clock_freq = clk_get_rate(clk);
+ clk_put(clk);
+ }
+
/* setup mips r4k timer */
mips_hpt_frequency = cpu_clock_freq / 2;
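The halving above follows from the MIPS R4K cp0 Count register incrementing once every two CPU cycles (true of most MIPS cores, assumed here); for example, with a DT-reported 880 MHz CPU clock:

	cpu_clock_freq     = 880000000;		/* from clk_get_rate() */
	mips_hpt_frequency = 880000000 / 2;	/* = 440 MHz timer */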
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 865926a37775..4acc4f3d31f8 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -22,6 +22,10 @@ else
obj-y += uasm-mips.o
endif
+ifndef CONFIG_EVA
+obj-y += maccess.o
+endif
+
obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
obj-$(CONFIG_64BIT) += ioremap64.o pgtable-64.o
obj-$(CONFIG_HIGHMEM) += highmem.o
@@ -40,3 +44,5 @@ obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
obj-$(CONFIG_SCACHE_DEBUGFS) += sc-debugfs.o
+
+obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 7719d632df8d..830ab91e574f 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -15,6 +15,7 @@
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/highmem.h>
+#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
@@ -157,31 +158,29 @@ unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);
#define PM(p) __pgprot(_page_cachable_default | (p))
-#define PVA(p) PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
static inline void setup_protection_map(void)
{
protection_map[0] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[1] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[2] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[3] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[4] = PVA(_PAGE_PRESENT);
- protection_map[5] = PVA(_PAGE_PRESENT);
- protection_map[6] = PVA(_PAGE_PRESENT);
- protection_map[7] = PVA(_PAGE_PRESENT);
+ protection_map[1] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[2] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+ protection_map[3] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[4] = PM(_PAGE_PRESENT);
+ protection_map[5] = PM(_PAGE_PRESENT);
+ protection_map[6] = PM(_PAGE_PRESENT);
+ protection_map[7] = PM(_PAGE_PRESENT);
protection_map[8] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
- protection_map[9] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
- protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+ protection_map[9] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
+ protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
_PAGE_NO_READ);
- protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
- protection_map[12] = PVA(_PAGE_PRESENT);
- protection_map[13] = PVA(_PAGE_PRESENT);
- protection_map[14] = PVA(_PAGE_PRESENT);
- protection_map[15] = PVA(_PAGE_PRESENT);
+ protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+ protection_map[12] = PM(_PAGE_PRESENT);
+ protection_map[13] = PM(_PAGE_PRESENT);
+ protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+ protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
}
-#undef _PVA
#undef PM
void cpu_cache_init(void)
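For context (standard vm_flags bit layout, not something this patch introduces): protection_map[] is indexed by the low four protection bits, which is why only the shared, writable entries carry _PAGE_WRITE after the change:

	/*
	 * index bit 0 = VM_READ, bit 1 = VM_WRITE,
	 * bit 2 = VM_EXEC, bit 3 = VM_SHARED
	 *
	 * Entries 0-7 are private mappings: writes fault and COW, so
	 * none of them sets _PAGE_WRITE.  Entries 8-15 are shared, so
	 * the writable ones (10/11/14/15) set _PAGE_WRITE directly.
	 */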
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c
index 212f3ce75a6b..3c4fc97b9f39 100644
--- a/arch/mips/mm/dma-noncoherent.c
+++ b/arch/mips/mm/dma-noncoherent.c
@@ -32,6 +32,7 @@ static inline bool cpu_needs_post_dma_flush(void)
case CPU_R12000:
case CPU_BMIPS5000:
case CPU_LOONGSON2EF:
+ case CPU_XBURST:
return true;
default:
/*
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index b9f76f433617..7eaff5b07873 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -21,8 +21,8 @@
#include <asm/tlb.h>
#include <asm/tlbflush.h>
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
- unsigned long sz)
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
p4d_t *p4d;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 5cb73bf74a8b..19347dc6bbf8 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -394,7 +394,7 @@ void maar_init(void)
}
}
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
@@ -454,9 +454,6 @@ void __init mem_init(void)
BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (_PFN_SHIFT > PAGE_SHIFT));
#ifdef CONFIG_HIGHMEM
-#ifdef CONFIG_DISCONTIGMEM
-#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
-#endif
max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
max_mapnr = max_low_pfn;
@@ -467,7 +464,6 @@ void __init mem_init(void)
memblock_free_all();
setup_zero_pages(); /* Setup zeroed pages. */
mem_init_free_highmem();
- mem_init_print_info(NULL);
#ifdef CONFIG_64BIT
if ((unsigned long) &_text > (unsigned long) CKSEG0)
@@ -477,7 +473,7 @@ void __init mem_init(void)
0x80000000 - 4, KCORE_TEXT);
#endif
}
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+#endif /* !CONFIG_NUMA */
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
diff --git a/arch/mips/mm/maccess.c b/arch/mips/mm/maccess.c
new file mode 100644
index 000000000000..58173842c6be
--- /dev/null
+++ b/arch/mips/mm/maccess.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+{
+ /* highest bit set means kernel space */
+ return (unsigned long)unsafe_src >> (BITS_PER_LONG - 1);
+}
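copy_from_kernel_nofault() consults this hook before probing, so user pointers are rejected up front; on MIPS every kernel segment (KSEG0/1/2 on 32-bit, XKPHYS/XKSEG/CKSEG on 64-bit) has the top address bit set, while user addresses never do. A kernel-context sketch of a caller (hypothetical helper, not part of the patch):

	#include <linux/uaccess.h>

	/* Safely peek at a possibly-bogus kernel address; user-space
	 * pointers are refused with -ERANGE via the hook above. */
	static int peek_kernel_word(unsigned long addr, unsigned long *val)
	{
		return copy_from_kernel_nofault(val, (void *)addr,
						sizeof(*val));
	}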
diff --git a/arch/mips/mm/physaddr.c b/arch/mips/mm/physaddr.c
new file mode 100644
index 000000000000..a1ced5e44951
--- /dev/null
+++ b/arch/mips/mm/physaddr.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bug.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/mmdebug.h>
+#include <linux/mm.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/dma.h>
+
+static inline bool __debug_virt_addr_valid(unsigned long x)
+{
+	/* high_memory is not set immediately during boot, and there
+	 * are early callers of __pa() against PAGE_OFFSET, so accept
+	 * any linear-map address until it is known.
+	 */
+ if (!high_memory && x >= PAGE_OFFSET)
+ return true;
+
+ if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory)
+ return true;
+
+ /*
+ * MAX_DMA_ADDRESS is a virtual address that may not correspond to an
+ * actual physical address. Enough code relies on
+ * virt_to_phys(MAX_DMA_ADDRESS) that we just need to work around it
+ * and always return true.
+ */
+ if (x == MAX_DMA_ADDRESS)
+ return true;
+
+ return false;
+}
+
+phys_addr_t __virt_to_phys(volatile const void *x)
+{
+ WARN(!__debug_virt_addr_valid((unsigned long)x),
+ "virt_to_phys used for non-linear address: %pK (%pS)\n",
+ x, x);
+
+ return __virt_to_phys_nodebug(x);
+}
+EXPORT_SYMBOL(__virt_to_phys);
+
+phys_addr_t __phys_addr_symbol(unsigned long x)
+{
+ /* This is bounds checking against the kernel image only.
+ * __pa_symbol should only be used on kernel symbol addresses.
+ */
+ VIRTUAL_BUG_ON(x < (unsigned long)_text ||
+ x > (unsigned long)_end);
+
+ return __pa_symbol_nodebug(x);
+}
+EXPORT_SYMBOL(__phys_addr_symbol);
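A kernel-context sketch of the class of bug CONFIG_DEBUG_VIRTUAL now catches (hypothetical driver code, not part of the patch): __pa()/virt_to_phys() are only meaningful for linear-map addresses, so feeding them a vmalloc pointer used to return garbage silently and now warns:

	#include <linux/io.h>
	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	static void debug_virtual_demo(void)
	{
		void *v = vmalloc(PAGE_SIZE);

		if (v) {
			/* WARNs under DEBUG_VIRTUAL: not a linear-map address */
			virt_to_phys(v);
			vfree(v);
		}
	}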
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 0fb1db8a8ef7..cd4afcdf3725 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -849,8 +849,8 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
/* Clear lower 23 bits of context. */
uasm_i_dins(p, ptr, 0, 0, 23);
- /* 1 0 1 0 1 << 6 xkphys cached */
- uasm_i_ori(p, ptr, ptr, 0x540);
+ /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
+ uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
uasm_i_drotr(p, ptr, ptr, 11);
#elif defined(CONFIG_SMP)
UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
@@ -1165,8 +1165,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
if (pgd_reg == -1) {
vmalloc_branch_delay_filled = 1;
- /* 1 0 1 0 1 << 6 xkphys cached */
- uasm_i_ori(p, ptr, ptr, 0x540);
+ /* insert bit[63:59] of CAC_BASE into bit[11:6] of ptr */
+ uasm_i_ori(p, ptr, ptr, ((u64)(CAC_BASE) >> 53));
+
uasm_i_drotr(p, ptr, ptr, 11);
}
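The new form computes the same immediate the old magic constant hard-coded. With the usual cached xkphys base (CCA=3, segment bits 63:59 = 0b10101; platforms overriding CAC_BASE are exactly why the constant had to go), (u64)CAC_BASE >> 53 lands those five bits at positions 10:6, i.e. 0x540, and the drotr by 11 rotates them back to 63:59. A user-space check of the arithmetic:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t cac_base = 0xA800000000000000ULL;  /* xkphys cached */
		uint64_t imm = cac_base >> 53;              /* -> 0x540 */
		uint64_t ptr = imm;                         /* after "ori" */

		ptr = (ptr >> 11) | (ptr << (64 - 11));     /* "drotr 11" */
		printf("imm = 0x%llx, ptr = 0x%016llx\n",   /* 0x540, 0xa800... */
		       (unsigned long long)imm, (unsigned long long)ptr);
		return 0;
	}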
diff --git a/arch/mips/mti-malta/Platform b/arch/mips/mti-malta/Platform
index 41e0d2a2d325..f4616934d950 100644
--- a/arch/mips/mti-malta/Platform
+++ b/arch/mips/mti-malta/Platform
@@ -2,9 +2,5 @@
# MIPS Malta board
#
cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta
-ifdef CONFIG_KVM_GUEST
- load-$(CONFIG_MIPS_MALTA) += 0x0000000040100000
-else
- load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000
-endif
+load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000
all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 567720374d57..bbf1e38e1431 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -66,11 +66,6 @@ static void __init estimate_frequencies(void)
int secs;
u64 giccount = 0, gicstart = 0;
-#if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ
- mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000;
- return;
-#endif
-
local_irq_save(flags);
if (mips_gic_present())
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index cf33dd8a487e..c25a2ce5e29f 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -276,10 +276,6 @@ asmlinkage void plat_irq_dispatch(void)
}
#ifdef CONFIG_CPU_XLP
-static const struct irq_domain_ops xlp_pic_irq_domain_ops = {
- .xlate = irq_domain_xlate_onetwocell,
-};
-
static int __init xlp_of_pic_init(struct device_node *node,
struct device_node *parent)
{
@@ -324,7 +320,7 @@ static int __init xlp_of_pic_init(struct device_node *node,
xlp_pic_domain = irq_domain_add_legacy(node, n_picirqs,
nlm_irq_to_xirq(socid, PIC_IRQ_BASE), PIC_IRQ_BASE,
- &xlp_pic_irq_domain_ops, NULL);
+ &irq_domain_simple_ops, NULL);
if (xlp_pic_domain == NULL) {
pr_err("PIC %pOFn: Creating legacy domain failed!\n", node);
return -EINVAL;
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
index f741b8c528e4..c1a655aee599 100644
--- a/arch/mips/pci/pci-ar2315.c
+++ b/arch/mips/pci/pci-ar2315.c
@@ -31,6 +31,7 @@
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/dma-direct.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/bitops.h>
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index 39052de915f3..468722c8a5c6 100644
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -89,7 +89,6 @@ static void pcibios_scanbus(struct pci_controller *hose)
hose->mem_resource, hose->mem_offset);
pci_add_resource_offset(&resources,
hose->io_resource, hose->io_offset);
- pci_add_resource(&resources, hose->busn_resource);
list_splice_init(&resources, &bridge->windows);
bridge->dev.parent = NULL;
bridge->sysdata = hose;
@@ -140,7 +139,6 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
struct of_pci_range range;
struct of_pci_range_parser parser;
- pr_info("PCI host bridge %pOF ranges:\n", node);
hose->of_node = node;
if (of_pci_range_parser_init(&parser, node))
@@ -151,23 +149,22 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
switch (range.flags & IORESOURCE_TYPE_BITS) {
case IORESOURCE_IO:
- pr_info(" IO 0x%016llx..0x%016llx\n",
- range.cpu_addr,
- range.cpu_addr + range.size - 1);
hose->io_map_base =
(unsigned long)ioremap(range.cpu_addr,
range.size);
res = hose->io_resource;
break;
case IORESOURCE_MEM:
- pr_info(" MEM 0x%016llx..0x%016llx\n",
- range.cpu_addr,
- range.cpu_addr + range.size - 1);
res = hose->mem_resource;
break;
}
- if (res != NULL)
- of_pci_range_to_resource(&range, node, res);
+ if (res != NULL) {
+ res->name = node->full_name;
+ res->flags = range.flags;
+ res->start = range.cpu_addr;
+ res->end = range.cpu_addr + range.size - 1;
+ res->parent = res->child = res->sibling = NULL;
+ }
}
}
@@ -252,7 +249,7 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask)
pci_read_config_word(dev, PCI_COMMAND, &cmd);
old_cmd = cmd;
- for (idx=0; idx < PCI_NUM_RESOURCES; idx++) {
+ for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
/* Only set up the requested stuff */
if (!(mask & (1<<idx)))
continue;
@@ -282,9 +279,9 @@ static int pcibios_enable_resources(struct pci_dev *dev, int mask)
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
- int err;
+ int err = pcibios_enable_resources(dev, mask);
- if ((err = pcibios_enable_resources(dev, mask)) < 0)
+ if (err < 0)
return err;
return pcibios_plat_dev_init(dev);
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
index d36061603752..e032932348d6 100644
--- a/arch/mips/pci/pci-mt7620.c
+++ b/arch/mips/pci/pci-mt7620.c
@@ -30,6 +30,7 @@
#define RALINK_GPIOMODE 0x60
#define PPLL_CFG1 0x9c
+#define PPLL_LD BIT(23)
#define PPLL_DRV 0xa0
#define PDRV_SW_SET BIT(31)
@@ -239,8 +240,8 @@ static int mt7620_pci_hw_init(struct platform_device *pdev)
rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
mdelay(100);
- if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) {
- dev_err(&pdev->dev, "MT7620 PPLL unlock\n");
+ if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) {
+ dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n");
reset_control_assert(rstpcie0);
rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
return -1;
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
index e1f12e398136..e9dd01431f21 100644
--- a/arch/mips/pci/pci-rt2880.c
+++ b/arch/mips/pci/pci-rt2880.c
@@ -41,7 +41,6 @@
#define RT2880_PCI_REG_ARBCTL 0x80
static void __iomem *rt2880_pci_base;
-static DEFINE_SPINLOCK(rt2880_pci_lock);
static u32 rt2880_pci_reg_read(u32 reg)
{
@@ -63,17 +62,14 @@ static inline u32 rt2880_pci_get_cfgaddr(unsigned int bus, unsigned int slot,
static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- unsigned long flags;
u32 address;
u32 data;
address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), where);
- spin_lock_irqsave(&rt2880_pci_lock, flags);
rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);
- spin_unlock_irqrestore(&rt2880_pci_lock, flags);
switch (size) {
case 1:
@@ -93,14 +89,12 @@ static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn,
static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
- unsigned long flags;
u32 address;
u32 data;
address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), where);
- spin_lock_irqsave(&rt2880_pci_lock, flags);
rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);
@@ -119,7 +113,6 @@ static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn,
}
rt2880_pci_reg_write(data, RT2880_PCI_REG_CONFIG_DATA);
- spin_unlock_irqrestore(&rt2880_pci_lock, flags);
return PCIBIOS_SUCCESSFUL;
}
@@ -151,36 +144,29 @@ static struct pci_controller rt2880_pci_controller = {
static inline u32 rt2880_pci_read_u32(unsigned long reg)
{
- unsigned long flags;
u32 address;
u32 ret;
address = rt2880_pci_get_cfgaddr(0, 0, 0, reg);
- spin_lock_irqsave(&rt2880_pci_lock, flags);
rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
ret = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);
- spin_unlock_irqrestore(&rt2880_pci_lock, flags);
return ret;
}
static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
{
- unsigned long flags;
u32 address;
address = rt2880_pci_get_cfgaddr(0, 0, 0, reg);
- spin_lock_irqsave(&rt2880_pci_lock, flags);
rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
rt2880_pci_reg_write(val, RT2880_PCI_REG_CONFIG_DATA);
- spin_unlock_irqrestore(&rt2880_pci_lock, flags);
}
int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- u16 cmd;
int irq = -1;
if (dev->bus->number != 0)
@@ -188,8 +174,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
switch (PCI_SLOT(dev->devfn)) {
case 0x00:
- rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
- (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
break;
case 0x11:
irq = RT288X_CPU_IRQ_PCI;
@@ -201,16 +185,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
break;
}
- pci_write_config_byte((struct pci_dev *) dev,
- PCI_CACHE_LINE_SIZE, 0x14);
- pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF);
- pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd);
- cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
- PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK |
- PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY;
- pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd);
- pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE,
- dev->irq);
return irq;
}
@@ -251,6 +225,30 @@ static int rt288x_pci_probe(struct platform_device *pdev)
int pcibios_plat_dev_init(struct pci_dev *dev)
{
+ static bool slot0_init;
+
+ /*
+ * Nobody seems to initialize slot 0, but this platform requires it, so
+ * do it once when some other slot is being enabled. The PCI subsystem
+ * should configure other slots properly, so no need to do anything
+ * special for those.
+ */
+ if (!slot0_init && dev->bus->number == 0) {
+ u16 cmd;
+ u32 bar0;
+
+ slot0_init = true;
+
+ pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
+ 0x08000000);
+ pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
+ &bar0);
+
+ pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd);
+ cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
+ pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd);
+ }
+
return 0;
}
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index 0ac6346026d0..c48e23cf5b5e 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
@@ -100,7 +101,6 @@ static u32 rt3883_pci_read_cfg32(struct rt3883_pci_controller *rpc,
unsigned bus, unsigned slot,
unsigned func, unsigned reg)
{
- unsigned long flags;
u32 address;
u32 ret;
@@ -116,7 +116,6 @@ static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc,
unsigned bus, unsigned slot,
unsigned func, unsigned reg, u32 val)
{
- unsigned long flags;
u32 address;
address = rt3883_pci_get_cfgaddr(bus, slot, func, reg);
@@ -229,7 +228,6 @@ static int rt3883_pci_config_read(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
struct rt3883_pci_controller *rpc;
- unsigned long flags;
u32 address;
u32 data;
@@ -263,7 +261,6 @@ static int rt3883_pci_config_write(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 val)
{
struct rt3883_pci_controller *rpc;
- unsigned long flags;
u32 address;
u32 data;
@@ -435,8 +432,7 @@ static int rt3883_pci_probe(struct platform_device *pdev)
if (!rpc->intc_of_node) {
dev_err(dev, "%pOF has no %s child node",
- rpc->intc_of_node,
- "interrupt controller");
+ np, "interrupt controller");
return -EINVAL;
}
@@ -450,8 +446,7 @@ static int rt3883_pci_probe(struct platform_device *pdev)
if (!rpc->pci_controller.of_node) {
dev_err(dev, "%pOF has no %s child node",
- rpc->intc_of_node,
- "PCI host bridge");
+ np, "PCI host bridge");
err = -EINVAL;
goto err_put_intc_node;
}
diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c
index 50f7d42cca5a..ab9bedb82b28 100644
--- a/arch/mips/pci/pci-xtalk-bridge.c
+++ b/arch/mips/pci/pci-xtalk-bridge.c
@@ -13,6 +13,7 @@
#include <linux/platform_data/xtalk-bridge.h>
#include <linux/nvmem-consumer.h>
#include <linux/crc16.h>
+#include <linux/irqdomain.h>
#include <asm/pci/bridge.h>
#include <asm/paccess.h>
@@ -385,7 +386,7 @@ static int bridge_domain_activate(struct irq_domain *domain,
bridge_set(bc, b_int_enable, 0x7ffffe00); /* more stuff in int_enable */
/*
- * Enable sending of an interrupt clear packt to the hub on a high to
+ * Enable sending of an interrupt clear packet to the hub on a high to
* low transition of the interrupt pin.
*
* IRIX sets additional bits in the address which are documented as
diff --git a/arch/mips/pic32/Kconfig b/arch/mips/pic32/Kconfig
index 7acbb50c1dcd..bb6ab1f3e80d 100644
--- a/arch/mips/pic32/Kconfig
+++ b/arch/mips/pic32/Kconfig
@@ -17,7 +17,6 @@ config PIC32MZDA
select SYS_SUPPORTS_LITTLE_ENDIAN
select GPIOLIB
select COMMON_CLK
- select CLKDEV_LOOKUP
select LIBFDT
select USE_OF
select PINCTRL
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index c10d8b233ab1..c800bf5559b5 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -26,23 +26,20 @@ choice
config SOC_RT288X
bool "RT288x"
+ select MIPS_AUTO_PFN_OFFSET
select MIPS_L1_CACHE_SHIFT_4
- select HAVE_LEGACY_CLK
select HAVE_PCI
config SOC_RT305X
bool "RT305x"
- select HAVE_LEGACY_CLK
config SOC_RT3883
bool "RT3883"
- select HAVE_LEGACY_CLK
select HAVE_PCI
config SOC_MT7620
bool "MT7620/8"
select CPU_MIPSR2_IRQ_VI
- select HAVE_LEGACY_CLK
select HAVE_PCI
config SOC_MT7621
@@ -53,7 +50,6 @@ choice
select SYS_SUPPORTS_MIPS_CPS
select SYS_SUPPORTS_HIGHMEM
select MIPS_GIC
- select COMMON_CLK
select CLKSRC_MIPS_GIC
select HAVE_PCI if PCI_MT7621
select SOC_BUS
diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c
index 2f9d5acb38ea..5b02bb7e0829 100644
--- a/arch/mips/ralink/clk.c
+++ b/arch/mips/ralink/clk.c
@@ -10,65 +10,21 @@
#include <linux/export.h>
#include <linux/clkdev.h>
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <asm/time.h>
#include "common.h"
-struct clk {
- struct clk_lookup cl;
- unsigned long rate;
-};
-
void ralink_clk_add(const char *dev, unsigned long rate)
{
- struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+ struct clk *clk = clk_register_fixed_rate(NULL, dev, NULL, 0, rate);
if (!clk)
panic("failed to add clock");
- clk->cl.dev_id = dev;
- clk->cl.clk = clk;
-
- clk->rate = rate;
-
- clkdev_add(&clk->cl);
-}
-
-/*
- * Linux clock API
- */
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL_GPL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-}
-EXPORT_SYMBOL_GPL(clk_disable);
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- if (!clk)
- return 0;
-
- return clk->rate;
-}
-EXPORT_SYMBOL_GPL(clk_get_rate);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- return -1;
-}
-EXPORT_SYMBOL_GPL(clk_set_rate);
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- return -1;
+ clkdev_create(clk, NULL, "%s", dev);
}
-EXPORT_SYMBOL_GPL(clk_round_rate);
void __init plat_time_init(void)
{
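With the fixed-rate clock registered through the common framework, the hand-rolled clk_enable()/clk_get_rate() stubs removed above become redundant: consumers go through the normal clk API. (Note that clk_register_fixed_rate() reports failure with ERR_PTR(), so an IS_ERR() check would be stricter than the inherited !clk test.) A kernel-context sketch of the consumer side (hypothetical driver code, not part of the patch):

	#include <linux/clk.h>
	#include <linux/err.h>

	static unsigned long example_clk_rate(struct device *dev)
	{
		struct clk *clk = clk_get(dev, NULL);	/* via clkdev_create() */
		unsigned long rate = 0;

		if (!IS_ERR(clk)) {
			rate = clk_get_rate(clk);
			clk_put(clk);
		}
		return rate;
	}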
diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h
index 4bc65b7a3241..87fc16751281 100644
--- a/arch/mips/ralink/common.h
+++ b/arch/mips/ralink/common.h
@@ -17,6 +17,7 @@ struct ralink_soc_info {
unsigned long mem_size;
unsigned long mem_size_min;
unsigned long mem_size_max;
+ void (*mem_detect)(void);
};
extern struct ralink_soc_info soc_info;
@@ -27,7 +28,7 @@ extern void ralink_clk_add(const char *dev, unsigned long rate);
extern void ralink_rst_init(void);
-extern void prom_soc_init(struct ralink_soc_info *soc_info);
+extern void __init prom_soc_init(struct ralink_soc_info *soc_info);
__iomem void *plat_of_remap_node(const char *node);
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index fcf010038054..ae1fa0391c88 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -15,7 +15,6 @@
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/mt7620.h>
-#include <asm/mach-ralink/pinmux.h>
#include "common.h"
@@ -50,320 +49,6 @@
/* does the board have sdram or ddram */
static int dram_type;
-static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) };
-static struct rt2880_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) };
-static struct rt2880_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) };
-static struct rt2880_pmx_func mdio_grp[] = {
- FUNC("mdio", MT7620_GPIO_MODE_MDIO, 22, 2),
- FUNC("refclk", MT7620_GPIO_MODE_MDIO_REFCLK, 22, 2),
-};
-static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) };
-static struct rt2880_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) };
-static struct rt2880_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) };
-static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 60, 12) };
-static struct rt2880_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) };
-static struct rt2880_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) };
-static struct rt2880_pmx_func uartf_grp[] = {
- FUNC("uartf", MT7620_GPIO_MODE_UARTF, 7, 8),
- FUNC("pcm uartf", MT7620_GPIO_MODE_PCM_UARTF, 7, 8),
- FUNC("pcm i2s", MT7620_GPIO_MODE_PCM_I2S, 7, 8),
- FUNC("i2s uartf", MT7620_GPIO_MODE_I2S_UARTF, 7, 8),
- FUNC("pcm gpio", MT7620_GPIO_MODE_PCM_GPIO, 11, 4),
- FUNC("gpio uartf", MT7620_GPIO_MODE_GPIO_UARTF, 7, 4),
- FUNC("gpio i2s", MT7620_GPIO_MODE_GPIO_I2S, 7, 4),
-};
-static struct rt2880_pmx_func wdt_grp[] = {
- FUNC("wdt rst", 0, 17, 1),
- FUNC("wdt refclk", 0, 17, 1),
- };
-static struct rt2880_pmx_func pcie_rst_grp[] = {
- FUNC("pcie rst", MT7620_GPIO_MODE_PCIE_RST, 36, 1),
- FUNC("pcie refclk", MT7620_GPIO_MODE_PCIE_REF, 36, 1)
-};
-static struct rt2880_pmx_func nd_sd_grp[] = {
- FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15),
- FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13)
-};
-
-static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
- GRP("i2c", i2c_grp, 1, MT7620_GPIO_MODE_I2C),
- GRP("uartf", uartf_grp, MT7620_GPIO_MODE_UART0_MASK,
- MT7620_GPIO_MODE_UART0_SHIFT),
- GRP("spi", spi_grp, 1, MT7620_GPIO_MODE_SPI),
- GRP("uartlite", uartlite_grp, 1, MT7620_GPIO_MODE_UART1),
- GRP_G("wdt", wdt_grp, MT7620_GPIO_MODE_WDT_MASK,
- MT7620_GPIO_MODE_WDT_GPIO, MT7620_GPIO_MODE_WDT_SHIFT),
- GRP_G("mdio", mdio_grp, MT7620_GPIO_MODE_MDIO_MASK,
- MT7620_GPIO_MODE_MDIO_GPIO, MT7620_GPIO_MODE_MDIO_SHIFT),
- GRP("rgmii1", rgmii1_grp, 1, MT7620_GPIO_MODE_RGMII1),
- GRP("spi refclk", refclk_grp, 1, MT7620_GPIO_MODE_SPI_REF_CLK),
- GRP_G("pcie", pcie_rst_grp, MT7620_GPIO_MODE_PCIE_MASK,
- MT7620_GPIO_MODE_PCIE_GPIO, MT7620_GPIO_MODE_PCIE_SHIFT),
- GRP_G("nd_sd", nd_sd_grp, MT7620_GPIO_MODE_ND_SD_MASK,
- MT7620_GPIO_MODE_ND_SD_GPIO, MT7620_GPIO_MODE_ND_SD_SHIFT),
- GRP("rgmii2", rgmii2_grp, 1, MT7620_GPIO_MODE_RGMII2),
- GRP("wled", wled_grp, 1, MT7620_GPIO_MODE_WLED),
- GRP("ephy", ephy_grp, 1, MT7620_GPIO_MODE_EPHY),
- GRP("pa", pa_grp, 1, MT7620_GPIO_MODE_PA),
- { 0 }
-};
-
-static struct rt2880_pmx_func pwm1_grp_mt7628[] = {
- FUNC("sdxc d6", 3, 19, 1),
- FUNC("utif", 2, 19, 1),
- FUNC("gpio", 1, 19, 1),
- FUNC("pwm1", 0, 19, 1),
-};
-
-static struct rt2880_pmx_func pwm0_grp_mt7628[] = {
- FUNC("sdxc d7", 3, 18, 1),
- FUNC("utif", 2, 18, 1),
- FUNC("gpio", 1, 18, 1),
- FUNC("pwm0", 0, 18, 1),
-};
-
-static struct rt2880_pmx_func uart2_grp_mt7628[] = {
- FUNC("sdxc d5 d4", 3, 20, 2),
- FUNC("pwm", 2, 20, 2),
- FUNC("gpio", 1, 20, 2),
- FUNC("uart2", 0, 20, 2),
-};
-
-static struct rt2880_pmx_func uart1_grp_mt7628[] = {
- FUNC("sw_r", 3, 45, 2),
- FUNC("pwm", 2, 45, 2),
- FUNC("gpio", 1, 45, 2),
- FUNC("uart1", 0, 45, 2),
-};
-
-static struct rt2880_pmx_func i2c_grp_mt7628[] = {
- FUNC("-", 3, 4, 2),
- FUNC("debug", 2, 4, 2),
- FUNC("gpio", 1, 4, 2),
- FUNC("i2c", 0, 4, 2),
-};
-
-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
-static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
-static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
-static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
-
-static struct rt2880_pmx_func sd_mode_grp_mt7628[] = {
- FUNC("jtag", 3, 22, 8),
- FUNC("utif", 2, 22, 8),
- FUNC("gpio", 1, 22, 8),
- FUNC("sdxc", 0, 22, 8),
-};
-
-static struct rt2880_pmx_func uart0_grp_mt7628[] = {
- FUNC("-", 3, 12, 2),
- FUNC("-", 2, 12, 2),
- FUNC("gpio", 1, 12, 2),
- FUNC("uart0", 0, 12, 2),
-};
-
-static struct rt2880_pmx_func i2s_grp_mt7628[] = {
- FUNC("antenna", 3, 0, 4),
- FUNC("pcm", 2, 0, 4),
- FUNC("gpio", 1, 0, 4),
- FUNC("i2s", 0, 0, 4),
-};
-
-static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
- FUNC("-", 3, 6, 1),
- FUNC("refclk", 2, 6, 1),
- FUNC("gpio", 1, 6, 1),
- FUNC("spi cs1", 0, 6, 1),
-};
-
-static struct rt2880_pmx_func spis_grp_mt7628[] = {
- FUNC("pwm_uart2", 3, 14, 4),
- FUNC("utif", 2, 14, 4),
- FUNC("gpio", 1, 14, 4),
- FUNC("spis", 0, 14, 4),
-};
-
-static struct rt2880_pmx_func gpio_grp_mt7628[] = {
- FUNC("pcie", 3, 11, 1),
- FUNC("refclk", 2, 11, 1),
- FUNC("gpio", 1, 11, 1),
- FUNC("gpio", 0, 11, 1),
-};
-
-static struct rt2880_pmx_func p4led_kn_grp_mt7628[] = {
- FUNC("jtag", 3, 30, 1),
- FUNC("utif", 2, 30, 1),
- FUNC("gpio", 1, 30, 1),
- FUNC("p4led_kn", 0, 30, 1),
-};
-
-static struct rt2880_pmx_func p3led_kn_grp_mt7628[] = {
- FUNC("jtag", 3, 31, 1),
- FUNC("utif", 2, 31, 1),
- FUNC("gpio", 1, 31, 1),
- FUNC("p3led_kn", 0, 31, 1),
-};
-
-static struct rt2880_pmx_func p2led_kn_grp_mt7628[] = {
- FUNC("jtag", 3, 32, 1),
- FUNC("utif", 2, 32, 1),
- FUNC("gpio", 1, 32, 1),
- FUNC("p2led_kn", 0, 32, 1),
-};
-
-static struct rt2880_pmx_func p1led_kn_grp_mt7628[] = {
- FUNC("jtag", 3, 33, 1),
- FUNC("utif", 2, 33, 1),
- FUNC("gpio", 1, 33, 1),
- FUNC("p1led_kn", 0, 33, 1),
-};
-
-static struct rt2880_pmx_func p0led_kn_grp_mt7628[] = {
- FUNC("jtag", 3, 34, 1),
- FUNC("rsvd", 2, 34, 1),
- FUNC("gpio", 1, 34, 1),
- FUNC("p0led_kn", 0, 34, 1),
-};
-
-static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
- FUNC("rsvd", 3, 35, 1),
- FUNC("rsvd", 2, 35, 1),
- FUNC("gpio", 1, 35, 1),
- FUNC("wled_kn", 0, 35, 1),
-};
-
-static struct rt2880_pmx_func p4led_an_grp_mt7628[] = {
- FUNC("jtag", 3, 39, 1),
- FUNC("utif", 2, 39, 1),
- FUNC("gpio", 1, 39, 1),
- FUNC("p4led_an", 0, 39, 1),
-};
-
-static struct rt2880_pmx_func p3led_an_grp_mt7628[] = {
- FUNC("jtag", 3, 40, 1),
- FUNC("utif", 2, 40, 1),
- FUNC("gpio", 1, 40, 1),
- FUNC("p3led_an", 0, 40, 1),
-};
-
-static struct rt2880_pmx_func p2led_an_grp_mt7628[] = {
- FUNC("jtag", 3, 41, 1),
- FUNC("utif", 2, 41, 1),
- FUNC("gpio", 1, 41, 1),
- FUNC("p2led_an", 0, 41, 1),
-};
-
-static struct rt2880_pmx_func p1led_an_grp_mt7628[] = {
- FUNC("jtag", 3, 42, 1),
- FUNC("utif", 2, 42, 1),
- FUNC("gpio", 1, 42, 1),
- FUNC("p1led_an", 0, 42, 1),
-};
-
-static struct rt2880_pmx_func p0led_an_grp_mt7628[] = {
- FUNC("jtag", 3, 43, 1),
- FUNC("rsvd", 2, 43, 1),
- FUNC("gpio", 1, 43, 1),
- FUNC("p0led_an", 0, 43, 1),
-};
-
-static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
- FUNC("rsvd", 3, 44, 1),
- FUNC("rsvd", 2, 44, 1),
- FUNC("gpio", 1, 44, 1),
- FUNC("wled_an", 0, 44, 1),
-};
-
-#define MT7628_GPIO_MODE_MASK 0x3
-
-#define MT7628_GPIO_MODE_P4LED_KN 58
-#define MT7628_GPIO_MODE_P3LED_KN 56
-#define MT7628_GPIO_MODE_P2LED_KN 54
-#define MT7628_GPIO_MODE_P1LED_KN 52
-#define MT7628_GPIO_MODE_P0LED_KN 50
-#define MT7628_GPIO_MODE_WLED_KN 48
-#define MT7628_GPIO_MODE_P4LED_AN 42
-#define MT7628_GPIO_MODE_P3LED_AN 40
-#define MT7628_GPIO_MODE_P2LED_AN 38
-#define MT7628_GPIO_MODE_P1LED_AN 36
-#define MT7628_GPIO_MODE_P0LED_AN 34
-#define MT7628_GPIO_MODE_WLED_AN 32
-#define MT7628_GPIO_MODE_PWM1 30
-#define MT7628_GPIO_MODE_PWM0 28
-#define MT7628_GPIO_MODE_UART2 26
-#define MT7628_GPIO_MODE_UART1 24
-#define MT7628_GPIO_MODE_I2C 20
-#define MT7628_GPIO_MODE_REFCLK 18
-#define MT7628_GPIO_MODE_PERST 16
-#define MT7628_GPIO_MODE_WDT 14
-#define MT7628_GPIO_MODE_SPI 12
-#define MT7628_GPIO_MODE_SDMODE 10
-#define MT7628_GPIO_MODE_UART0 8
-#define MT7628_GPIO_MODE_I2S 6
-#define MT7628_GPIO_MODE_CS1 4
-#define MT7628_GPIO_MODE_SPIS 2
-#define MT7628_GPIO_MODE_GPIO 0
-
-static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
- GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_PWM1),
- GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_PWM0),
- GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_UART2),
- GRP_G("uart1", uart1_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_UART1),
- GRP_G("i2c", i2c_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_I2C),
- GRP("refclk", refclk_grp_mt7628, 1, MT7628_GPIO_MODE_REFCLK),
- GRP("perst", perst_grp_mt7628, 1, MT7628_GPIO_MODE_PERST),
- GRP("wdt", wdt_grp_mt7628, 1, MT7628_GPIO_MODE_WDT),
- GRP("spi", spi_grp_mt7628, 1, MT7628_GPIO_MODE_SPI),
- GRP_G("sdmode", sd_mode_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_SDMODE),
- GRP_G("uart0", uart0_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_UART0),
- GRP_G("i2s", i2s_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_I2S),
- GRP_G("spi cs1", spi_cs1_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_CS1),
- GRP_G("spis", spis_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_SPIS),
- GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_GPIO),
- GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_WLED_AN),
- GRP_G("p0led_an", p0led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P0LED_AN),
- GRP_G("p1led_an", p1led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P1LED_AN),
- GRP_G("p2led_an", p2led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P2LED_AN),
- GRP_G("p3led_an", p3led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P3LED_AN),
- GRP_G("p4led_an", p4led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P4LED_AN),
- GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_WLED_KN),
- GRP_G("p0led_kn", p0led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P0LED_KN),
- GRP_G("p1led_kn", p1led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P1LED_KN),
- GRP_G("p2led_kn", p2led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P2LED_KN),
- GRP_G("p3led_kn", p3led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P3LED_KN),
- GRP_G("p4led_kn", p4led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
- 1, MT7628_GPIO_MODE_P4LED_KN),
- { 0 }
-};
-
-static inline int is_mt76x8(void)
-{
- return ralink_soc == MT762X_SOC_MT7628AN ||
- ralink_soc == MT762X_SOC_MT7688;
-}
-
static __init u32
mt7620_calc_rate(u32 ref_rate, u32 mul, u32 div)
{
@@ -639,7 +324,7 @@ mt7628_dram_init(struct ralink_soc_info *soc_info)
}
}
-void prom_soc_init(struct ralink_soc_info *soc_info)
+void __init prom_soc_init(struct ralink_soc_info *soc_info)
{
void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE);
unsigned char *name = NULL;
@@ -710,9 +395,4 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
(pmu0 & PMU_SW_SET) ? ("sw") : ("hw"));
pr_info("Digital PMU set to %s control\n",
(pmu1 & DIG_SW_SEL) ? ("sw") : ("hw"));
-
- if (is_mt76x8())
- rt2880_pinmux_data = mt7628an_pinmux_data;
- else
- rt2880_pinmux_data = mt7620a_pinmux_data;
}
diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c
index ca0ac607b0f3..bd71f5b14238 100644
--- a/arch/mips/ralink/mt7621.c
+++ b/arch/mips/ralink/mt7621.c
@@ -9,111 +9,48 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
+#include <linux/memblock.h>
+#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/smp-ops.h>
#include <asm/mips-cps.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/mt7621.h>
-#include <pinmux.h>
-
#include "common.h"
-#define MT7621_GPIO_MODE_UART1 1
-#define MT7621_GPIO_MODE_I2C 2
-#define MT7621_GPIO_MODE_UART3_MASK 0x3
-#define MT7621_GPIO_MODE_UART3_SHIFT 3
-#define MT7621_GPIO_MODE_UART3_GPIO 1
-#define MT7621_GPIO_MODE_UART2_MASK 0x3
-#define MT7621_GPIO_MODE_UART2_SHIFT 5
-#define MT7621_GPIO_MODE_UART2_GPIO 1
-#define MT7621_GPIO_MODE_JTAG 7
-#define MT7621_GPIO_MODE_WDT_MASK 0x3
-#define MT7621_GPIO_MODE_WDT_SHIFT 8
-#define MT7621_GPIO_MODE_WDT_GPIO 1
-#define MT7621_GPIO_MODE_PCIE_RST 0
-#define MT7621_GPIO_MODE_PCIE_REF 2
-#define MT7621_GPIO_MODE_PCIE_MASK 0x3
-#define MT7621_GPIO_MODE_PCIE_SHIFT 10
-#define MT7621_GPIO_MODE_PCIE_GPIO 1
-#define MT7621_GPIO_MODE_MDIO_MASK 0x3
-#define MT7621_GPIO_MODE_MDIO_SHIFT 12
-#define MT7621_GPIO_MODE_MDIO_GPIO 1
-#define MT7621_GPIO_MODE_RGMII1 14
-#define MT7621_GPIO_MODE_RGMII2 15
-#define MT7621_GPIO_MODE_SPI_MASK 0x3
-#define MT7621_GPIO_MODE_SPI_SHIFT 16
-#define MT7621_GPIO_MODE_SPI_GPIO 1
-#define MT7621_GPIO_MODE_SDHCI_MASK 0x3
-#define MT7621_GPIO_MODE_SDHCI_SHIFT 18
-#define MT7621_GPIO_MODE_SDHCI_GPIO 1
-
-static struct rt2880_pmx_func uart1_grp[] = { FUNC("uart1", 0, 1, 2) };
-static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) };
-static struct rt2880_pmx_func uart3_grp[] = {
- FUNC("uart3", 0, 5, 4),
- FUNC("i2s", 2, 5, 4),
- FUNC("spdif3", 3, 5, 4),
-};
-static struct rt2880_pmx_func uart2_grp[] = {
- FUNC("uart2", 0, 9, 4),
- FUNC("pcm", 2, 9, 4),
- FUNC("spdif2", 3, 9, 4),
-};
-static struct rt2880_pmx_func jtag_grp[] = { FUNC("jtag", 0, 13, 5) };
-static struct rt2880_pmx_func wdt_grp[] = {
- FUNC("wdt rst", 0, 18, 1),
- FUNC("wdt refclk", 2, 18, 1),
-};
-static struct rt2880_pmx_func pcie_rst_grp[] = {
- FUNC("pcie rst", MT7621_GPIO_MODE_PCIE_RST, 19, 1),
- FUNC("pcie refclk", MT7621_GPIO_MODE_PCIE_REF, 19, 1)
-};
-static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 20, 2) };
-static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 22, 12) };
-static struct rt2880_pmx_func spi_grp[] = {
- FUNC("spi", 0, 34, 7),
- FUNC("nand1", 2, 34, 7),
-};
-static struct rt2880_pmx_func sdhci_grp[] = {
- FUNC("sdhci", 0, 41, 8),
- FUNC("nand2", 2, 41, 8),
-};
-static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 49, 12) };
-
-static struct rt2880_pmx_group mt7621_pinmux_data[] = {
- GRP("uart1", uart1_grp, 1, MT7621_GPIO_MODE_UART1),
- GRP("i2c", i2c_grp, 1, MT7621_GPIO_MODE_I2C),
- GRP_G("uart3", uart3_grp, MT7621_GPIO_MODE_UART3_MASK,
- MT7621_GPIO_MODE_UART3_GPIO, MT7621_GPIO_MODE_UART3_SHIFT),
- GRP_G("uart2", uart2_grp, MT7621_GPIO_MODE_UART2_MASK,
- MT7621_GPIO_MODE_UART2_GPIO, MT7621_GPIO_MODE_UART2_SHIFT),
- GRP("jtag", jtag_grp, 1, MT7621_GPIO_MODE_JTAG),
- GRP_G("wdt", wdt_grp, MT7621_GPIO_MODE_WDT_MASK,
- MT7621_GPIO_MODE_WDT_GPIO, MT7621_GPIO_MODE_WDT_SHIFT),
- GRP_G("pcie", pcie_rst_grp, MT7621_GPIO_MODE_PCIE_MASK,
- MT7621_GPIO_MODE_PCIE_GPIO, MT7621_GPIO_MODE_PCIE_SHIFT),
- GRP_G("mdio", mdio_grp, MT7621_GPIO_MODE_MDIO_MASK,
- MT7621_GPIO_MODE_MDIO_GPIO, MT7621_GPIO_MODE_MDIO_SHIFT),
- GRP("rgmii2", rgmii2_grp, 1, MT7621_GPIO_MODE_RGMII2),
- GRP_G("spi", spi_grp, MT7621_GPIO_MODE_SPI_MASK,
- MT7621_GPIO_MODE_SPI_GPIO, MT7621_GPIO_MODE_SPI_SHIFT),
- GRP_G("sdhci", sdhci_grp, MT7621_GPIO_MODE_SDHCI_MASK,
- MT7621_GPIO_MODE_SDHCI_GPIO, MT7621_GPIO_MODE_SDHCI_SHIFT),
- GRP("rgmii1", rgmii1_grp, 1, MT7621_GPIO_MODE_RGMII1),
- { 0 }
-};
+static void *detect_magic __initdata = detect_memory_region;
phys_addr_t mips_cpc_default_phys_base(void)
{
panic("Cannot detect cpc address");
}
+static void __init mt7621_memory_detect(void)
+{
+ void *dm = &detect_magic;
+ phys_addr_t size;
+
+ for (size = 32 * SZ_1M; size < 256 * SZ_1M; size <<= 1) {
+ if (!__builtin_memcmp(dm, dm + size, sizeof(detect_magic)))
+ break;
+ }
+
+ if ((size == 256 * SZ_1M) &&
+ (CPHYSADDR(dm + size) < MT7621_LOWMEM_MAX_SIZE) &&
+ __builtin_memcmp(dm, dm + size, sizeof(detect_magic))) {
+ memblock_add(MT7621_LOWMEM_BASE, MT7621_LOWMEM_MAX_SIZE);
+ memblock_add(MT7621_HIGHMEM_BASE, MT7621_HIGHMEM_SIZE);
+ } else {
+ memblock_add(MT7621_LOWMEM_BASE, size);
+ }
+}
+
void __init ralink_of_remap(void)
{
- rt_sysc_membase = plat_of_remap_node("mtk,mt7621-sysc");
- rt_memc_membase = plat_of_remap_node("mtk,mt7621-memc");
+ rt_sysc_membase = plat_of_remap_node("mediatek,mt7621-sysc");
+ rt_memc_membase = plat_of_remap_node("mediatek,mt7621-memc");
if (!rt_sysc_membase || !rt_memc_membase)
panic("Failed to remap core resources");
@@ -146,7 +83,7 @@ static void soc_dev_init(struct ralink_soc_info *soc_info, u32 rev)
}
}
-void prom_soc_init(struct ralink_soc_info *soc_info)
+void __init prom_soc_init(struct ralink_soc_info *soc_info)
{
void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE);
unsigned char *name = NULL;
@@ -181,7 +118,7 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
if (n0 == MT7621_CHIP_NAME0 && n1 == MT7621_CHIP_NAME1) {
name = "MT7621";
- soc_info->compatible = "mtk,mt7621-soc";
+ soc_info->compatible = "mediatek,mt7621-soc";
} else {
panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
}
@@ -194,11 +131,7 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
(rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
(rev & CHIP_REV_ECO_MASK));
- soc_info->mem_size_min = MT7621_DDR2_SIZE_MIN;
- soc_info->mem_size_max = MT7621_DDR2_SIZE_MAX;
- soc_info->mem_base = MT7621_DRAM_BASE;
-
- rt2880_pinmux_data = mt7621_pinmux_data;
+ soc_info->mem_detect = mt7621_memory_detect;
soc_dev_init(soc_info, rev);
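mt7621_memory_detect() above sizes RAM without writing to it: when the board has less memory than the address window, the same physical bytes alias at every multiple of the real size, so comparing a known marker at the base against base+size finds the wrap point. A small user-space model of the probe (toy sizes, hypothetical names):

	#include <stdio.h>
	#include <string.h>

	static size_t detect_size(const unsigned char *win, size_t start,
				  size_t max, size_t marker_len)
	{
		size_t size;

		/* double the probe distance until the marker reappears */
		for (size = start; size < max; size <<= 1)
			if (!memcmp(win, win + size, marker_len))
				break;
		return size;
	}

	int main(void)
	{
		unsigned char win[64];
		size_t i, real = 16;	/* pretend RAM wraps every 16 bytes */

		for (i = 0; i < sizeof(win); i++)
			win[i] = (unsigned char)(i % real);

		printf("detected %zu bytes\n", detect_size(win, 4, 64, 4));
		return 0;
	}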
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 8286c3521476..0135376c5de5 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -8,6 +8,7 @@
#include <linux/io.h>
#include <linux/clk.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
@@ -25,6 +26,7 @@
__iomem void *rt_sysc_membase;
__iomem void *rt_memc_membase;
+EXPORT_SYMBOL_GPL(rt_sysc_membase);
__iomem void *plat_of_remap_node(const char *node)
{
@@ -78,6 +80,8 @@ void __init plat_mem_setup(void)
of_scan_flat_dt(early_init_dt_find_memory, NULL);
if (memory_dtb)
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+ else if (soc_info.mem_detect)
+ soc_info.mem_detect();
else if (soc_info.mem_size)
memblock_add(soc_info.mem_base, soc_info.mem_size * SZ_1M);
else
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
index 25728def3503..aaac1e6ec7d9 100644
--- a/arch/mips/ralink/prom.c
+++ b/arch/mips/ralink/prom.c
@@ -18,7 +18,6 @@
#include "common.h"
struct ralink_soc_info soc_info;
-struct rt2880_pmx_group *rt2880_pinmux_data = NULL;
enum ralink_soc_type ralink_soc;
EXPORT_SYMBOL_GPL(ralink_soc);
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
index 3f096897858c..493335db2fe1 100644
--- a/arch/mips/ralink/rt288x.c
+++ b/arch/mips/ralink/rt288x.c
@@ -14,29 +14,9 @@
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt288x.h>
-#include <asm/mach-ralink/pinmux.h>
#include "common.h"
-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
-static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
-static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) };
-static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
-static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
-static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
-static struct rt2880_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) };
-
-static struct rt2880_pmx_group rt2880_pinmux_data_act[] = {
- GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI),
- GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0),
- GRP("jtag", jtag_func, 1, RT2880_GPIO_MODE_JTAG),
- GRP("mdio", mdio_func, 1, RT2880_GPIO_MODE_MDIO),
- GRP("sdram", sdram_func, 1, RT2880_GPIO_MODE_SDRAM),
- GRP("pci", pci_func, 1, RT2880_GPIO_MODE_PCI),
- { 0 }
-};
-
void __init ralink_clk_init(void)
{
unsigned long cpu_rate, wmac_rate = 40000000;
@@ -77,7 +57,7 @@ void __init ralink_of_remap(void)
panic("Failed to remap core resources");
}
-void prom_soc_init(struct ralink_soc_info *soc_info)
+void __init prom_soc_init(struct ralink_soc_info *soc_info)
{
void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE);
const char *name;
@@ -106,6 +86,5 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
soc_info->mem_size_min = RT2880_MEM_SIZE_MIN;
soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
- rt2880_pinmux_data = rt2880_pinmux_data_act;
ralink_soc = RT2880_SOC;
}
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index 496f966c05f9..8b095a9dcb15 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -16,83 +16,9 @@
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt305x.h>
-#include <asm/mach-ralink/pinmux.h>
#include "common.h"
-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
-static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
-static struct rt2880_pmx_func uartf_func[] = {
- FUNC("uartf", RT305X_GPIO_MODE_UARTF, 7, 8),
- FUNC("pcm uartf", RT305X_GPIO_MODE_PCM_UARTF, 7, 8),
- FUNC("pcm i2s", RT305X_GPIO_MODE_PCM_I2S, 7, 8),
- FUNC("i2s uartf", RT305X_GPIO_MODE_I2S_UARTF, 7, 8),
- FUNC("pcm gpio", RT305X_GPIO_MODE_PCM_GPIO, 11, 4),
- FUNC("gpio uartf", RT305X_GPIO_MODE_GPIO_UARTF, 7, 4),
- FUNC("gpio i2s", RT305X_GPIO_MODE_GPIO_I2S, 7, 4),
-};
-static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
-static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
-static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
-static struct rt2880_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) };
-static struct rt2880_pmx_func rt5350_cs1_func[] = {
- FUNC("spi_cs1", 0, 27, 1),
- FUNC("wdg_cs1", 1, 27, 1),
-};
-static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
-static struct rt2880_pmx_func rt3352_rgmii_func[] = {
- FUNC("rgmii", 0, 24, 12)
-};
-static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) };
-static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) };
-static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) };
-static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) };
-static struct rt2880_pmx_func rt3352_cs1_func[] = {
- FUNC("spi_cs1", 0, 45, 1),
- FUNC("wdg_cs1", 1, 45, 1),
-};
-
-static struct rt2880_pmx_group rt3050_pinmux_data[] = {
- GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
- GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
- RT305X_GPIO_MODE_UART0_SHIFT),
- GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
- GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
- GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO),
- GRP("rgmii", rgmii_func, 1, RT305X_GPIO_MODE_RGMII),
- GRP("sdram", sdram_func, 1, RT305X_GPIO_MODE_SDRAM),
- { 0 }
-};
-
-static struct rt2880_pmx_group rt3352_pinmux_data[] = {
- GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
- GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
- RT305X_GPIO_MODE_UART0_SHIFT),
- GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
- GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
- GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO),
- GRP("rgmii", rt3352_rgmii_func, 1, RT305X_GPIO_MODE_RGMII),
- GRP("lna", rt3352_lna_func, 1, RT3352_GPIO_MODE_LNA),
- GRP("pa", rt3352_pa_func, 1, RT3352_GPIO_MODE_PA),
- GRP("led", rt3352_led_func, 1, RT5350_GPIO_MODE_PHY_LED),
- GRP("spi_cs1", rt3352_cs1_func, 2, RT5350_GPIO_MODE_SPI_CS1),
- { 0 }
-};
-
-static struct rt2880_pmx_group rt5350_pinmux_data[] = {
- GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
- GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
- RT305X_GPIO_MODE_UART0_SHIFT),
- GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
- GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
- GRP("led", rt5350_led_func, 1, RT5350_GPIO_MODE_PHY_LED),
- GRP("spi_cs1", rt5350_cs1_func, 2, RT5350_GPIO_MODE_SPI_CS1),
- { 0 }
-};
-
static unsigned long rt5350_get_mem_size(void)
{
void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
@@ -214,7 +140,7 @@ void __init ralink_of_remap(void)
panic("Failed to remap core resources");
}
-void prom_soc_init(struct ralink_soc_info *soc_info)
+void __init prom_soc_init(struct ralink_soc_info *soc_info)
{
void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
unsigned char *name;
@@ -265,14 +191,11 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
soc_info->mem_base = RT305X_SDRAM_BASE;
if (soc_is_rt5350()) {
soc_info->mem_size = rt5350_get_mem_size();
- rt2880_pinmux_data = rt5350_pinmux_data;
} else if (soc_is_rt305x() || soc_is_rt3350()) {
soc_info->mem_size_min = RT305X_MEM_SIZE_MIN;
soc_info->mem_size_max = RT305X_MEM_SIZE_MAX;
- rt2880_pinmux_data = rt3050_pinmux_data;
} else if (soc_is_rt3352()) {
soc_info->mem_size_min = RT3352_MEM_SIZE_MIN;
soc_info->mem_size_max = RT3352_MEM_SIZE_MAX;
- rt2880_pinmux_data = rt3352_pinmux_data;
}
}
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index 8f3fe3106708..d9875f146d66 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -14,52 +14,9 @@
#include <asm/mipsregs.h>
#include <asm/mach-ralink/ralink_regs.h>
#include <asm/mach-ralink/rt3883.h>
-#include <asm/mach-ralink/pinmux.h>
#include "common.h"
-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
-static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
-static struct rt2880_pmx_func uartf_func[] = {
- FUNC("uartf", RT3883_GPIO_MODE_UARTF, 7, 8),
- FUNC("pcm uartf", RT3883_GPIO_MODE_PCM_UARTF, 7, 8),
- FUNC("pcm i2s", RT3883_GPIO_MODE_PCM_I2S, 7, 8),
- FUNC("i2s uartf", RT3883_GPIO_MODE_I2S_UARTF, 7, 8),
- FUNC("pcm gpio", RT3883_GPIO_MODE_PCM_GPIO, 11, 4),
- FUNC("gpio uartf", RT3883_GPIO_MODE_GPIO_UARTF, 7, 4),
- FUNC("gpio i2s", RT3883_GPIO_MODE_GPIO_I2S, 7, 4),
-};
-static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
-static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
-static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
-static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
-static struct rt2880_pmx_func pci_func[] = {
- FUNC("pci-dev", 0, 40, 32),
- FUNC("pci-host2", 1, 40, 32),
- FUNC("pci-host1", 2, 40, 32),
- FUNC("pci-fnc", 3, 40, 32)
-};
-static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
-
-static struct rt2880_pmx_group rt3883_pinmux_data[] = {
- GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT3883_GPIO_MODE_SPI),
- GRP("uartf", uartf_func, RT3883_GPIO_MODE_UART0_MASK,
- RT3883_GPIO_MODE_UART0_SHIFT),
- GRP("uartlite", uartlite_func, 1, RT3883_GPIO_MODE_UART1),
- GRP("jtag", jtag_func, 1, RT3883_GPIO_MODE_JTAG),
- GRP("mdio", mdio_func, 1, RT3883_GPIO_MODE_MDIO),
- GRP("lna a", lna_a_func, 1, RT3883_GPIO_MODE_LNA_A),
- GRP("lna g", lna_g_func, 1, RT3883_GPIO_MODE_LNA_G),
- GRP("pci", pci_func, RT3883_GPIO_MODE_PCI_MASK,
- RT3883_GPIO_MODE_PCI_SHIFT),
- GRP("ge1", ge1_func, 1, RT3883_GPIO_MODE_GE1),
- GRP("ge2", ge2_func, 1, RT3883_GPIO_MODE_GE2),
- { 0 }
-};
-
void __init ralink_clk_init(void)
{
unsigned long cpu_rate, sys_rate;
@@ -113,7 +70,7 @@ void __init ralink_of_remap(void)
panic("Failed to remap core resources");
}
-void prom_soc_init(struct ralink_soc_info *soc_info)
+void __init prom_soc_init(struct ralink_soc_info *soc_info)
{
void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE);
const char *name;
@@ -142,7 +99,5 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
soc_info->mem_size_min = RT3883_MEM_SIZE_MIN;
soc_info->mem_size_max = RT3883_MEM_SIZE_MAX;
- rt2880_pinmux_data = rt3883_pinmux_data;
-
ralink_soc = RT3883_SOC;
}
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index dd34f1b32b79..04684990e28e 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -58,37 +58,27 @@ EXPORT_SYMBOL(get_latch_u5);
static struct resource korina_dev0_res[] = {
{
- .name = "korina_regs",
+ .name = "emac",
.start = ETH0_BASE_ADDR,
.end = ETH0_BASE_ADDR + sizeof(struct eth_regs),
.flags = IORESOURCE_MEM,
}, {
- .name = "korina_rx",
+ .name = "rx",
.start = ETH0_DMA_RX_IRQ,
.end = ETH0_DMA_RX_IRQ,
.flags = IORESOURCE_IRQ
}, {
- .name = "korina_tx",
+ .name = "tx",
.start = ETH0_DMA_TX_IRQ,
.end = ETH0_DMA_TX_IRQ,
.flags = IORESOURCE_IRQ
}, {
- .name = "korina_ovr",
- .start = ETH0_RX_OVR_IRQ,
- .end = ETH0_RX_OVR_IRQ,
- .flags = IORESOURCE_IRQ
- }, {
- .name = "korina_und",
- .start = ETH0_TX_UND_IRQ,
- .end = ETH0_TX_UND_IRQ,
- .flags = IORESOURCE_IRQ
- }, {
- .name = "korina_dma_rx",
+ .name = "dma_rx",
.start = ETH0_RX_DMA_ADDR,
.end = ETH0_RX_DMA_ADDR + DMA_CHAN_OFFSET - 1,
.flags = IORESOURCE_MEM,
}, {
- .name = "korina_dma_tx",
+ .name = "dma_tx",
.start = ETH0_TX_DMA_ADDR,
.end = ETH0_TX_DMA_ADDR + DMA_CHAN_OFFSET - 1,
.flags = IORESOURCE_MEM,
@@ -105,6 +95,9 @@ static struct platform_device korina_dev0 = {
.name = "korina",
.resource = korina_dev0_res,
.num_resources = ARRAY_SIZE(korina_dev0_res),
+ .dev = {
+ .platform_data = &korina_dev0_data.mac,
+ }
};
static struct resource cf_slot0_res[] = {
@@ -299,8 +292,6 @@ static int __init plat_setup_devices(void)
/* set the uart clock to the current cpu frequency */
rb532_uart_res[0].uartclk = idt_cpu_freq;
- dev_set_drvdata(&korina_dev0.dev, &korina_dev0_data);
-
gpiod_add_lookup_table(&cf_slot0_gpio_table);
return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs));
}
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index c374f3ceec38..9028dbbb45dd 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/pm.h>
#include <linux/timer.h>
diff --git a/arch/mips/sgi-ip27/TODO b/arch/mips/sgi-ip27/TODO
deleted file mode 100644
index 160857ff1483..000000000000
--- a/arch/mips/sgi-ip27/TODO
+++ /dev/null
@@ -1,19 +0,0 @@
-1. Need to figure out why PCI writes to the IOC3 hang, and if it is okay
-not to write to the IOC3 ever.
-2. Need to figure out RRB allocation in bridge_startup().
-3. Need to figure out why address swaizzling is needed in inw/outw for
-Qlogic scsi controllers.
-4. Need to integrate ip27-klconfig.c:find_lboard and
-ip27-init.c:find_lbaord_real. DONE
-5. Is it okay to set calias space on all nodes as 0, instead of 8k as
-in irix?
-6. Investigate why things do not work without the setup_test() call
-being invoked on all nodes in ip27-memory.c.
-8. Too many do_page_faults invoked - investigate.
-9. start_thread must turn off UX64 ... and define tlb_refill_debug.
-10. Need a bad pmd table, bad pte table. __bad_pmd_table/__bad_pagetable
-does not agree with pgd_bad/pmd_bad.
-11. All intrs (ip27_do_irq handlers) are targeted at cpu A on the node.
-This might need to change later. Only the timer intr is set up to be
-received on both Cpu A and B. (ip27_do_irq()/bridge_startup())
-13. Cache flushing (specially the SMP version) has to be investigated.
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 42df9fafa943..95c1bff1ab9f 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 87bb6945ec25..6173684b5aaa 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -420,5 +420,4 @@ void __init mem_init(void)
high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT);
memblock_free_all();
setup_zero_pages(); /* This comes from node 0 */
- mem_init_print_info(NULL);
}
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 79c434fece52..444b5e0e935f 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copytight (C) 1999, 2000, 05, 06 Ralf Baechle (ralf@linux-mips.org)
- * Copytight (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 1999, 2000, 05, 06 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/bcd.h>
#include <linux/clockchips.h>
diff --git a/arch/mips/sgi-ip30/ip30-irq.c b/arch/mips/sgi-ip30/ip30-irq.c
index e8374e4c705b..ba87704073c8 100644
--- a/arch/mips/sgi-ip30/ip30-irq.c
+++ b/arch/mips/sgi-ip30/ip30-irq.c
@@ -6,6 +6,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c
index 20d8637340be..18d1c115cd53 100644
--- a/arch/mips/sgi-ip32/ip32-reset.c
+++ b/arch/mips/sgi-ip32/ip32-reset.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/panic_notifier.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/notifier.h>
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 2131d3fd7333..1b2ea34c3d3b 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -46,7 +46,7 @@ CFLAGS_vgettimeofday-o32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -in
CFLAGS_vgettimeofday-n32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -include $(c-gettimeofday-y)
endif
-CFLAGS_REMOVE_vgettimeofday.o = -pg
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE)
ifdef CONFIG_MIPS_DISABLE_VDSO
ifndef CONFIG_MIPS_LD_CAN_LINK_VDSO
@@ -60,7 +60,7 @@ ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \
$(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \
-G 0 --eh-frame-hdr --hash-style=sysv --build-id=sha1 -T
-CFLAGS_REMOVE_vdso.o = -pg
+CFLAGS_REMOVE_vdso.o = $(CC_FLAGS_FTRACE)
GCOV_PROFILE := n
UBSAN_SANITIZE := n
diff --git a/arch/nds32/include/asm/memory.h b/arch/nds32/include/asm/memory.h
index 940d32842793..62faafbc28e4 100644
--- a/arch/nds32/include/asm/memory.h
+++ b/arch/nds32/include/asm/memory.h
@@ -76,18 +76,12 @@
* virt_to_page(k) convert a _valid_ virtual address to struct page *
* virt_addr_valid(k) indicates whether a virtual address is valid
*/
-#ifndef CONFIG_DISCONTIGMEM
-
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))
#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
-#else /* CONFIG_DISCONTIGMEM */
-#error CONFIG_DISCONTIGMEM is not supported yet.
-#endif /* !CONFIG_DISCONTIGMEM */
-
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#endif
diff --git a/arch/nds32/include/asm/pgalloc.h b/arch/nds32/include/asm/pgalloc.h
index 85c117347c86..a08e1ebca70e 100644
--- a/arch/nds32/include/asm/pgalloc.h
+++ b/arch/nds32/include/asm/pgalloc.h
@@ -12,11 +12,6 @@
#define __HAVE_ARCH_PTE_ALLOC_ONE
#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
-/*
- * Since we have only two-level page tables, these are trivial
- */
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t * pgd);
diff --git a/arch/nds32/include/asm/uaccess.h b/arch/nds32/include/asm/uaccess.h
index 010ba5f1d7dd..d4cbf069dc22 100644
--- a/arch/nds32/include/asm/uaccess.h
+++ b/arch/nds32/include/asm/uaccess.h
@@ -260,7 +260,6 @@ do { \
extern unsigned long __arch_clear_user(void __user * addr, unsigned long n);
extern long strncpy_from_user(char *dest, const char __user * src, long count);
-extern __must_check long strlen_user(const char __user * str);
extern __must_check long strnlen_user(const char __user * str, long n);
extern unsigned long __arch_copy_from_user(void *to, const void __user * from,
unsigned long n);
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
index 414f8a780cc3..0e23e3a8df6b 100644
--- a/arch/nds32/kernel/ftrace.c
+++ b/arch/nds32/kernel/ftrace.c
@@ -236,7 +236,7 @@ void __naked return_to_handler(void)
"bal ftrace_return_to_handler\n\t"
"move $lp, $r0 \n\t"
- /* restore state nedded by the ABI */
+ /* restore state needed by the ABI */
"lmw.bim $r0,[$sp],$r1,#0x0 \n\t");
}
diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c
index c1327e552ec6..391895b54d13 100644
--- a/arch/nds32/kernel/process.c
+++ b/arch/nds32/kernel/process.c
@@ -239,7 +239,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_start, stack_end;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
if (IS_ENABLED(CONFIG_FRAME_POINTER)) {
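
Several get_wchan() conversions in this series follow the same pattern: the open-coded p->state == TASK_RUNNING test becomes task_is_running(p), which reads the (renamed) state word once. A minimal sketch of the guard, assuming the helper's usual semantics:

    unsigned long get_wchan_sketch(struct task_struct *p)
    {
            /* no wait channel for a missing, current, or runnable task */
            if (!p || p == current || task_is_running(p))
                    return 0;
            /* ... unwind the stack until outside scheduler code ... */
            return 0;
    }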
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
index 6eb98a7ad27d..ad5344ef5d33 100644
--- a/arch/nds32/mm/cacheflush.c
+++ b/arch/nds32/mm/cacheflush.c
@@ -238,7 +238,7 @@ void flush_dcache_page(struct page *page)
{
struct address_space *mapping;
- mapping = page_mapping(page);
+ mapping = page_mapping_file(page);
if (mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
else {
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
index fa86f7b2f416..f63f839738c4 100644
--- a/arch/nds32/mm/init.c
+++ b/arch/nds32/mm/init.c
@@ -191,7 +191,6 @@ void __init mem_init(void)
/* this will put all low memory onto the freelists */
memblock_free_all();
- mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n"
" fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
diff --git a/arch/nios2/include/asm/irq.h b/arch/nios2/include/asm/irq.h
index 13ce37272279..c52c94884e93 100644
--- a/arch/nios2/include/asm/irq.h
+++ b/arch/nios2/include/asm/irq.h
@@ -10,6 +10,5 @@
#define NIOS2_CPU_NR_IRQS 32
#include <asm-generic/irq.h>
-#include <linux/irqdomain.h>
#endif
diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h
index e6600d2a5ae0..3c4ae74d5798 100644
--- a/arch/nios2/include/asm/pgalloc.h
+++ b/arch/nios2/include/asm/pgalloc.h
@@ -25,7 +25,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Initialize a new pmd table with invalid pointers.
diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h
index 2600d76c310c..4a995fa628ee 100644
--- a/arch/nios2/include/asm/pgtable.h
+++ b/arch/nios2/include/asm/pgtable.h
@@ -24,8 +24,6 @@
#include <asm/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>
-#define FIRST_USER_ADDRESS 0UL
-
#define VMALLOC_START CONFIG_NIOS2_KERNEL_MMU_REGION_BASE
#define VMALLOC_END (CONFIG_NIOS2_KERNEL_REGION_BASE - 1)
diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h
index a741abbed6fb..ba9340e96fd4 100644
--- a/arch/nios2/include/asm/uaccess.h
+++ b/arch/nios2/include/asm/uaccess.h
@@ -83,7 +83,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
extern long strncpy_from_user(char *__to, const char __user *__from,
long __len);
-extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *s, long n);
/* Optimized macros */
diff --git a/arch/nios2/kernel/irq.c b/arch/nios2/kernel/irq.c
index 5f3555ce4865..c6a1a9f6ac42 100644
--- a/arch/nios2/kernel/irq.c
+++ b/arch/nios2/kernel/irq.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
static u32 ienable;
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
index c5f916ca6845..9ff37ba2bb60 100644
--- a/arch/nios2/kernel/process.c
+++ b/arch/nios2/kernel/process.c
@@ -223,7 +223,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_page;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
stack_page = (unsigned long)p;
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 65de1bd6a760..6aa9257c3ede 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
index 61862dbb0e32..613fcaa5988a 100644
--- a/arch/nios2/mm/init.c
+++ b/arch/nios2/mm/init.c
@@ -71,7 +71,6 @@ void __init mem_init(void)
/* this will put all memory onto the freelists */
memblock_free_all();
- mem_init_print_info(NULL);
}
void __init mmu_init(void)
diff --git a/arch/openrisc/configs/or1ksim_defconfig b/arch/openrisc/configs/or1ksim_defconfig
index 75f2da324d0e..6e1e004047c7 100644
--- a/arch/openrisc/configs/or1ksim_defconfig
+++ b/arch/openrisc/configs/or1ksim_defconfig
@@ -43,7 +43,6 @@ CONFIG_MICREL_PHY=y
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h
index b589fac39b92..326167e4783a 100644
--- a/arch/openrisc/include/asm/atomic.h
+++ b/arch/openrisc/include/asm/atomic.h
@@ -13,7 +13,7 @@
/* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -30,7 +30,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
/* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -49,7 +49,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
/* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int tmp, old; \
\
@@ -75,6 +75,8 @@ ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor)
+ATOMIC_OP(add)
+ATOMIC_OP(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
@@ -83,16 +85,18 @@ ATOMIC_OP(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_add_return atomic_add_return
-#define atomic_sub_return atomic_sub_return
-#define atomic_fetch_add atomic_fetch_add
-#define atomic_fetch_sub atomic_fetch_sub
-#define atomic_fetch_and atomic_fetch_and
-#define atomic_fetch_or atomic_fetch_or
-#define atomic_fetch_xor atomic_fetch_xor
-#define atomic_and atomic_and
-#define atomic_or atomic_or
-#define atomic_xor atomic_xor
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#define arch_atomic_add arch_atomic_add
+#define arch_atomic_sub arch_atomic_sub
+#define arch_atomic_and arch_atomic_and
+#define arch_atomic_or arch_atomic_or
+#define arch_atomic_xor arch_atomic_xor
/*
* Atomically add a to v->counter as long as v is not already u.
@@ -100,7 +104,7 @@ ATOMIC_OP(xor)
*
* This is often used through atomic_inc_not_zero()
*/
-static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int old, tmp;
@@ -119,8 +123,14 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return old;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-#include <asm-generic/atomic.h>
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+
+#include <asm/cmpxchg.h>
+
+#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v)))
+#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new)))
#endif /* __ASM_OPENRISC_ATOMIC_H */
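
The arch_ prefix lets generic wrappers layer instrumentation over these raw operations; callers keep using the unprefixed names. For instance, the fetch_add_unless primitive above typically backs atomic_inc_not_zero(), roughly (sketch, wrapper plumbing omitted):

    static bool try_get_ref(atomic_t *refs)
    {
            /* succeeds unless *refs was already zero */
            return atomic_inc_not_zero(refs);
            /* ~= arch_atomic_fetch_add_unless(refs, 1, 0) != 0 */
    }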
diff --git a/arch/openrisc/include/asm/barrier.h b/arch/openrisc/include/asm/barrier.h
new file mode 100644
index 000000000000..7538294721be
--- /dev/null
+++ b/arch/openrisc/include/asm/barrier.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#define mb() asm volatile ("l.msync" ::: "memory")
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASM_BARRIER_H */
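
With the new header, OpenRISC gets a real mb() instead of the asm-generic compiler-only barrier, so cross-CPU publish/consume patterns order correctly. A sketch of the classic use (shared variables assumed):

    static int data, ready;

    static void publish(int v)
    {
            data = v;
            mb();                   /* l.msync: data store before flag store */
            WRITE_ONCE(ready, 1);
    }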
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index f9cd43a39d72..79fd16162ccb 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -132,7 +132,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
}
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
(__typeof__(*(ptr))) __cmpxchg((ptr), \
(unsigned long)(o), \
@@ -161,7 +161,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
}
}
-#define xchg(ptr, with) \
+#define arch_xchg(ptr, with) \
({ \
(__typeof__(*(ptr))) __xchg((ptr), \
(unsigned long)(with), \
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 88820299ecc4..b7b2b8d16fad 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -72,6 +72,4 @@ do { \
tlb_remove_page((tlb), (pte)); \
} while (0)
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
#endif
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 9425bedab4fc..4ac591c9ca33 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -73,7 +73,6 @@ extern void paging_init(void);
*/
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
/*
* Kernels own virtual memory area.
diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h
index 185dcd3731ed..dbf030365ab4 100644
--- a/arch/openrisc/include/asm/tlbflush.h
+++ b/arch/openrisc/include/asm/tlbflush.h
@@ -25,7 +25,7 @@
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
- * - flush_tlb_range(mm, start, end) flushes a range of pages
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
*/
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
diff --git a/arch/openrisc/include/asm/unaligned.h b/arch/openrisc/include/asm/unaligned.h
deleted file mode 100644
index 14353f2101f2..000000000000
--- a/arch/openrisc/include/asm/unaligned.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * OpenRISC Linux
- *
- * Linux architectural port borrowing liberally from similar works of
- * others. All original copyrights apply as per the original source
- * declaration.
- *
- * OpenRISC implementation:
- * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
- * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
- * et al.
- */
-
-#ifndef __ASM_OPENRISC_UNALIGNED_H
-#define __ASM_OPENRISC_UNALIGNED_H
-
-/*
- * This is copied from the generic implementation and the C-struct
- * variant replaced with the memmove variant. The GCC compiler
- * for the OR32 arch optimizes too aggressively for the C-struct
- * variant to work, so use the memmove variant instead.
- *
- * It may be worth considering implementing the unaligned access
- * exception handler and allowing unaligned accesses (access_ok.h)...
- * not sure if it would be much of a performance win without further
- * investigation.
- */
-#include <asm/byteorder.h>
-
-#if defined(__LITTLE_ENDIAN)
-# include <linux/unaligned/le_memmove.h>
-# include <linux/unaligned/be_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#elif defined(__BIG_ENDIAN)
-# include <linux/unaligned/be_memmove.h>
-# include <linux/unaligned/le_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#else
-# error need to define endianess
-#endif
-
-#endif /* __ASM_OPENRISC_UNALIGNED_H */
diff --git a/arch/openrisc/kernel/asm-offsets.c b/arch/openrisc/kernel/asm-offsets.c
index 18c703d1d761..710651d5aaae 100644
--- a/arch/openrisc/kernel/asm-offsets.c
+++ b/arch/openrisc/kernel/asm-offsets.c
@@ -37,7 +37,6 @@
int main(void)
{
/* offsets into the task_struct */
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index 2416a9f91533..c6f9e7b9f7cb 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -278,6 +278,8 @@ void calibrate_delay(void)
pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100, loops_per_jiffy);
+
+ of_node_put(cpu);
}
void __init setup_arch(char **cmdline_p)
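
The added of_node_put() fixes a device-tree refcount leak: the cpu node looked up earlier in calibrate_delay() was never released. The general pairing rule, as a sketch (lookup helper illustrative):

    struct device_node *np = of_get_cpu_node(0, NULL);

    if (np) {
            /* ... read clock-frequency etc. ... */
            of_node_put(np);        /* drop the reference the lookup took */
    }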
diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
index 48e1092a64de..415e209732a3 100644
--- a/arch/openrisc/kernel/smp.c
+++ b/arch/openrisc/kernel/smp.c
@@ -145,8 +145,6 @@ asmlinkage __init void secondary_start_kernel(void)
set_cpu_online(cpu, true);
local_irq_enable();
-
- preempt_disable();
/*
* OK, it's off to the idle thread for us
*/
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index bf9b2310fc93..cfef61a7b6c2 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -75,7 +75,6 @@ static void __init map_ram(void)
/* These mark extents of read-only kernel pages...
* ...from vmlinux.lds.S
*/
- struct memblock_region *region;
v = PAGE_OFFSET;
@@ -121,7 +120,7 @@ static void __init map_ram(void)
}
printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
- region->base, region->base + region->size);
+ start, end);
}
}
@@ -129,7 +128,6 @@ void __init paging_init(void)
{
extern void tlb_init(void);
- unsigned long end;
int i;
printk(KERN_INFO "Setting up paging and PTEs.\n");
@@ -145,8 +143,6 @@ void __init paging_init(void)
*/
current_pgd[smp_processor_id()] = init_mm.pgd;
- end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
-
map_ram();
zone_sizes_init();
@@ -211,8 +207,6 @@ void __init mem_init(void)
/* this will put all low memory onto the freelists */
memblock_free_all();
- mem_init_print_info(NULL);
-
printk("mem_init_done ...........................................\n");
mem_init_done = 1;
return;
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 4e53ac46e857..bde9907bc5b2 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -12,6 +12,7 @@ config PARISC
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_NO_SG_CHAIN
+ select ARCH_SUPPORTS_HUGETLBFS if PA20
select ARCH_SUPPORTS_MEMORY_FAILURE
select DMA_OPS
select RTC_CLASS
@@ -138,10 +139,6 @@ config PGTABLE_LEVELS
default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
default 2
-config SYS_SUPPORTS_HUGETLBFS
- def_bool y if PA20
-
-
menu "Processor type and features"
choice
@@ -203,9 +200,12 @@ config PREFETCH
def_bool y
depends on PA8X00 || PA7200
+config PARISC_HUGE_KERNEL
+ def_bool y if !MODULES || UBSAN || FTRACE || COMPILE_TEST
+
config MLONGCALLS
- def_bool y if !MODULES || UBSAN || FTRACE
- bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE
+ def_bool y if PARISC_HUGE_KERNEL
+ bool "Enable the -mlong-calls compiler option for big kernels" if !PARISC_HUGE_KERNEL
depends on PA8X00
help
If you configure the kernel to include many drivers built-in instead
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 7d9f71aa829a..aed8ea29268b 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -41,7 +41,7 @@ endif
export LD_BFD
-ifneq ($(SUBARCH),$(UTS_MACHINE))
+ifdef cross_compiling
ifeq ($(CROSS_COMPILE),)
CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
CROSS_COMPILE := $(call cc-cross-prefix, \
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 4406475a2304..e6e7f74c8ac9 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
-generated-y += syscall_table_c32.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += user.h
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 21b375c67e53..dd5a299ada69 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -56,7 +56,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
* are atomic, so a reader never sees inconsistent values.
*/
-static __inline__ void atomic_set(atomic_t *v, int i)
+static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
@@ -66,19 +66,19 @@ static __inline__ void atomic_set(atomic_t *v, int i)
_atomic_spin_unlock_irqrestore(v, flags);
}
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
-static __inline__ int atomic_read(const atomic_t *v)
+static __inline__ int arch_atomic_read(const atomic_t *v)
{
return READ_ONCE((v)->counter);
}
/* exported interface */
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#define ATOMIC_OP(op, c_op) \
-static __inline__ void atomic_##op(int i, atomic_t *v) \
+static __inline__ void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -88,7 +88,7 @@ static __inline__ void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
+static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -101,7 +101,7 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \
+static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
int ret; \
@@ -141,7 +141,7 @@ ATOMIC_OPS(xor, ^=)
#define ATOMIC64_INIT(i) { (i) }
#define ATOMIC64_OP(op, c_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
unsigned long flags; \
\
@@ -151,7 +151,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
}
#define ATOMIC64_OP_RETURN(op, c_op) \
-static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
+static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \
{ \
unsigned long flags; \
s64 ret; \
@@ -164,7 +164,7 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
}
#define ATOMIC64_FETCH_OP(op, c_op) \
-static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \
+static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
{ \
unsigned long flags; \
s64 ret; \
@@ -200,7 +200,7 @@ ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OP
static __inline__ void
-atomic64_set(atomic64_t *v, s64 i)
+arch_atomic64_set(atomic64_t *v, s64 i)
{
unsigned long flags;
_atomic_spin_lock_irqsave(v, flags);
@@ -210,18 +210,18 @@ atomic64_set(atomic64_t *v, s64 i)
_atomic_spin_unlock_irqrestore(v, flags);
}
-#define atomic64_set_release(v, i) atomic64_set((v), (i))
+#define arch_atomic64_set_release(v, i) arch_atomic64_set((v), (i))
static __inline__ s64
-atomic64_read(const atomic64_t *v)
+arch_atomic64_read(const atomic64_t *v)
{
return READ_ONCE((v)->counter);
}
/* exported interface */
-#define atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif /* !CONFIG_64BIT */
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index cf5ee9b0b393..5f274be10567 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -44,7 +44,7 @@ __xchg(unsigned long x, volatile void *ptr, int size)
** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p);
*/
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
@@ -72,13 +72,13 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr,
(unsigned int)old, (unsigned int)new_);
- case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
+ case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
}
__cmpxchg_called_with_bad_pointer();
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -98,7 +98,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#endif
case 4: return __cmpxchg_u32(ptr, old, new_);
default:
- return __cmpxchg_local_generic(ptr, old, new_, size);
+ return __generic_cmpxchg_local(ptr, old, new_, size);
}
}
@@ -106,19 +106,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
#else
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif
-#define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
+#define arch_cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
#endif /* _ASM_PARISC_CMPXCHG_H_ */
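
As elsewhere in this series, the bare cmpxchg()/xchg() names gain the arch_ prefix so instrumented generic wrappers can sit on top; note also that the u8 case now masks with & 0xff rather than casting. Callers are unchanged and still write the usual retry loop, sketched here:

    static void add_sketch(unsigned int *p, unsigned int delta)
    {
            unsigned int old;

            do {
                    old = READ_ONCE(*p);
                    /* cmpxchg() now resolves to arch_cmpxchg() underneath */
            } while (cmpxchg(p, old, old + delta) != old);
    }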
diff --git a/arch/parisc/include/asm/io.h b/arch/parisc/include/asm/io.h
index 8a11b8cf4719..0b5259102319 100644
--- a/arch/parisc/include/asm/io.h
+++ b/arch/parisc/include/asm/io.h
@@ -316,11 +316,6 @@ extern void iowrite64be(u64 val, void __iomem *addr);
*/
#define xlate_dev_mem_ptr(p) __va(p)
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
extern int devmem_is_allowed(unsigned long pfn);
#endif
diff --git a/arch/parisc/include/asm/pdc_chassis.h b/arch/parisc/include/asm/pdc_chassis.h
index ae3e108d22ad..d6d82f53d3d0 100644
--- a/arch/parisc/include/asm/pdc_chassis.h
+++ b/arch/parisc/include/asm/pdc_chassis.h
@@ -365,4 +365,3 @@ void parisc_pdc_chassis_init(void);
PDC_CHASSIS_EOM_SET )
#endif /* _PARISC_PDC_CHASSIS_H */
-/* vim: set ts=8 */
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index dda557085311..6a7e98e71f1d 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -69,6 +69,5 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 39017210dbf0..7f33c29764cc 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -171,8 +171,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
* pgd entries used up by user/kernel:
*/
-#define FIRST_USER_ADDRESS 0UL
-
/* NB: The tlb miss handlers make certain assumptions about the order */
/* of the following bits, so be careful (One example, bits 25-31 */
/* are moved together in one instruction). */
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 11ece0d07374..b5fbcd2c1780 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -272,7 +272,6 @@ on downward growing arches, it looks like this:
regs->gr[23] = 0; \
} while(0)
-struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
diff --git a/arch/parisc/include/asm/unaligned.h b/arch/parisc/include/asm/unaligned.h
index e9029c7c2a69..3bda16773ba6 100644
--- a/arch/parisc/include/asm/unaligned.h
+++ b/arch/parisc/include/asm/unaligned.h
@@ -2,11 +2,7 @@
#ifndef _ASM_PARISC_UNALIGNED_H
#define _ASM_PARISC_UNALIGNED_H
-#include <linux/unaligned/be_struct.h>
-#include <linux/unaligned/le_byteshift.h>
-#include <linux/unaligned/generic.h>
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
+#include <asm-generic/unaligned.h>
#ifdef __KERNEL__
struct pt_regs;
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index ab78cba446ed..9e3c010c0f61 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -52,6 +52,9 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
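
MADV_POPULATE_READ/WRITE let userspace prefault page tables in one call instead of touching every page by hand. A hedged userspace sketch using the parisc value just defined:

    #include <stddef.h>
    #include <sys/mman.h>

    #ifndef MADV_POPULATE_READ
    #define MADV_POPULATE_READ 22   /* value from the hunk above */
    #endif

    static void prefault_for_read(void *buf, size_t len)
    {
            /* error handling elided; fails with EINVAL on older kernels */
            madvise(buf, len, MADV_POPULATE_READ);
    }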
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index f60904329bbc..5b5351cdcb33 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -119,6 +119,8 @@
#define SO_PREFER_BUSY_POLL 0x4043
#define SO_BUSY_POLL_BUDGET 0x4044
+#define SO_NETNS_COOKIE 0x4045
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index cd2cc1b1648c..33113ba24054 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -42,7 +42,6 @@
int main(void)
{
DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
- DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
diff --git a/arch/parisc/kernel/pdc_chassis.c b/arch/parisc/kernel/pdc_chassis.c
index 75ae88d13909..da154406d368 100644
--- a/arch/parisc/kernel/pdc_chassis.c
+++ b/arch/parisc/kernel/pdc_chassis.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/cache.h>
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index b144fbe29bc1..184ec3c1eae4 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -249,7 +249,7 @@ get_wchan(struct task_struct *p)
unsigned long ip;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
/*
@@ -260,7 +260,7 @@ get_wchan(struct task_struct *p)
do {
if (unwind_once(&info) < 0)
return 0;
- if (p->state == TASK_RUNNING)
+ if (task_is_running(p))
return 0;
ip = info.ip;
if (!in_sched_functions(ip))
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 2127974982df..65de6c4c9354 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -567,8 +567,6 @@ static const struct user_regset_view user_parisc_native_view = {
};
#ifdef CONFIG_64BIT
-#include <linux/compat.h>
-
static int gpr32_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index e320bae501d3..3fb86ee507dd 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -268,7 +268,7 @@ static int __init parisc_init_resources(void)
result = request_resource(&iomem_resource, &local_broadcast);
if (result < 0) {
printk(KERN_ERR
- "%s: failed to claim %saddress space!\n",
+ "%s: failed to claim %s address space!\n",
__FILE__, local_broadcast.name);
return result;
}
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 10227f667c8a..1405b603b91b 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -302,7 +302,6 @@ void __init smp_callin(unsigned long pdce_proc)
#endif
smp_cpu_init(slave_id);
- preempt_disable();
flush_cache_all_local(); /* start with known state */
flush_tlb_all_local(NULL);
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 322503780db6..3f24a0af1e04 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -919,24 +919,24 @@ ENTRY(lws_table)
END(lws_table)
/* End of lws table */
+#ifdef CONFIG_64BIT
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
+#else
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+#endif
#define __SYSCALL(nr, entry) ASM_ULONG_INSN entry
.align 8
ENTRY(sys_call_table)
.export sys_call_table,data
-#ifdef CONFIG_64BIT
-#include <asm/syscall_table_c32.h> /* Compat syscalls */
-#else
-#include <asm/syscall_table_32.h> /* 32-bit native syscalls */
-#endif
+#include <asm/syscall_table_32.h> /* 32-bit syscalls */
END(sys_call_table)
#ifdef CONFIG_64BIT
.align 8
ENTRY(sys_call_table64)
-#include <asm/syscall_table_64.h> /* 64-bit native syscalls */
+#include <asm/syscall_table_64.h> /* 64-bit syscalls */
END(sys_call_table64)
#endif
-#undef __SYSCALL
/*
All light-weight-syscall atomic operations
@@ -961,5 +961,3 @@ END(lws_lock_start)
.previous
.end
-
-
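
With the generated c32 table gone, both parisc tables are built from syscall_table_32.h: each generated entry uses __SYSCALL_WITH_COMPAT, and the #ifdef above selects the column. A hedged illustration of the expansion for one entry from the table file:

    /* generated: */
    __SYSCALL_WITH_COMPAT(441, sys_epoll_pwait2, compat_sys_epoll_pwait2)
    /* 32-bit kernel:  ASM_ULONG_INSN sys_epoll_pwait2        */
    /* 64-bit kernel:  ASM_ULONG_INSN compat_sys_epoll_pwait2 */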
diff --git a/arch/parisc/kernel/syscalls/Makefile b/arch/parisc/kernel/syscalls/Makefile
index 283f64407b07..0f2ea5bcb0d7 100644
--- a/arch/parisc/kernel/syscalls/Makefile
+++ b/arch/parisc/kernel/syscalls/Makefile
@@ -6,46 +6,34 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
syscall := $(src)/syscall.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abis_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '$(syshdr_offset_$(basetarget))'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis $(abis) $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
- '$(systbl_abis_$(basetarget))' \
- '$(systbl_abi_$(basetarget))' \
- '$(systbl_offset_$(basetarget))'
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@
-syshdr_abis_unistd_32 := common,32
+$(uapi)/unistd_32.h: abis := common,32
$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abis_unistd_64 := common,64
+$(uapi)/unistd_64.h: abis := common,64
$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-systbl_abis_syscall_table_32 := common,32
+$(kapi)/syscall_table_32.h: abis := common,32
$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
-systbl_abis_syscall_table_64 := common,64
+$(kapi)/syscall_table_64.h: abis := common,64
$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
-systbl_abis_syscall_table_c32 := common,32
-systbl_abi_syscall_table_c32 := c32
-$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE
- $(call if_changed,systbl)
-
uapisyshdr-y += unistd_32.h unistd_64.h
kapisyshdr-y += syscall_table_32.h \
- syscall_table_64.h \
- syscall_table_c32.h
+ syscall_table_64.h
uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 271a92519683..e26187b9ab87 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -440,3 +440,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
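
These entries (quotactl_fd plus the three Landlock calls) wire the new syscalls into parisc. A hedged userspace sketch invoking one by number; the generated unistd.h would normally define __NR_landlock_create_ruleset:

    #include <stddef.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_landlock_create_ruleset
    #define __NR_landlock_create_ruleset 444        /* from the table above */
    #endif

    static long landlock_create_ruleset_sketch(const void *attr, size_t size,
                                               unsigned int flags)
    {
            return syscall(__NR_landlock_create_ruleset, attr, size, flags);
    }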
diff --git a/arch/parisc/kernel/syscalls/syscallhdr.sh b/arch/parisc/kernel/syscalls/syscallhdr.sh
deleted file mode 100644
index 730db288fe54..000000000000
--- a/arch/parisc/kernel/syscalls/syscallhdr.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_UAPI_ASM_PARISC_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- printf "#ifndef %s\n" "${fileguard}"
- printf "#define %s\n" "${fileguard}"
- printf "\n"
-
- nxt=0
- while read nr abi name entry compat ; do
- if [ -z "$offset" ]; then
- printf "#define __NR_%s%s\t%s\n" \
- "${prefix}" "${name}" "${nr}"
- else
- printf "#define __NR_%s%s\t(%s + %s)\n" \
- "${prefix}" "${name}" "${offset}" "${nr}"
- fi
- nxt=$((nr+1))
- done
-
- printf "\n"
- printf "#ifdef __KERNEL__\n"
- printf "#define __NR_syscalls\t%s\n" "${nxt}"
- printf "#endif\n"
- printf "\n"
- printf "#endif /* %s */\n" "${fileguard}"
-) > "$out"
diff --git a/arch/parisc/kernel/syscalls/syscalltbl.sh b/arch/parisc/kernel/syscalls/syscalltbl.sh
deleted file mode 100644
index f7393a7b18aa..000000000000
--- a/arch/parisc/kernel/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-my_abi="$4"
-offset="$5"
-
-emit() {
- t_nxt="$1"
- t_nr="$2"
- t_entry="$3"
-
- while [ $t_nxt -lt $t_nr ]; do
- printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}"
- t_nxt=$((t_nxt+1))
- done
- printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}"
-}
-
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- nxt=0
- if [ -z "$offset" ]; then
- offset=0
- fi
-
- while read nr abi name entry compat ; do
- if [ "$my_abi" = "c32" ] && [ ! -z "$compat" ]; then
- emit $((nxt+offset)) $((nr+offset)) $compat
- else
- emit $((nxt+offset)) $((nr+offset)) $entry
- fi
- nxt=$((nr+1))
- done
-) > "$out"
diff --git a/arch/parisc/math-emu/fpu.h b/arch/parisc/math-emu/fpu.h
index 853c19c03828..dec951d40286 100644
--- a/arch/parisc/math-emu/fpu.h
+++ b/arch/parisc/math-emu/fpu.h
@@ -5,34 +5,10 @@
* Floating-point emulation code
* Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org>
*/
-/*
- * BEGIN_DESC
- *
- * File:
- * @(#) pa/fp/fpu.h $Revision: 1.1 $
- *
- * Purpose:
- * <<please update with a synopis of the functionality provided by this file>>
- *
- *
- * END_DESC
-*/
-
-#ifdef __NO_PA_HDRS
- PA header file -- do not include this header file for non-PA builds.
-#endif
-
#ifndef _MACHINE_FPU_INCLUDED /* allows multiple inclusion */
#define _MACHINE_FPU_INCLUDED
-#if 0
-#ifndef _SYS_STDSYMS_INCLUDED
-# include <sys/stdsyms.h>
-#endif /* _SYS_STDSYMS_INCLUDED */
-#include <machine/pdc/pdc_rqsts.h>
-#endif
-
#define PA83_FPU_FLAG 0x00000001
#define PA89_FPU_FLAG 0x00000002
#define PA2_0_FPU_FLAG 0x00000010
@@ -43,21 +19,19 @@
#define COPR_FP 0x00000080 /* Floating point -- Coprocessor 0 */
#define SFU_MPY_DIVIDE 0x00008000 /* Multiply/Divide __ SFU 0 */
-
#define EM_FPU_TYPE_OFFSET 272
/* version of EMULATION software for COPR,0,0 instruction */
#define EMULATION_VERSION 4
/*
- * The only was to differeniate between TIMEX and ROLEX (or PCX-S and PCX-T)
- * is thorough the potential type field from the PDC_MODEL call. The
- * following flags are used at assist this differeniation.
+ * The only way to differentiate between TIMEX and ROLEX (or PCX-S and PCX-T)
+ * is through the potential type field from the PDC_MODEL call.
+ * The following flags are used to assist this differentiation.
*/
#define ROLEX_POTENTIAL_KEY_FLAGS PDC_MODEL_CPU_KEY_WORD_TO_IO
#define TIMEX_POTENTIAL_KEY_FLAGS (PDC_MODEL_CPU_KEY_QUAD_STORE | \
PDC_MODEL_CPU_KEY_RECIP_SQRT)
-
#endif /* ! _MACHINE_FPU_INCLUDED */
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
index 43652de5f139..d1d3990b83f6 100644
--- a/arch/parisc/mm/hugetlbpage.c
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -44,7 +44,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
}
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 9ca4e4ff6895..591a4e939415 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -573,8 +573,6 @@ void __init mem_init(void)
#endif
parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
- mem_init_print_info(NULL);
-
#if 0
/*
* Do not expose the virtual kernel memory layout to userspace.
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 386ae12d8523..d01e3401581d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -118,37 +118,45 @@ config PPC
# Please keep this list sorted alphabetically.
#
select ARCH_32BIT_OFF_T if PPC32
+ select ARCH_ENABLE_MEMORY_HOTPLUG
+ select ARCH_ENABLE_MEMORY_HOTREMOVE
+ select ARCH_HAS_COPY_MC if PPC64
select ARCH_HAS_DEBUG_VIRTUAL
+ select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_DMA_MAP_DIRECT if PPC_PSERIES
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_HAS_KCOV
select ARCH_HAS_HUGEPD if HUGETLB_PAGE
+ select ARCH_HAS_KCOV
+ select ARCH_HAS_MEMBARRIER_CALLBACKS
+ select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_MEMREMAP_COMPAT_ALIGN
select ARCH_HAS_MMIOWB if PPC64
+ select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API
- select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64
select ARCH_HAS_PTE_SPECIAL
- select ARCH_HAS_MEMBARRIER_CALLBACKS
- select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
- select ARCH_HAS_STRICT_KERNEL_RWX if (PPC32 && !HIBERNATION)
+ select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
+ select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX && !PPC_BOOK3S_32
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
- select ARCH_HAS_COPY_MC if PPC64
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
+ select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC32 || PPC_BOOK3S_64
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
+ select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS
select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS
select ARCH_WANT_IPC_PARSE_VERSION
@@ -159,9 +167,8 @@ config PPC
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
- select DMA_OPS if PPC64
select DMA_OPS_BYPASS if PPC64
- select ARCH_HAS_DMA_MAP_DIRECT if PPC64 && PPC_PSERIES
+ select DMA_OPS if PPC64
select DYNAMIC_FTRACE if FUNCTION_TRACER
select EDAC_ATOMIC_SCRUB
select EDAC_SUPPORT
@@ -171,6 +178,7 @@ config PPC
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES if PPC_BARRIER_NOSPEC
select GENERIC_EARLY_IOREMAP
+ select GENERIC_GETTIMEOFDAY
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_PCI_IOMAP if PCI
@@ -178,12 +186,15 @@ config PPC
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
- select GENERIC_GETTIMEOFDAY
+ select GENERIC_VDSO_TIME_NS
select HAVE_ARCH_AUDITSYSCALL
- select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU
+ select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
+ select HAVE_ARCH_HUGE_VMAP if PPC_RADIX_MMU || PPC_8xx
select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
select HAVE_ARCH_KASAN_VMALLOC if PPC32 && PPC_PAGE_SHIFT <= 14
+ select HAVE_ARCH_KFENCE if PPC32
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
@@ -191,16 +202,13 @@ config PPC
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ASM_MODVERSIONS
- select HAVE_C_RECORDMCOUNT
- select HAVE_CBPF_JIT if !PPC64
- select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
- select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
select HAVE_CONTEXT_TRACKING if PPC64
+ select HAVE_C_RECORDMCOUNT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
- select HAVE_EBPF_JIT if PPC64
+ select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
@@ -209,10 +217,13 @@ config PPC
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS if GCC_VERSION >= 50200 # plugin support on gcc <= 5.1 is buggy on PPC
select HAVE_GENERIC_VDSO
+ select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC_BOOK3S_64 && SMP
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
select HAVE_IDE
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA if DEFAULT_UIMAGE
select HAVE_KERNEL_LZO if DEFAULT_UIMAGE
@@ -224,25 +235,25 @@ config PPC
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
- select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S)
- select HAVE_OPTPROBES if PPC64
+ select HAVE_OPTPROBES
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI if PPC64
- select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
- select MMU_GATHER_RCU_TABLE_FREE
- select MMU_GATHER_PAGE_SIZE
select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
+ select HAVE_RELIABLE_STACKTRACE
+ select HAVE_RSEQ
select HAVE_SOFTIRQ_ON_OWN_STACK
+ select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
+ select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
- select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_RSEQ
+ select HUGETLB_PAGE_SIZE_VARIABLE if PPC_BOOK3S_64 && HUGETLB_PAGE
select IOMMU_HELPER if PPC64
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
+ select MMU_GATHER_PAGE_SIZE
+ select MMU_GATHER_RCU_TABLE_FREE
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE if PPC64 || NOT_COHERENT_CACHE
select NEED_SG_DMA_LENGTH
@@ -257,6 +268,7 @@ config PPC
select PPC_DAWR if PPC64
select RTC_LIB
select SPARSE_IRQ
+ select STRICT_KERNEL_RWX if STRICT_MODULE_RWX
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select VIRT_TO_BUS if !PPC64
@@ -280,6 +292,7 @@ config PANIC_TIMEOUT
config COMPAT
bool "Enable support for 32bit binaries"
depends on PPC64
+ depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
default y if !CPU_LITTLE_ENDIAN
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
@@ -394,10 +407,6 @@ config PPC_ADV_DEBUG_DAC_RANGE
config PPC_DAWR
bool
-config ZONE_DMA
- bool
- default y if PPC_BOOK3E_64
-
config PGTABLE_LEVELS
int
default 2 if !PPC64
@@ -415,14 +424,9 @@ config HIGHMEM
source "kernel/Kconfig.hz"
-config HUGETLB_PAGE_SIZE_VARIABLE
- bool
- depends on HUGETLB_PAGE && PPC_BOOK3S_64
- default y
-
config MATH_EMULATION
bool "Math emulation"
- depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE
+ depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE || PPC_MICROWATT
select PPC_FPU_REGS
help
Some PowerPC chips designed for embedded applications do not have
@@ -515,12 +519,6 @@ config ARCH_CPU_PROBE_RELEASE
def_bool y
depends on HOTPLUG_CPU
-config ARCH_ENABLE_MEMORY_HOTPLUG
- def_bool y
-
-config ARCH_ENABLE_MEMORY_HOTREMOVE
- def_bool y
-
config PPC64_SUPPORTS_MEMORY_FAILURE
bool "Add support for memory hwpoison"
depends on PPC_BOOK3S_64
@@ -548,7 +546,7 @@ config KEXEC
config KEXEC_FILE
bool "kexec file based system call"
select KEXEC_CORE
- select HAVE_IMA_KEXEC
+ select HAVE_IMA_KEXEC if IMA
select BUILD_BIN2C
select KEXEC_ELF
depends on PPC64
@@ -673,7 +671,7 @@ config NODES_SHIFT
int
default "8" if PPC64
default "4"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
config USE_PERCPU_NUMA_NODE_ID
def_bool y
@@ -700,9 +698,6 @@ config ARCH_SPARSEMEM_DEFAULT
def_bool y
depends on PPC_BOOK3S_64
-config SYS_SUPPORTS_HUGETLBFS
- bool
-
config ILLEGAL_POINTER_VALUE
hex
# This is roughly half way between the top of user space and the bottom
@@ -786,7 +781,7 @@ config THREAD_SHIFT
config DATA_SHIFT_BOOL
bool "Set custom data alignment"
depends on ADVANCED_OPTIONS
- depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC
+ depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE
depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX)
help
This option allows you to set the kernel data alignment. When
@@ -798,13 +793,13 @@ config DATA_SHIFT_BOOL
config DATA_SHIFT
int "Data shift" if DATA_SHIFT_BOOL
default 24 if STRICT_KERNEL_RWX && PPC64
- range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_BOOK3S_32
- range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC) && PPC_8xx
+ range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
+ range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
- default 18 if DEBUG_PAGEALLOC && PPC_BOOK3S_32
+ default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
default 23 if STRICT_KERNEL_RWX && PPC_8xx
- default 23 if DEBUG_PAGEALLOC && PPC_8xx && PIN_TLB_DATA
- default 19 if DEBUG_PAGEALLOC && PPC_8xx
+ default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA
+ default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
default PPC_PAGE_SHIFT
help
On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO.
@@ -1217,7 +1212,7 @@ config TASK_SIZE_BOOL
config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
default "0x80000000" if PPC_8xx
- default "0xb0000000" if PPC_BOOK3S_32 && STRICT_KERNEL_RWX
+ default "0xb0000000" if PPC_BOOK3S_32
default "0xc0000000"
endmenu
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index ae084357994e..205cd77f321f 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -84,6 +84,11 @@ config MSI_BITMAP_SELFTEST
config PPC_IRQ_SOFT_MASK_DEBUG
bool "Include extra checks for powerpc irq soft masking"
+ depends on PPC64
+
+config PPC_RFI_SRR_DEBUG
+ bool "Include extra checks for RFI SRR register validity"
+ depends on PPC_BOOK3S_64
config XMON
bool "Include xmon kernel debugger"
@@ -353,6 +358,7 @@ config PPC_EARLY_DEBUG_CPM_ADDR
config FAIL_IOMMU
bool "Fault-injection capability for IOMMU"
depends on FAULT_INJECTION
+ depends on PCI || IBMVIO
help
Provide fault-injection capability for IOMMU. Each device can
be selectively enabled via the fail_iommu property.
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 5f8544cf724a..712c5e8768ce 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -181,12 +181,6 @@ CC_FLAGS_FTRACE := -pg
ifdef CONFIG_MPROFILE_KERNEL
CC_FLAGS_FTRACE += -mprofile-kernel
endif
-# Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8
-# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199
-# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828
-ifndef CONFIG_CC_IS_CLANG
-CC_FLAGS_FTRACE += $(call cc-ifversion, -lt, 0409, -mno-sched-epilog)
-endif
endif
CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
@@ -382,6 +376,16 @@ ppc64_book3e_allmodconfig:
$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/85xx-64bit.config \
-f $(srctree)/Makefile allmodconfig
+PHONY += ppc32_randconfig
+ppc32_randconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/32-bit.config \
+ -f $(srctree)/Makefile randconfig
+
+PHONY += ppc64_randconfig
+ppc64_randconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/64-bit.config \
+ -f $(srctree)/Makefile randconfig
+
define archhelp
@echo '* zImage - Build default images selected by kernel config'
@echo ' zImage.* - Compressed kernel image (arch/$(ARCH)/boot/zImage.*)'
@@ -444,12 +448,15 @@ endif
endif
ifdef CONFIG_SMP
+ifdef CONFIG_PPC32
prepare: task_cpu_prepare
PHONY += task_cpu_prepare
task_cpu_prepare: prepare0
$(eval KBUILD_CFLAGS += -D_TASK_CPU=$(shell awk '{if ($$2 == "TASK_CPU") print $$3;}' include/generated/asm-offsets.h))
-endif
+
+endif # CONFIG_PPC32
+endif # CONFIG_SMP
PHONY += checkbin
# Check toolchain versions:
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 2b8da923ceca..e312ea802aa6 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -163,6 +163,8 @@ src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
src-plat-$(CONFIG_MVME7100) += motload-head.S mvme7100.c
+src-plat-$(CONFIG_PPC_MICROWATT) += fixed-head.S microwatt.c
+
src-wlib := $(sort $(src-wlib-y))
src-plat := $(sort $(src-plat-y))
src-boot := $(src-wlib) $(src-plat) empty.c
@@ -227,7 +229,7 @@ $(obj)/wrapper.a: $(obj-wlib) FORCE
hostprogs := addnote hack-coff mktree
-targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a)
+targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a) zImage.lds
extra-y := $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
$(obj)/zImage.lds $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds
@@ -355,6 +357,8 @@ image-$(CONFIG_MVME5100) += dtbImage.mvme5100
# Board port in arch/powerpc/platform/amigaone/Kconfig
image-$(CONFIG_AMIGAONE) += cuImage.amigaone
+image-$(CONFIG_PPC_MICROWATT) += dtbImage.microwatt
+
# For 32-bit powermacs, build the COFF and miboot images
# as well as the ELF images.
ifdef CONFIG_PPC32
diff --git a/arch/powerpc/boot/decompress.c b/arch/powerpc/boot/decompress.c
index 6098b879ac97..977eb15a6d17 100644
--- a/arch/powerpc/boot/decompress.c
+++ b/arch/powerpc/boot/decompress.c
@@ -99,8 +99,8 @@ static void print_err(char *s)
* partial_decompress - decompresses part or all of a compressed buffer
* @inbuf: input buffer
* @input_size: length of the input buffer
- * @outbuf: input buffer
- * @output_size: length of the input buffer
+ * @outbuf: output buffer
+ * @output_size: length of the output buffer
* @skip: number of output bytes to ignore
*
* This function takes compressed data from inbuf, decompresses and writes it to
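A hedged caller sketch (argument order per the kernel-doc above; the buffer names are hypothetical and error handling is elided):

	/* Decompress the whole payload, skipping no output bytes. */
	long len = partial_decompress(vmlinuz_addr, vmlinuz_size,
				      vmlinux_addr, vmlinux_size, 0);
	if (len < 0)
		fatal("Decompression failed with error %ld\n\r", len);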
diff --git a/arch/powerpc/boot/devtree.c b/arch/powerpc/boot/devtree.c
index 5d91036ad626..58fbcfcc98c9 100644
--- a/arch/powerpc/boot/devtree.c
+++ b/arch/powerpc/boot/devtree.c
@@ -13,6 +13,7 @@
#include "string.h"
#include "stdio.h"
#include "ops.h"
+#include "of.h"
void dt_fixup_memory(u64 start, u64 size)
{
@@ -23,21 +24,25 @@ void dt_fixup_memory(u64 start, u64 size)
root = finddevice("/");
if (getprop(root, "#address-cells", &naddr, sizeof(naddr)) < 0)
naddr = 2;
+ else
+ naddr = be32_to_cpu(naddr);
if (naddr < 1 || naddr > 2)
fatal("Can't cope with #address-cells == %d in /\n\r", naddr);
if (getprop(root, "#size-cells", &nsize, sizeof(nsize)) < 0)
nsize = 1;
+ else
+ nsize = be32_to_cpu(nsize);
if (nsize < 1 || nsize > 2)
fatal("Can't cope with #size-cells == %d in /\n\r", nsize);
i = 0;
if (naddr == 2)
- memreg[i++] = start >> 32;
- memreg[i++] = start & 0xffffffff;
+ memreg[i++] = cpu_to_be32(start >> 32);
+ memreg[i++] = cpu_to_be32(start & 0xffffffff);
if (nsize == 2)
- memreg[i++] = size >> 32;
- memreg[i++] = size & 0xffffffff;
+ memreg[i++] = cpu_to_be32(size >> 32);
+ memreg[i++] = cpu_to_be32(size & 0xffffffff);
memory = finddevice("/memory");
if (! memory) {
@@ -45,9 +50,9 @@ void dt_fixup_memory(u64 start, u64 size)
setprop_str(memory, "device_type", "memory");
}
- printf("Memory <- <0x%x", memreg[0]);
+ printf("Memory <- <0x%x", be32_to_cpu(memreg[0]));
for (i = 1; i < (naddr + nsize); i++)
- printf(" 0x%x", memreg[i]);
+ printf(" 0x%x", be32_to_cpu(memreg[i]));
printf("> (%ldMB)\n\r", (unsigned long)(size >> 20));
setprop(memory, "reg", memreg, (naddr + nsize)*sizeof(u32));
@@ -65,10 +70,10 @@ void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus)
printf("CPU bus-frequency <- 0x%x (%dMHz)\n\r", bus, MHZ(bus));
while ((devp = find_node_by_devtype(devp, "cpu"))) {
- setprop_val(devp, "clock-frequency", cpu);
- setprop_val(devp, "timebase-frequency", tb);
+ setprop_val(devp, "clock-frequency", cpu_to_be32(cpu));
+ setprop_val(devp, "timebase-frequency", cpu_to_be32(tb));
if (bus > 0)
- setprop_val(devp, "bus-frequency", bus);
+ setprop_val(devp, "bus-frequency", cpu_to_be32(bus));
}
timebase_period_ns = 1000000000 / tb;
@@ -80,7 +85,7 @@ void dt_fixup_clock(const char *path, u32 freq)
if (devp) {
printf("%s: clock-frequency <- %x (%dMHz)\n\r", path, freq, MHZ(freq));
- setprop_val(devp, "clock-frequency", freq);
+ setprop_val(devp, "clock-frequency", cpu_to_be32(freq));
}
}
@@ -133,8 +138,12 @@ void dt_get_reg_format(void *node, u32 *naddr, u32 *nsize)
{
if (getprop(node, "#address-cells", naddr, 4) != 4)
*naddr = 2;
+ else
+ *naddr = be32_to_cpu(*naddr);
if (getprop(node, "#size-cells", nsize, 4) != 4)
*nsize = 1;
+ else
+ *nsize = be32_to_cpu(*nsize);
}
static void copy_val(u32 *dest, u32 *src, int naddr)
@@ -163,9 +172,9 @@ static int add_reg(u32 *reg, u32 *add, int naddr)
int i, carry = 0;
for (i = MAX_ADDR_CELLS - 1; i >= MAX_ADDR_CELLS - naddr; i--) {
- u64 tmp = (u64)reg[i] + add[i] + carry;
+ u64 tmp = (u64)be32_to_cpu(reg[i]) + be32_to_cpu(add[i]) + carry;
carry = tmp >> 32;
- reg[i] = (u32)tmp;
+ reg[i] = cpu_to_be32((u32)tmp);
}
return !carry;
@@ -180,18 +189,18 @@ static int compare_reg(u32 *reg, u32 *range, u32 *rangesize)
u32 end;
for (i = 0; i < MAX_ADDR_CELLS; i++) {
- if (reg[i] < range[i])
+ if (be32_to_cpu(reg[i]) < be32_to_cpu(range[i]))
return 0;
- if (reg[i] > range[i])
+ if (be32_to_cpu(reg[i]) > be32_to_cpu(range[i]))
break;
}
for (i = 0; i < MAX_ADDR_CELLS; i++) {
- end = range[i] + rangesize[i];
+ end = be32_to_cpu(range[i]) + be32_to_cpu(rangesize[i]);
- if (reg[i] < end)
+ if (be32_to_cpu(reg[i]) < end)
break;
- if (reg[i] > end)
+ if (be32_to_cpu(reg[i]) > end)
return 0;
}
@@ -240,7 +249,6 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
return 0;
dt_get_reg_format(parent, &naddr, &nsize);
-
if (nsize > 2)
return 0;
@@ -252,10 +260,10 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
copy_val(last_addr, prop_buf + offset, naddr);
- ret_size = prop_buf[offset + naddr];
+ ret_size = be32_to_cpu(prop_buf[offset + naddr]);
if (nsize == 2) {
ret_size <<= 32;
- ret_size |= prop_buf[offset + naddr + 1];
+ ret_size |= be32_to_cpu(prop_buf[offset + naddr + 1]);
}
for (;;) {
@@ -278,7 +286,6 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
offset = find_range(last_addr, prop_buf, prev_naddr,
naddr, prev_nsize, buflen / 4);
-
if (offset < 0)
return 0;
@@ -296,8 +303,7 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
if (naddr > 2)
return 0;
- ret_addr = ((u64)last_addr[2] << 32) | last_addr[3];
-
+ ret_addr = ((u64)be32_to_cpu(last_addr[2]) << 32) | be32_to_cpu(last_addr[3]);
if (sizeof(void *) == 4 &&
(ret_addr >= 0x100000000ULL || ret_size > 0x100000000ULL ||
ret_addr + ret_size > 0x100000000ULL))
@@ -350,11 +356,14 @@ int dt_is_compatible(void *node, const char *compat)
int dt_get_virtual_reg(void *node, void **addr, int nres)
{
unsigned long xaddr;
- int n;
+ int n, i;
n = getprop(node, "virtual-reg", addr, nres * 4);
- if (n > 0)
+ if (n > 0) {
+ for (i = 0; i < n/4; i++)
+ ((u32 *)addr)[i] = be32_to_cpu(((u32 *)addr)[i]);
return n / 4;
+ }
for (n = 0; n < nres; n++) {
if (!dt_xlate_reg(node, n, &xaddr, NULL))
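The pattern running through this file deserves a note: flattened device-tree cells are always stored big-endian, so a wrapper built little-endian (as for Microwatt below) must swap on every load and store. A condensed sketch of the read side, using the same helpers as above:

	u32 naddr;

	/* getprop() returns the raw big-endian cell ... */
	if (getprop(node, "#address-cells", &naddr, sizeof(naddr)) == sizeof(naddr))
		naddr = be32_to_cpu(naddr);	/* ... so swap before arithmetic */
	else
		naddr = 2;			/* default when the property is absent */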
diff --git a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi
index 0c0efa94cfb4..2a677fd323eb 100644
--- a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi
@@ -170,8 +170,6 @@ timer@41100 {
/include/ "pq3-etsec2-0.dtsi"
enet0: ethernet@b0000 {
queue-group@b0000 {
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
interrupts = <26 2 0 0 27 2 0 0 28 2 0 0>;
};
};
@@ -179,8 +177,6 @@ enet0: ethernet@b0000 {
/include/ "pq3-etsec2-1.dtsi"
enet1: ethernet@b1000 {
queue-group@b1000 {
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
interrupts = <33 2 0 0 34 2 0 0 35 2 0 0>;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi b/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi
index b5f071574e83..b8e0edd1ac69 100644
--- a/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi
@@ -190,8 +190,6 @@ crypto@30000 {
/include/ "pq3-etsec2-0.dtsi"
enet0: ethernet@b0000 {
queue-group@b0000 {
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
interrupts = <26 2 0 0 27 2 0 0 28 2 0 0>;
};
};
@@ -199,8 +197,6 @@ enet0: ethernet@b0000 {
/include/ "pq3-etsec2-1.dtsi"
enet1: ethernet@b1000 {
queue-group@b1000 {
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
interrupts = <33 2 0 0 34 2 0 0 35 2 0 0>;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/c293si-post.dtsi b/arch/powerpc/boot/dts/fsl/c293si-post.dtsi
index bd208320bff5..bec0fc36849d 100644
--- a/arch/powerpc/boot/dts/fsl/c293si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/c293si-post.dtsi
@@ -171,8 +171,6 @@
enet0: ethernet@b0000 {
queue-group@b0000 {
reg = <0x10000 0x1000>;
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
};
};
@@ -180,8 +178,6 @@
enet1: ethernet@b1000 {
queue-group@b1000 {
reg = <0x11000 0x1000>;
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
index 1b4aafc1f6a2..ccda0a91abf0 100644
--- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
@@ -122,7 +122,15 @@
};
/include/ "pq3-i2c-0.dtsi"
+ i2c@3000 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "pq3-i2c-1.dtsi"
+ i2c@3100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "pq3-duart-0.dtsi"
/include/ "pq3-espi-0.dtsi"
spi0: spi@7000 {
@@ -172,29 +180,8 @@
/include/ "pq3-mpic-timer-B.dtsi"
/include/ "pq3-etsec2-0.dtsi"
- enet0: ethernet@b0000 {
- queue-group@b0000 {
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
- };
- };
-
/include/ "pq3-etsec2-1.dtsi"
- enet1: ethernet@b1000 {
- queue-group@b1000 {
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
- };
- };
-
/include/ "pq3-etsec2-2.dtsi"
- enet2: ethernet@b2000 {
- queue-group@b2000 {
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
- };
-
- };
global-utilities@e0000 {
compatible = "fsl,p1010-guts";
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index 872e4485dc3f..ddc018d42252 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -371,7 +371,23 @@
};
/include/ "qoriq-i2c-0.dtsi"
+ i2c@118000 {
+ fsl,i2c-erratum-a004447;
+ };
+
+ i2c@118100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "qoriq-i2c-1.dtsi"
+ i2c@119000 {
+ fsl,i2c-erratum-a004447;
+ };
+
+ i2c@119100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "qoriq-duart-0.dtsi"
/include/ "qoriq-duart-1.dtsi"
/include/ "qoriq-gpio-0.dtsi"
diff --git a/arch/powerpc/boot/dts/icon.dts b/arch/powerpc/boot/dts/icon.dts
index fbaa60b8f87a..4fd7a4fbb4fb 100644
--- a/arch/powerpc/boot/dts/icon.dts
+++ b/arch/powerpc/boot/dts/icon.dts
@@ -197,13 +197,6 @@
reg = <0x00fa0000 0x00060000>;
};
};
-
- SysACE_CompactFlash: sysace@1,0 {
- compatible = "xlnx,sysace";
- interrupt-parent = <&UIC2>;
- interrupts = <24 0x4>;
- reg = <0x00000001 0x00000000 0x10000>;
- };
};
UART0: serial@f0000200 {
diff --git a/arch/powerpc/boot/dts/microwatt.dts b/arch/powerpc/boot/dts/microwatt.dts
new file mode 100644
index 000000000000..974abbdda249
--- /dev/null
+++ b/arch/powerpc/boot/dts/microwatt.dts
@@ -0,0 +1,138 @@
+/dts-v1/;
+
+/ {
+ #size-cells = <0x02>;
+ #address-cells = <0x02>;
+ model-name = "microwatt";
+ compatible = "microwatt-soc";
+
+ aliases {
+ serial0 = &UART0;
+ };
+
+ reserved-memory {
+ #size-cells = <0x02>;
+ #address-cells = <0x02>;
+ ranges;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x00000000 0x00000000 0x10000000>;
+ };
+
+ cpus {
+ #size-cells = <0x00>;
+ #address-cells = <0x01>;
+
+ ibm,powerpc-cpu-features {
+ display-name = "Microwatt";
+ isa = <3000>;
+ device_type = "cpu-features";
+ compatible = "ibm,powerpc-cpu-features";
+
+ mmu-radix {
+ isa = <3000>;
+ usable-privilege = <2>;
+ };
+
+ little-endian {
+ isa = <2050>;
+ usable-privilege = <3>;
+ hwcap-bit-nr = <1>;
+ };
+
+ cache-inhibited-large-page {
+ isa = <2040>;
+ usable-privilege = <2>;
+ };
+
+ fixed-point-v3 {
+ isa = <3000>;
+ usable-privilege = <3>;
+ };
+
+ no-execute {
+ isa = <2010>;
+ usable-privilege = <2>;
+ };
+
+ floating-point {
+ hwcap-bit-nr = <27>;
+ isa = <0>;
+ usable-privilege = <3>;
+ };
+ };
+
+ PowerPC,Microwatt@0 {
+ i-cache-sets = <2>;
+ ibm,dec-bits = <64>;
+ reservation-granule-size = <64>;
+ clock-frequency = <100000000>;
+ timebase-frequency = <100000000>;
+ i-tlb-sets = <1>;
+ ibm,ppc-interrupt-server#s = <0>;
+ i-cache-block-size = <64>;
+ d-cache-block-size = <64>;
+ d-cache-sets = <2>;
+ i-tlb-size = <64>;
+ cpu-version = <0x990000>;
+ status = "okay";
+ i-cache-size = <0x1000>;
+ ibm,processor-radix-AP-encodings = <0x0c 0xa0000010 0x20000015 0x4000001e>;
+ tlb-size = <0>;
+ tlb-sets = <0>;
+ device_type = "cpu";
+ d-tlb-size = <128>;
+ d-tlb-sets = <2>;
+ reg = <0>;
+ general-purpose;
+ 64-bit;
+ d-cache-size = <0x1000>;
+ ibm,chip-id = <0>;
+ };
+ };
+
+ soc@c0000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&ICS>;
+
+ ranges = <0 0 0xc0000000 0x40000000>;
+
+ interrupt-controller@4000 {
+ compatible = "openpower,xics-presentation", "ibm,ppc-xicp";
+ ibm,interrupt-server-ranges = <0x0 0x1>;
+ reg = <0x4000 0x100>;
+ };
+
+ ICS: interrupt-controller@5000 {
+ compatible = "openpower,xics-sources";
+ interrupt-controller;
+ interrupt-ranges = <0x10 0x10>;
+ reg = <0x5000 0x100>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+ };
+
+ UART0: serial@2000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x2000 0x8>;
+ clock-frequency = <100000000>;
+ current-speed = <115200>;
+ reg-shift = <2>;
+ fifo-size = <16>;
+ interrupts = <0x10 0x1>;
+ };
+ };
+
+ chosen {
+ bootargs = "";
+ ibm,architecture-vec-5 = [19 00 10 00 00 00 00 00 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00 40 00 40];
+ stdout-path = &UART0;
+ };
+};
diff --git a/arch/powerpc/boot/microwatt.c b/arch/powerpc/boot/microwatt.c
new file mode 100644
index 000000000000..ca9d83617fc1
--- /dev/null
+++ b/arch/powerpc/boot/microwatt.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <stddef.h>
+#include "stdio.h"
+#include "types.h"
+#include "io.h"
+#include "ops.h"
+
+BSS_STACK(8192);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
+{
+ unsigned long heapsize = 16*1024*1024 - (unsigned long)_end;
+
+ /*
+ * Disable interrupts and turn off MSR_RI, since we'll
+ * shortly be overwriting the interrupt vectors.
+ */
+ __asm__ volatile("mtmsrd %0,1" : : "r" (0));
+
+ simple_alloc_init(_end, heapsize, 32, 64);
+ fdt_init(_dtb_start);
+ serial_console_init();
+}
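A note on the inline asm: mtmsrd with L=1 updates only the MSR EE and RI bits, so writing 0 masks external interrupts and marks the state non-recoverable without disturbing the rest of the MSR, which is exactly what is wanted while the exception vectors at address 0 are being overwritten.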
diff --git a/arch/powerpc/boot/ns16550.c b/arch/powerpc/boot/ns16550.c
index b0da4466d419..f16d2be1d0f3 100644
--- a/arch/powerpc/boot/ns16550.c
+++ b/arch/powerpc/boot/ns16550.c
@@ -15,6 +15,7 @@
#include "stdio.h"
#include "io.h"
#include "ops.h"
+#include "of.h"
#define UART_DLL 0 /* Out: Divisor Latch Low */
#define UART_DLM 1 /* Out: Divisor Latch High */
@@ -58,16 +59,20 @@ int ns16550_console_init(void *devp, struct serial_console_data *scdp)
int n;
u32 reg_offset;
- if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1)
+ if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1) {
+ printf("virt reg parse fail...\r\n");
return -1;
+ }
n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset));
if (n == sizeof(reg_offset))
- reg_base += reg_offset;
+ reg_base += be32_to_cpu(reg_offset);
n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
if (n != sizeof(reg_shift))
reg_shift = 0;
+ else
+ reg_shift = be32_to_cpu(reg_shift);
scdp->open = ns16550_open;
scdp->putc = ns16550_putc;
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 41fa0a8715e3..1f4676bab786 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -191,7 +191,7 @@ if [ -z "$kernel" ]; then
kernel=vmlinux
fi
-LANG=C elfformat="`${CROSS}objdump -p "$kernel" | grep 'file format' | awk '{print $4}'`"
+LC_ALL=C elfformat="`${CROSS}objdump -p "$kernel" | grep 'file format' | awk '{print $4}'`"
case "$elfformat" in
elf64-powerpcle) format=elf64lppc ;;
elf64-powerpc) format=elf64ppc ;;
elf32-powerpc) format=elf32ppc ;;
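The switch from LANG to LC_ALL matters because LC_ALL has the highest locale precedence: a caller exporting LC_ALL would override LANG=C and could localize objdump's output enough to break the 'file format' grep.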
@@ -342,6 +342,11 @@ gamecube|wii)
link_address='0x600000'
platformo="$object/$platform-head.o $object/$platform.o"
;;
+microwatt)
+ link_address='0x500000'
+ platformo="$object/fixed-head.o $object/$platform.o"
+ binary=y
+ ;;
treeboot-currituck)
link_address='0x1000000'
;;
diff --git a/arch/powerpc/boot/zImage.ps3.lds.S b/arch/powerpc/boot/zImage.ps3.lds.S
index 7b2ff2eaa73a..d0ffb493614d 100644
--- a/arch/powerpc/boot/zImage.ps3.lds.S
+++ b/arch/powerpc/boot/zImage.ps3.lds.S
@@ -8,7 +8,7 @@ SECTIONS
.kernel:vmlinux.bin : { *(.kernel:vmlinux.bin) }
_vmlinux_end = .;
- . = ALIGN(4096);
+ . = ALIGN(8);
_dtb_start = .;
.kernel:dtb : { *(.kernel:dtb) }
_dtb_end = .;
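Relaxing the alignment from 4096 to 8 is safe because the flattened device-tree format only requires the blob to be 64-bit aligned; the old page alignment merely padded the PS3 zImage.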
diff --git a/arch/powerpc/configs/32-bit.config b/arch/powerpc/configs/32-bit.config
new file mode 100644
index 000000000000..ad6546850c68
--- /dev/null
+++ b/arch/powerpc/configs/32-bit.config
@@ -0,0 +1 @@
+# CONFIG_PPC64 is not set
diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig
index 930948a1da76..fb9a15573546 100644
--- a/arch/powerpc/configs/44x/icon_defconfig
+++ b/arch/powerpc/configs/44x/icon_defconfig
@@ -28,7 +28,6 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
-CONFIG_XILINX_SYSACE=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_CONSTANTS=y
diff --git a/arch/powerpc/configs/64-bit.config b/arch/powerpc/configs/64-bit.config
new file mode 100644
index 000000000000..0fe6406929e2
--- /dev/null
+++ b/arch/powerpc/configs/64-bit.config
@@ -0,0 +1 @@
+CONFIG_PPC64=y
diff --git a/arch/powerpc/configs/microwatt_defconfig b/arch/powerpc/configs/microwatt_defconfig
new file mode 100644
index 000000000000..a08b739123da
--- /dev/null
+++ b/arch/powerpc/configs/microwatt_defconfig
@@ -0,0 +1,98 @@
+# CONFIG_SWAP is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_PPC64=y
+# CONFIG_PPC_KUEP is not set
+# CONFIG_PPC_KUAP is not set
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_NR_IRQS=64
+CONFIG_PANIC_TIMEOUT=10
+# CONFIG_PPC_POWERNV is not set
+# CONFIG_PPC_PSERIES is not set
+CONFIG_PPC_MICROWATT=y
+# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
+CONFIG_CPU_FREQ=y
+CONFIG_HZ_100=y
+# CONFIG_PPC_MEM_KEYS is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
+# CONFIG_COREDUMP is not set
+# CONFIG_COMPACTION is not set
+# CONFIG_MIGRATION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_INET=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_RAW_DIAG=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_PARTITIONED_MASTER=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_NETDEVICES=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_NVRAM is not set
+CONFIG_RANDOM_TRUST_CPU=y
+CONFIG_SPI=y
+CONFIG_SPI_DEBUG=y
+CONFIG_SPI_BITBANG=y
+CONFIG_SPI_SPIDEV=y
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_NVMEM is not set
+CONFIG_EXT4_FS=y
+# CONFIG_FILE_LOCKING is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_PRINTK_TIME=y
+# CONFIG_SYMBOLIC_ERRNAME is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_MISC is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_PPC_DISABLE_WERROR=y
+CONFIG_XMON=y
+CONFIG_XMON_DEFAULT=y
+# CONFIG_XMON_DEFAULT_RO_MODE is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 949ff9ccda5e..d21f266cea9a 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -57,3 +57,28 @@ CONFIG_CRC32_SLICEBY4=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PPC_16K_PAGES=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_FS=y
+CONFIG_PPC_PTDUMP=y
+CONFIG_MODULES=y
+CONFIG_SPI=y
+CONFIG_SPI_FSL_SPI=y
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_DEV_TALITOS=y
+CONFIG_8xx_GPIO=y
+CONFIG_WATCHDOG=y
+CONFIG_8xxx_WDT=y
+CONFIG_SMC_UCODE_PATCH=y
+CONFIG_ADVANCED_OPTIONS=y
+CONFIG_PIN_TLB=y
+CONFIG_PERF_EVENTS=y
+CONFIG_MATH_EMULATION=y
+CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y
+CONFIG_STRICT_KERNEL_RWX=y
+CONFIG_IPV6=y
+CONFIG_BPF_JIT=y
+CONFIG_DEBUG_VM_PGTABLE=y
+CONFIG_BDI_SWITCH=y
+CONFIG_PPC_EARLY_DEBUG=y
+CONFIG_PPC_EARLY_DEBUG_CPM_ADDR=0xff002008
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 2c87e856d839..8bfeea6c7de7 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -309,6 +309,7 @@ CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_FUNCTION_TRACER=y
CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_PPC_EMULATED_STATS=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 4f05a6652478..0ad2291337a7 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -50,6 +50,7 @@ CONFIG_PPC_TRANSACTIONAL_MEM=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_CRASH_DUMP=y
+CONFIG_FA_DUMP=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
@@ -177,6 +178,7 @@ CONFIG_CHELSIO_T1=m
CONFIG_BE2NET=m
CONFIG_IBMVETH=m
CONFIG_EHEA=m
+CONFIG_IBMVNIC=m
CONFIG_E100=y
CONFIG_E1000=y
CONFIG_E1000E=y
@@ -366,7 +368,9 @@ CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_FUNCTION_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 6677ac0da45a..1fd9d1260f9e 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -595,7 +595,6 @@ CONFIG_GAMEPORT_FM801=m
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_NONSTANDARD=y
CONFIG_ROCKETPORT=m
-CONFIG_CYCLADES=m
CONFIG_SYNCLINK_GT=m
CONFIG_NOZOMI=m
CONFIG_N_HDLC=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 777221775c83..b183629f1bcf 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -41,6 +41,7 @@ CONFIG_DTL=y
CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
CONFIG_IBMEBUS=y
+CONFIG_PAPR_SCM=m
CONFIG_PPC_SVM=y
# CONFIG_PPC_PMAC is not set
CONFIG_RTAS_FLASH=m
@@ -159,6 +160,7 @@ CONFIG_BE2NET=m
CONFIG_S2IO=m
CONFIG_IBMVETH=y
CONFIG_EHEA=y
+CONFIG_IBMVNIC=y
CONFIG_E100=y
CONFIG_E1000=y
CONFIG_E1000E=y
@@ -287,7 +289,9 @@ CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_FUNCTION_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c
index b1e577cbf00c..88e8ea73bfa7 100644
--- a/arch/powerpc/crypto/sha1-spe-glue.c
+++ b/arch/powerpc/crypto/sha1-spe-glue.c
@@ -107,7 +107,7 @@ static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data,
src += bytes;
len -= bytes;
- };
+ }
memcpy((char *)sctx->buffer, src, len);
return 0;
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index e1f9b4ea1c53..bcf95ce0964f 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
-generated-y += syscall_table_c32.h
generated-y += syscall_table_spu.h
generic-y += export.h
generic-y += kvm_types.h
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 939f3c94c8f3..222823861a67 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -71,14 +71,17 @@ void __init machine_init(u64 dt_ptr);
#endif
long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, unsigned long r0, struct pt_regs *regs);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
-notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr);
-notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr);
+notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
+#ifdef CONFIG_PPC64
+unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
+unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
+unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
+#endif
long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
u32 len_high, u32 len_low);
long sys_switch_endian(void);
-notrace unsigned int __check_irq_replay(void);
-void notrace restore_interrupts(void);
/* prom_init (OpenFirmware) */
unsigned long __init prom_init(unsigned long r3, unsigned long r4,
@@ -122,6 +125,7 @@ extern s32 patch__call_flush_branch_caches3;
extern s32 patch__flush_count_cache_return;
extern s32 patch__flush_link_stack_return;
extern s32 patch__call_kvm_flush_link_stack;
+extern s32 patch__call_kvm_flush_link_stack_p9;
extern s32 patch__memset_nocache, patch__memcpy_nocache;
extern long flush_branch_caches;
@@ -142,7 +146,7 @@ void kvmhv_load_host_pmu(void);
void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
-int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
+void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);
long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 61c6e8b200e8..a1732a79e92a 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -23,7 +23,7 @@
#define __atomic_release_fence() \
__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
-static __inline__ int atomic_read(const atomic_t *v)
+static __inline__ int arch_atomic_read(const atomic_t *v)
{
int t;
@@ -32,13 +32,13 @@ static __inline__ int atomic_read(const atomic_t *v)
return t;
}
-static __inline__ void atomic_set(atomic_t *v, int i)
+static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
}
#define ATOMIC_OP(op, asm_op) \
-static __inline__ void atomic_##op(int a, atomic_t *v) \
+static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
{ \
int t; \
\
@@ -53,7 +53,7 @@ static __inline__ void atomic_##op(int a, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
{ \
int t; \
\
@@ -70,7 +70,7 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
}
#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
{ \
int res, t; \
\
@@ -94,11 +94,11 @@ static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op) \
@@ -109,16 +109,16 @@ ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
-static __inline__ void atomic_inc(atomic_t *v)
+static __inline__ void arch_atomic_inc(atomic_t *v)
{
int t;
@@ -131,9 +131,9 @@ static __inline__ void atomic_inc(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic_inc atomic_inc
+#define arch_atomic_inc arch_atomic_inc
-static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
+static __inline__ int arch_atomic_inc_return_relaxed(atomic_t *v)
{
int t;
@@ -149,7 +149,7 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
return t;
}
-static __inline__ void atomic_dec(atomic_t *v)
+static __inline__ void arch_atomic_dec(atomic_t *v)
{
int t;
@@ -162,9 +162,9 @@ static __inline__ void atomic_dec(atomic_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic_dec atomic_dec
+#define arch_atomic_dec arch_atomic_dec
-static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
+static __inline__ int arch_atomic_dec_return_relaxed(atomic_t *v)
{
int t;
@@ -180,17 +180,20 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
return t;
}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
+#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
+#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_cmpxchg_relaxed(v, o, n) \
- cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define atomic_cmpxchg_acquire(v, o, n) \
- cmpxchg_acquire(&((v)->counter), (o), (n))
+#define arch_atomic_cmpxchg(v, o, n) \
+ (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg_relaxed(v, o, n) \
+ arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define arch_atomic_cmpxchg_acquire(v, o, n) \
+ arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+#define arch_atomic_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
+#define arch_atomic_xchg_relaxed(v, new) \
+ arch_xchg_relaxed(&((v)->counter), (new))
/*
* Don't want to override the generic atomic_try_cmpxchg_acquire, because
@@ -199,7 +202,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
* would be a surprise).
*/
static __always_inline bool
-atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
+arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
int r, o = *old;
@@ -229,7 +232,7 @@ atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int t;
@@ -250,7 +253,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return t;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/**
* atomic_inc_not_zero - increment unless the number is zero
@@ -259,7 +262,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
-static __inline__ int atomic_inc_not_zero(atomic_t *v)
+static __inline__ int arch_atomic_inc_not_zero(atomic_t *v)
{
int t1, t2;
@@ -280,14 +283,14 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v)
return t1;
}
-#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
+#define arch_atomic_inc_not_zero(v) arch_atomic_inc_not_zero((v))
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static __inline__ int atomic_dec_if_positive(atomic_t *v)
+static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
{
int t;
@@ -307,13 +310,13 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
return t;
}
-#define atomic_dec_if_positive atomic_dec_if_positive
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
#ifdef __powerpc64__
#define ATOMIC64_INIT(i) { (i) }
-static __inline__ s64 atomic64_read(const atomic64_t *v)
+static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
s64 t;
@@ -322,13 +325,13 @@ static __inline__ s64 atomic64_read(const atomic64_t *v)
return t;
}
-static __inline__ void atomic64_set(atomic64_t *v, s64 i)
+static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
+static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v) \
{ \
s64 t; \
\
@@ -344,7 +347,7 @@ static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
static inline s64 \
-atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
+arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
{ \
s64 t; \
\
@@ -362,7 +365,7 @@ atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
static inline s64 \
-atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
+arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
{ \
s64 res, t; \
\
@@ -386,11 +389,11 @@ atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op) \
@@ -401,16 +404,16 @@ ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
-static __inline__ void atomic64_inc(atomic64_t *v)
+static __inline__ void arch_atomic64_inc(atomic64_t *v)
{
s64 t;
@@ -423,9 +426,9 @@ static __inline__ void atomic64_inc(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic64_inc atomic64_inc
+#define arch_atomic64_inc arch_atomic64_inc
-static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
+static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
{
s64 t;
@@ -441,7 +444,7 @@ static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
return t;
}
-static __inline__ void atomic64_dec(atomic64_t *v)
+static __inline__ void arch_atomic64_dec(atomic64_t *v)
{
s64 t;
@@ -454,9 +457,9 @@ static __inline__ void atomic64_dec(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic64_dec atomic64_dec
+#define arch_atomic64_dec arch_atomic64_dec
-static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
+static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
{
s64 t;
@@ -472,14 +475,14 @@ static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
return t;
}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1.
*/
-static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
+static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 t;
@@ -498,16 +501,19 @@ static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
return t;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_cmpxchg_relaxed(v, o, n) \
- cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define atomic64_cmpxchg_acquire(v, o, n) \
- cmpxchg_acquire(&((v)->counter), (o), (n))
+#define arch_atomic64_cmpxchg(v, o, n) \
+ (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_cmpxchg_relaxed(v, o, n) \
+ arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define arch_atomic64_cmpxchg_acquire(v, o, n) \
+ arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+#define arch_atomic64_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
+#define arch_atomic64_xchg_relaxed(v, new) \
+ arch_xchg_relaxed(&((v)->counter), (new))
/**
* atomic64_fetch_add_unless - add unless the number is a given value
@@ -518,7 +524,7 @@ static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 t;
@@ -539,7 +545,7 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return t;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/**
* atomic64_inc_not_zero - increment unless the number is zero
@@ -548,7 +554,7 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
-static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
+static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
{
s64 t1, t2;
@@ -569,7 +575,7 @@ static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
return t1 != 0;
}
-#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
+#define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
#endif /* __powerpc64__ */
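The point of the arch_atomic_* renames is to slot powerpc under the generic instrumentation layer: the plain atomic_* names are then provided by asm-generic wrappers that add KASAN/KCSAN annotations before calling down. A representative wrapper, sketched after include/asm-generic/atomic-instrumented.h:

	static __always_inline int
	atomic_read(const atomic_t *v)
	{
		instrument_atomic_read(v, sizeof(*v));
		return arch_atomic_read(v);
	}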
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index aecfde829d5d..f0e687236484 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -46,6 +46,8 @@
# define SMPWMB eieio
#endif
+/* clang defines this macro for a builtin, which will not work with runtime patching */
+#undef __lwsync
#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb() __lwsync()
#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
@@ -80,22 +82,6 @@ do { \
___p1; \
})
-#ifdef CONFIG_PPC64
-#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
- typeof(ptr) __PTR = (ptr); \
- __unqual_scalar_typeof(*ptr) VAL; \
- VAL = READ_ONCE(*__PTR); \
- if (unlikely(!(cond_expr))) { \
- spin_begin(); \
- do { \
- VAL = READ_ONCE(*__PTR); \
- } while (!(cond_expr)); \
- spin_end(); \
- } \
- (typeof(*ptr))VAL; \
-})
-#endif
-
#ifdef CONFIG_PPC_BOOK3S_64
#define NOSPEC_BARRIER_SLOT nop
#elif defined(CONFIG_PPC_FSL_BOOK3E)
diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
deleted file mode 100644
index 2a0a467d2985..000000000000
--- a/arch/powerpc/include/asm/book3s/32/hash.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_BOOK3S_32_HASH_H
-#define _ASM_POWERPC_BOOK3S_32_HASH_H
-#ifdef __KERNEL__
-
-/*
- * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
- * table containing PTEs, together with a set of 16 segment registers,
- * to define the virtual to physical address mapping.
- *
- * We use the hash table as an extended TLB, i.e. a cache of currently
- * active mappings. We maintain a two-level page table tree, much
- * like that used by the i386, for the sake of the Linux memory
- * management code. Low-level assembler code in hash_low_32.S
- * (procedure hash_page) is responsible for extracting ptes from the
- * tree and putting them into the hash table when necessary, and
- * updating the accessed and modified bits in the page table tree.
- */
-
-#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
-#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
-#define _PAGE_USER 0x004 /* usermode access allowed */
-#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
-#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
-#define _PAGE_DIRTY 0x080 /* C: page changed */
-#define _PAGE_ACCESSED 0x100 /* R: page referenced */
-#define _PAGE_EXEC 0x200 /* software: exec allowed */
-#define _PAGE_RW 0x400 /* software: user write access allowed */
-#define _PAGE_SPECIAL 0x800 /* software: Special page */
-
-#ifdef CONFIG_PTE_64BIT
-/* We never clear the high word of the pte */
-#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
-#else
-#define _PTE_NONE_MASK _PAGE_HASHPTE
-#endif
-
-#define _PMD_PRESENT 0
-#define _PMD_PRESENT_MASK (PAGE_MASK)
-#define _PMD_BAD (~PAGE_MASK)
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_BOOK3S_32_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 73bc5d2c431d..64201125a287 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -5,185 +5,214 @@
#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>
-#ifdef __ASSEMBLY__
-
-.macro kuep_update_sr gpr1, gpr2 /* NEVER use r0 as gpr2 due to addis */
-101: mtsrin \gpr1, \gpr2
- addi \gpr1, \gpr1, 0x111 /* next VSID */
- rlwinm \gpr1, \gpr1, 0, 0xf0ffffff /* clear VSID overflow */
- addis \gpr2, \gpr2, 0x1000 /* address of next segment */
- bdnz 101b
- isync
-.endm
-
-.macro kuep_lock gpr1, gpr2
-#ifdef CONFIG_PPC_KUEP
- li \gpr1, NUM_USER_SEGMENTS
- li \gpr2, 0
- mtctr \gpr1
- mfsrin \gpr1, \gpr2
- oris \gpr1, \gpr1, SR_NX@h /* set Nx */
- kuep_update_sr \gpr1, \gpr2
-#endif
-.endm
-
-.macro kuep_unlock gpr1, gpr2
-#ifdef CONFIG_PPC_KUEP
- li \gpr1, NUM_USER_SEGMENTS
- li \gpr2, 0
- mtctr \gpr1
- mfsrin \gpr1, \gpr2
- rlwinm \gpr1, \gpr1, 0, ~SR_NX /* Clear Nx */
- kuep_update_sr \gpr1, \gpr2
-#endif
-.endm
+#ifndef __ASSEMBLY__
-#ifdef CONFIG_PPC_KUAP
+#include <linux/jump_label.h>
-.macro kuap_update_sr gpr1, gpr2, gpr3 /* NEVER use r0 as gpr2 due to addis */
-101: mtsrin \gpr1, \gpr2
- addi \gpr1, \gpr1, 0x111 /* next VSID */
- rlwinm \gpr1, \gpr1, 0, 0xf0ffffff /* clear VSID overflow */
- addis \gpr2, \gpr2, 0x1000 /* address of next segment */
- cmplw \gpr2, \gpr3
- blt- 101b
- isync
-.endm
-
-.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
- lwz \gpr2, KUAP(\thread)
- rlwinm. \gpr3, \gpr2, 28, 0xf0000000
- stw \gpr2, STACK_REGS_KUAP(\sp)
- beq+ 102f
- li \gpr1, 0
- stw \gpr1, KUAP(\thread)
- mfsrin \gpr1, \gpr2
- oris \gpr1, \gpr1, SR_KS@h /* set Ks */
- kuap_update_sr \gpr1, \gpr2, \gpr3
-102:
-.endm
-
-.macro kuap_restore sp, current, gpr1, gpr2, gpr3
- lwz \gpr2, STACK_REGS_KUAP(\sp)
- rlwinm. \gpr3, \gpr2, 28, 0xf0000000
- stw \gpr2, THREAD + KUAP(\current)
- beq+ 102f
- mfsrin \gpr1, \gpr2
- rlwinm \gpr1, \gpr1, 0, ~SR_KS /* Clear Ks */
- kuap_update_sr \gpr1, \gpr2, \gpr3
-102:
-.endm
-
-.macro kuap_check current, gpr
-#ifdef CONFIG_PPC_KUAP_DEBUG
- lwz \gpr, THREAD + KUAP(\current)
-999: twnei \gpr, 0
- EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
-#endif
-.endm
+extern struct static_key_false disable_kuap_key;
+extern struct static_key_false disable_kuep_key;
-#endif /* CONFIG_PPC_KUAP */
+static __always_inline bool kuap_is_disabled(void)
+{
+ return !IS_ENABLED(CONFIG_PPC_KUAP) || static_branch_unlikely(&disable_kuap_key);
+}
+
+static __always_inline bool kuep_is_disabled(void)
+{
+ return !IS_ENABLED(CONFIG_PPC_KUEP) || static_branch_unlikely(&disable_kuep_key);
+}
-#else /* !__ASSEMBLY__ */
+static inline void kuep_lock(void)
+{
+ if (kuep_is_disabled())
+ return;
+
+ update_user_segments(mfsr(0) | SR_NX);
+}
+
+static inline void kuep_unlock(void)
+{
+ if (kuep_is_disabled())
+ return;
+
+ update_user_segments(mfsr(0) & ~SR_NX);
+}
#ifdef CONFIG_PPC_KUAP
#include <linux/sched.h>
-static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
+#define KUAP_NONE (~0UL)
+#define KUAP_ALL (~1UL)
+
+static inline void kuap_lock_one(unsigned long addr)
{
- addr &= 0xf0000000; /* align addr to start of segment */
- barrier(); /* make sure thread.kuap is updated before playing with SRs */
- while (addr < end) {
- mtsr(sr, addr);
- sr += 0x111; /* next VSID */
- sr &= 0xf0ffffff; /* clear VSID overflow */
- addr += 0x10000000; /* address of next segment */
- }
+ mtsr(mfsr(addr) | SR_KS, addr);
isync(); /* Context sync required after mtsr() */
}
-static __always_inline void allow_user_access(void __user *to, const void __user *from,
- u32 size, unsigned long dir)
+static inline void kuap_unlock_one(unsigned long addr)
{
- u32 addr, end;
+ mtsr(mfsr(addr) & ~SR_KS, addr);
+ isync(); /* Context sync required after mtsr() */
+}
- BUILD_BUG_ON(!__builtin_constant_p(dir));
- BUILD_BUG_ON(dir & ~KUAP_READ_WRITE);
+static inline void kuap_lock_all(void)
+{
+ update_user_segments(mfsr(0) | SR_KS);
+ isync(); /* Context sync required after mtsr() */
+}
- if (!(dir & KUAP_WRITE))
+static inline void kuap_unlock_all(void)
+{
+ update_user_segments(mfsr(0) & ~SR_KS);
+ isync(); /* Context sync required after mtsr() */
+}
+
+void kuap_lock_all_ool(void);
+void kuap_unlock_all_ool(void);
+
+static inline void kuap_lock(unsigned long addr, bool ool)
+{
+ if (likely(addr != KUAP_ALL))
+ kuap_lock_one(addr);
+ else if (!ool)
+ kuap_lock_all();
+ else
+ kuap_lock_all_ool();
+}
+
+static inline void kuap_unlock(unsigned long addr, bool ool)
+{
+ if (likely(addr != KUAP_ALL))
+ kuap_unlock_one(addr);
+ else if (!ool)
+ kuap_unlock_all();
+ else
+ kuap_unlock_all_ool();
+}
+
+static inline void kuap_save_and_lock(struct pt_regs *regs)
+{
+ unsigned long kuap = current->thread.kuap;
+
+ if (kuap_is_disabled())
return;
- addr = (__force u32)to;
+ regs->kuap = kuap;
+ if (unlikely(kuap == KUAP_NONE))
+ return;
+
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock(kuap, false);
+}
+
+static inline void kuap_user_restore(struct pt_regs *regs)
+{
+}
- if (unlikely(addr >= TASK_SIZE || !size))
+static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+{
+ if (kuap_is_disabled())
return;
- end = min(addr + size, TASK_SIZE);
+ current->thread.kuap = regs->kuap;
- current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
- kuap_update_sr(mfsr(addr) & ~SR_KS, addr, end); /* Clear Ks */
+ kuap_unlock(regs->kuap, false);
}
-static __always_inline void prevent_user_access(void __user *to, const void __user *from,
- u32 size, unsigned long dir)
+static inline unsigned long kuap_get_and_assert_locked(void)
+{
+ unsigned long kuap = current->thread.kuap;
+
+ if (kuap_is_disabled())
+ return KUAP_NONE;
+
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);
+
+ return kuap;
+}
+
+static inline void kuap_assert_locked(void)
+{
+ kuap_get_and_assert_locked();
+}
+
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ u32 size, unsigned long dir)
{
- u32 addr, end;
+ if (kuap_is_disabled())
+ return;
BUILD_BUG_ON(!__builtin_constant_p(dir));
- if (dir & KUAP_CURRENT_WRITE) {
- u32 kuap = current->thread.kuap;
+ if (!(dir & KUAP_WRITE))
+ return;
- if (unlikely(!kuap))
- return;
+ current->thread.kuap = (__force u32)to;
+ kuap_unlock_one((__force u32)to);
+}
- addr = kuap & 0xf0000000;
- end = kuap << 28;
- } else if (dir & KUAP_WRITE) {
- addr = (__force u32)to;
- end = min(addr + size, TASK_SIZE);
+static __always_inline void prevent_user_access(unsigned long dir)
+{
+ u32 kuap = current->thread.kuap;
- if (unlikely(addr >= TASK_SIZE || !size))
- return;
- } else {
+ if (kuap_is_disabled())
return;
- }
- current->thread.kuap = 0;
- kuap_update_sr(mfsr(addr) | SR_KS, addr, end); /* set Ks */
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
+
+ if (!(dir & KUAP_WRITE))
+ return;
+
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock(kuap, true);
}
static inline unsigned long prevent_user_access_return(void)
{
unsigned long flags = current->thread.kuap;
- unsigned long addr = flags & 0xf0000000;
- unsigned long end = flags << 28;
- void __user *to = (__force void __user *)addr;
- if (flags)
- prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);
+ if (kuap_is_disabled())
+ return KUAP_NONE;
+
+ if (flags != KUAP_NONE) {
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock(flags, true);
+ }
return flags;
}
static inline void restore_user_access(unsigned long flags)
{
- unsigned long addr = flags & 0xf0000000;
- unsigned long end = flags << 28;
- void __user *to = (__force void __user *)addr;
+ if (kuap_is_disabled())
+ return;
- if (flags)
- allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
+ if (flags != KUAP_NONE) {
+ current->thread.kuap = flags;
+ kuap_unlock(flags, true);
+ }
}
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
- unsigned long begin = regs->kuap & 0xf0000000;
- unsigned long end = regs->kuap << 28;
+ unsigned long kuap = regs->kuap;
+
+ if (kuap_is_disabled())
+ return false;
+
+ if (!is_write || kuap == KUAP_ALL)
+ return false;
+ if (kuap == KUAP_NONE)
+ return true;
+
+ /* If faulting address doesn't match unlocked segment, unlock all */
+ if ((kuap ^ address) & 0xf0000000)
+ regs->kuap = KUAP_ALL;
- return is_write && (address < begin || address >= end);
+ return false;
}
#endif /* CONFIG_PPC_KUAP */
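The two static keys give a near-zero-cost kill switch: when the protection is disabled at boot, the branch is patched once and every kuap_is_disabled()/kuep_is_disabled() check folds to a fall-through. A setup-side sketch (the powerpc hook is setup_kuap(); the body here is illustrative only):

	DEFINE_STATIC_KEY_FALSE(disable_kuap_key);

	void __init setup_kuap(bool disabled)
	{
		if (disabled)
			static_branch_enable(&disable_kuap_key);
		else
			kuap_lock_all_ool();	/* boot with userspace locked out */
	}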
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index b85f8e114a9c..f5be185cbdf8 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -67,6 +67,16 @@ struct ppc_bat {
#ifndef __ASSEMBLY__
/*
+ * This macro defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then hash functions will have to be
+ * changed to correspond.
+ */
+#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
+
+/*
* Hardware Page Table Entry
* Note that the xpn and x bitfields are used only by processors that
* support extended addressing; otherwise, those bits are reserved.
@@ -102,6 +112,37 @@ extern s32 patch__hash_page_B, patch__hash_page_C;
extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
extern s32 patch__flush_hash_B;
+#include <asm/reg.h>
+#include <asm/task_size_32.h>
+
+static __always_inline void update_user_segment(u32 n, u32 val)
+{
+ if (n << 28 < TASK_SIZE)
+ mtsr(val + n * 0x111, n << 28);
+}
+
+static __always_inline void update_user_segments(u32 val)
+{
+ val &= 0xf0ffffff;
+
+ update_user_segment(0, val);
+ update_user_segment(1, val);
+ update_user_segment(2, val);
+ update_user_segment(3, val);
+ update_user_segment(4, val);
+ update_user_segment(5, val);
+ update_user_segment(6, val);
+ update_user_segment(7, val);
+ update_user_segment(8, val);
+ update_user_segment(9, val);
+ update_user_segment(10, val);
+ update_user_segment(11, val);
+ update_user_segment(12, val);
+ update_user_segment(13, val);
+ update_user_segment(14, val);
+ update_user_segment(15, val);
+}
+
#endif /* !__ASSEMBLY__ */
/* We happily ignore the smaller BATs on 601, we don't actually use
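To make the skew concrete: each of the 16 user segment registers gets a VSID stepped by 0x111 per segment, which is why update_user_segments() masks with 0xf0ffffff (keeping the SR flag bits while preventing VSID overflow) and adds n * 0x111. Illustrative arithmetic only:

	/* VSID for effective segment 2 of context 5 */
	u32 vsid = CTX_TO_VSID(5, 2);	/* (5 * 897 * 16 + 2 * 0x111) & 0xffffff */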
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 415ae29fa73a..609c80f67194 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -4,7 +4,43 @@
#include <asm-generic/pgtable-nopmd.h>
-#include <asm/book3s/32/hash.h>
+/*
+ * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
+ * table containing PTEs, together with a set of 16 segment registers,
+ * to define the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings. We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code. Low-level assembler code in hash_low_32.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
+ */
+
+#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
+#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
+#define _PAGE_USER 0x004 /* usermode access allowed */
+#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
+#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+#define _PAGE_DIRTY 0x080 /* C: page changed */
+#define _PAGE_ACCESSED 0x100 /* R: page referenced */
+#define _PAGE_EXEC 0x200 /* software: exec allowed */
+#define _PAGE_RW 0x400 /* software: user write access allowed */
+#define _PAGE_SPECIAL 0x800 /* software: Special page */
+
+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
+#define _PTE_NONE_MASK _PAGE_HASHPTE
+#endif
+
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
/* And here we include common definitions */
@@ -194,10 +230,8 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#define VMALLOC_END ioremap_bot
#endif
-#ifdef CONFIG_STRICT_KERNEL_RWX
#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_VADDR (MODULES_END - SZ_256M)
-#endif
#ifndef __ASSEMBLY__
#include <linux/sched.h>
diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h
index d941c06d4f2e..ba1743c52b56 100644
--- a/arch/powerpc/include/asm/book3s/32/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h
@@ -79,4 +79,4 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
flush_tlb_mm(mm);
}
-#endif /* _ASM_POWERPC_TLBFLUSH_H */
+#endif /* _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index 8bd905050896..a1cc73a88710 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -287,7 +287,7 @@ static inline void kuap_kernel_restore(struct pt_regs *regs,
*/
}
-static inline unsigned long kuap_get_and_check_amr(void)
+static inline unsigned long kuap_get_and_assert_locked(void)
{
if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
unsigned long amr = mfspr(SPRN_AMR);
@@ -298,27 +298,7 @@ static inline unsigned long kuap_get_and_check_amr(void)
return 0;
}
-#else /* CONFIG_PPC_PKEY */
-
-static inline void kuap_user_restore(struct pt_regs *regs)
-{
-}
-
-static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
-{
-}
-
-static inline unsigned long kuap_get_and_check_amr(void)
-{
- return 0;
-}
-
-#endif /* CONFIG_PPC_PKEY */
-
-
-#ifdef CONFIG_PPC_KUAP
-
-static inline void kuap_check_amr(void)
+static inline void kuap_assert_locked(void)
{
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
@@ -418,8 +398,7 @@ static __always_inline void allow_user_access(void __user *to, const void __user
#endif /* !CONFIG_PPC_KUAP */
-static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
+static inline void prevent_user_access(unsigned long dir)
{
set_kuap(AMR_KUAP_BLOCKED);
if (static_branch_unlikely(&uaccess_flush_key))
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index f911bdb68d8b..3004f3323144 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -18,7 +18,6 @@
* complete pgtable.h but only a portion of it.
*/
#include <asm/book3s/64/pgtable.h>
-#include <asm/bug.h>
#include <asm/task_size_64.h>
#include <asm/cpu_has_feature.h>
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index eace8c3f7b0a..c02f42d1031e 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -19,6 +19,7 @@ struct mmu_psize_def {
int penc[MMU_PAGE_COUNT]; /* HPTE encoding */
unsigned int tlbiel; /* tlbiel supported for that page size */
unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */
+ unsigned long h_rpt_pgsize; /* H_RPT_INVALIDATE page size encoding */
union {
unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */
unsigned long ap; /* Ap encoding used by PowerISA 3.0 */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 058601efbc8a..4d9941b2fe51 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -7,6 +7,7 @@
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/bug.h>
+#include <linux/sizes.h>
#endif
/*
@@ -116,6 +117,7 @@
*/
#define _PAGE_KERNEL_RW (_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_READ)
+#define _PAGE_KERNEL_ROX (_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC)
#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | \
_PAGE_RW | _PAGE_EXEC)
/*
@@ -230,6 +232,9 @@ extern unsigned long __pmd_frag_size_shift;
#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+#define MAX_PTRS_PER_PGD (1 << (H_PGD_INDEX_SIZE > RADIX_PGD_INDEX_SIZE ? \
+ H_PGD_INDEX_SIZE : RADIX_PGD_INDEX_SIZE))
+
/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE (1UL << PMD_SHIFT)
@@ -323,7 +328,8 @@ extern unsigned long pci_io_base;
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
#define IOREMAP_START (ioremap_bot)
-#define IOREMAP_END (KERN_IO_END)
+#define IOREMAP_END (KERN_IO_END - FIXADDR_SIZE)
+#define FIXADDR_SIZE SZ_32M
/* Advertise special mapping type for AGP */
#define HAVE_PAGE_AGP
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index c7813dc628fc..59cab558e2f0 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -222,8 +222,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
* from ptesync, it should probably go into update_mmu_cache, rather
* than set_pte_at (which is used to set ptes unrelated to faults).
*
- * Spurious faults to vmalloc region are not tolerated, so there is
- * a ptesync in flush_cache_vmap.
+ * Spurious faults from the kernel memory are not tolerated, so there
+ * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
+ * the pte update sequence from ISA Book III 6.10 Translation Table
+ * Update Synchronization Requirements.
*/
}
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 8b33601cdb9d..a46fd37ad552 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -4,6 +4,10 @@
#include <asm/hvcall.h>
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
struct vm_area_struct;
struct mm_struct;
struct mmu_gather;
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index 0e1263455d73..ad130e15a126 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -8,7 +8,6 @@
#include <asm/book3s/32/pgtable.h>
#endif
-#define FIRST_USER_ADDRESS 0UL
#ifndef __ASSEMBLY__
/* Insert a PTE, top-level function is out of line. It uses an inline
* low level function in the respective pgtable-* files
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index d1635ffbb179..0b2162890d8b 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -111,11 +111,8 @@
#ifndef __ASSEMBLY__
struct pt_regs;
-long do_page_fault(struct pt_regs *);
-long hash__do_page_fault(struct pt_regs *);
+void hash__do_page_fault(struct pt_regs *);
void bad_page_fault(struct pt_regs *, int);
-void __bad_page_fault(struct pt_regs *regs, int sig);
-void do_bad_page_fault_segv(struct pt_regs *regs);
extern void _exception(int, struct pt_regs *, int, unsigned long);
extern void _exception_pkey(struct pt_regs *, unsigned long, int);
extern void die(const char *, struct pt_regs *, long);
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index f63495109f63..7564dd4fd12b 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -30,7 +30,19 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
#endif /* CONFIG_PPC_BOOK3S_64 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *page);
+/*
+ * This is called when a page has been modified by the kernel.
+ * It just marks the page as not i-cache clean. We do the i-cache
+ * flush later when the page is given to a user process, if necessary.
+ */
+static inline void flush_dcache_page(struct page *page)
+{
+ if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+ return;
+ /* avoid an atomic op if possible */
+ if (test_bit(PG_dcache_clean, &page->flags))
+ clear_bit(PG_dcache_clean, &page->flags);
+}
void flush_icache_range(unsigned long start, unsigned long stop);
#define flush_icache_range flush_icache_range
@@ -40,7 +52,6 @@ void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
#define flush_icache_user_page flush_icache_user_page
void flush_dcache_icache_page(struct page *page);
-void __flush_dcache_icache(void *page);
/**
* flush_dcache_range(): Write any modified data cache blocks out to memory and
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index d5da7ddbf0fc..350de8f90250 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -91,7 +91,7 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
}
#define HAVE_ARCH_CSUM_ADD
-static inline __wsum csum_add(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
u64 res = (__force u64)csum;
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index cf091c4c22e5..05f246c0e36e 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -185,14 +185,14 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
return x;
}
-#define xchg_local(ptr,x) \
+#define arch_xchg_local(ptr,x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_local((ptr), \
(unsigned long)_x_, sizeof(*(ptr))); \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
@@ -467,7 +467,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -476,7 +476,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -484,7 +484,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
(unsigned long)_n_, sizeof(*(ptr))); \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -493,7 +493,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
sizeof(*(ptr))); \
})
-#define cmpxchg_acquire(ptr, o, n) \
+#define arch_cmpxchg_acquire(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -502,29 +502,29 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
sizeof(*(ptr))); \
})
#ifdef CONFIG_PPC64
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64_relaxed(ptr, o, n) \
+#define arch_cmpxchg64_relaxed(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_relaxed((ptr), (o), (n)); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
})
-#define cmpxchg64_acquire(ptr, o, n) \
+#define arch_cmpxchg64_acquire(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_acquire((ptr), (o), (n)); \
+ arch_cmpxchg_acquire((ptr), (o), (n)); \
})
#else
#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif
#endif /* __KERNEL__ */
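The arch_ prefix matches what the generic instrumented-atomics layer expects; as a simplified sketch of that generic wrapper (not code from this patch), the un-prefixed name is rebuilt on top with sanitizer instrumentation:

	/* sketch: generic layer adds KASAN/KCSAN checks around the arch_ primitive */
	#define cmpxchg(ptr, o, n)					\
	({								\
		typeof(ptr) __ai_ptr = (ptr);				\
		instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));	\
		arch_cmpxchg(__ai_ptr, (o), (n));			\
	})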
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index eacc9102c251..a95f63788c6b 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -23,13 +23,13 @@
#define BRANCH_ABSOLUTE 0x2
bool is_offset_in_branch_range(long offset);
-int create_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
+int create_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags);
-int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
+int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags);
-int patch_branch(struct ppc_inst *addr, unsigned long target, int flags);
-int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
-int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr);
+int patch_branch(u32 *addr, unsigned long target, int flags);
+int patch_instruction(u32 *addr, struct ppc_inst instr);
+int raw_patch_instruction(u32 *addr, struct ppc_inst instr);
static inline unsigned long patch_site_addr(s32 *site)
{
@@ -38,18 +38,18 @@ static inline unsigned long patch_site_addr(s32 *site)
static inline int patch_instruction_site(s32 *site, struct ppc_inst instr)
{
- return patch_instruction((struct ppc_inst *)patch_site_addr(site), instr);
+ return patch_instruction((u32 *)patch_site_addr(site), instr);
}
static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
- return patch_branch((struct ppc_inst *)patch_site_addr(site), target, flags);
+ return patch_branch((u32 *)patch_site_addr(site), target, flags);
}
static inline int modify_instruction(unsigned int *addr, unsigned int clr,
unsigned int set)
{
- return patch_instruction((struct ppc_inst *)addr, ppc_inst((*addr & ~clr) | set));
+ return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
}
static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
@@ -59,10 +59,8 @@ static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned
int instr_is_relative_branch(struct ppc_inst instr);
int instr_is_relative_link_branch(struct ppc_inst instr);
-int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr);
-unsigned long branch_target(const struct ppc_inst *instr);
-int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest,
- const struct ppc_inst *src);
+unsigned long branch_target(const u32 *instr);
+int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src);
extern bool is_conditional_branch(struct ppc_inst instr);
#ifdef CONFIG_PPC_BOOK3E_64
void __patch_exception(int exc, unsigned long addr);
@@ -73,9 +71,10 @@ void __patch_exception(int exc, unsigned long addr);
#endif
#define OP_RT_RA_MASK 0xffff0000UL
-#define LIS_R2 0x3c020000UL
-#define ADDIS_R2_R12 0x3c4c0000UL
-#define ADDI_R2_R2 0x38420000UL
+#define LIS_R2 (PPC_RAW_LIS(_R2, 0))
+#define ADDIS_R2_R12 (PPC_RAW_ADDIS(_R2, _R12, 0))
+#define ADDI_R2_R2 (PPC_RAW_ADDI(_R2, _R2, 0))
+
static inline unsigned long ppc_function_entry(void *func)
{
@@ -179,12 +178,10 @@ static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
#define R2_STACK_OFFSET 40
#endif
-#define PPC_INST_LD_TOC (PPC_INST_LD | ___PPC_RT(__REG_R2) | \
- ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)
+#define PPC_INST_LD_TOC PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)
/* usually preceded by a mflr r0 */
-#define PPC_INST_STD_LR (PPC_INST_STD | ___PPC_RS(__REG_R0) | \
- ___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
+#define PPC_INST_STD_LR PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
#endif /* CONFIG_PPC64 */
#endif /* _ASM_POWERPC_CODE_PATCHING_H */
diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h
index 2211b934ecb4..bda45788cfcc 100644
--- a/arch/powerpc/include/asm/cpm2.h
+++ b/arch/powerpc/include/asm/cpm2.h
@@ -594,7 +594,7 @@ typedef struct fcc_enet {
uint fen_p256c; /* Total packets 256 < bytes <= 511 */
uint fen_p512c; /* Total packets 512 < bytes <= 1023 */
uint fen_p1024c; /* Total packets 1024 < bytes <= 1518 */
- uint fen_cambuf; /* Internal CAM buffer poiner */
+ uint fen_cambuf; /* Internal CAM buffer pointer */
ushort fen_rfthr; /* Received frames threshold */
ushort fen_rfcnt; /* Received frames count */
} fcc_enet_t;
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index 7897d16e0990..727d4b321937 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -7,7 +7,7 @@
#include <linux/bug.h>
#include <asm/cputable.h>
-static inline bool early_cpu_has_feature(unsigned long feature)
+static __always_inline bool early_cpu_has_feature(unsigned long feature)
{
return !!((CPU_FTRS_ALWAYS & feature) ||
(CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
@@ -46,7 +46,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
return static_branch_likely(&cpu_feature_keys[i]);
}
#else
-static inline bool cpu_has_feature(unsigned long feature)
+static __always_inline bool cpu_has_feature(unsigned long feature)
{
return early_cpu_has_feature(feature);
}
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index 98c8bd155bf9..b167186aaee4 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -98,6 +98,36 @@ static inline int cpu_last_thread_sibling(int cpu)
return cpu | (threads_per_core - 1);
}
+/*
+ * tlb_thread_siblings are siblings which share a TLB. This is not
+ * architected, is not something a hypervisor could emulate and a future
+ * CPU may change behaviour even in compat mode, so this should only be
+ * used on PowerNV, and only with care.
+ */
+static inline int cpu_first_tlb_thread_sibling(int cpu)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return cpu & ~0x6; /* Big Core */
+ else
+ return cpu_first_thread_sibling(cpu);
+}
+
+static inline int cpu_last_tlb_thread_sibling(int cpu)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return cpu | 0x6; /* Big Core */
+ else
+ return cpu_last_thread_sibling(cpu);
+}
+
+static inline int cpu_tlb_thread_sibling_step(void)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return 2; /* Big Core */
+ else
+ return 1;
+}
+
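A hedged sketch of the traversal these three helpers are designed for — flush_one_thread() is a placeholder name, not a real function:

	/* sketch: visit one thread per shared TLB within a core */
	int t;

	for (t = cpu_first_tlb_thread_sibling(cpu);
	     t <= cpu_last_tlb_thread_sibling(cpu);
	     t += cpu_tlb_thread_sibling_step())
		flush_one_thread(t);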
static inline u32 get_tensr(void)
{
#ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 7141ccea8c94..a92059964579 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -53,8 +53,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
#define mfdcr(rn) \
({unsigned int rval; \
if (__builtin_constant_p(rn) && rn < 1024) \
- asm volatile("mfdcr %0," __stringify(rn) \
- : "=r" (rval)); \
+ asm volatile("mfdcr %0, %1" : "=r" (rval) \
+ : "n" (rn)); \
else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
rval = mfdcrx(rn); \
else \
@@ -64,8 +64,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
#define mtdcr(rn, v) \
do { \
if (__builtin_constant_p(rn) && rn < 1024) \
- asm volatile("mtdcr " __stringify(rn) ",%0" \
- : : "r" (v)); \
+ asm volatile("mtdcr %0, %1" \
+ : : "n" (rn), "r" (v)); \
else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
mtdcrx(rn, v); \
else \
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index c1a8aac01cf9..bb6f78fcf981 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -35,6 +35,19 @@
/* PACA save area size in u64 units (exgen, exmc, etc) */
#define EX_SIZE 10
+/* PACA save area offsets */
+#define EX_R9 0
+#define EX_R10 8
+#define EX_R11 16
+#define EX_R12 24
+#define EX_R13 32
+#define EX_DAR 40
+#define EX_DSISR 48
+#define EX_CCR 52
+#define EX_CFAR 56
+#define EX_PPR 64
+#define EX_CTR 72
+
/*
* maximum recursive depth of MCE exceptions
*/
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 8d03c16a3663..947b5b9c4424 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -23,12 +23,17 @@
#include <asm/kmap_size.h>
#endif
+#ifdef CONFIG_PPC64
+#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)
+#else
+#define FIXADDR_SIZE 0
#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
#endif
+#endif
/*
* Here we define all the compile-time 'special' virtual
@@ -50,6 +55,7 @@
*/
enum fixed_addresses {
FIX_HOLE,
+#ifdef CONFIG_PPC32
/* reserve the top 128K for early debugging purposes */
FIX_EARLY_DEBUG_TOP = FIX_HOLE,
FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
@@ -72,6 +78,7 @@ enum fixed_addresses {
FIX_IMMR_SIZE,
#endif
/* FIX_PCIE_MCFG, */
+#endif /* CONFIG_PPC32 */
__end_of_permanent_fixed_addresses,
#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE)
@@ -98,6 +105,8 @@ enum fixed_addresses {
static inline void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE);
+
if (__builtin_constant_p(idx))
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
else if (WARN_ON(idx >= __end_of_fixed_addresses))
diff --git a/arch/powerpc/include/asm/fsl_pamu_stash.h b/arch/powerpc/include/asm/fsl_pamu_stash.h
index 30a31ad2123d..c0fbadb70b5d 100644
--- a/arch/powerpc/include/asm/fsl_pamu_stash.h
+++ b/arch/powerpc/include/asm/fsl_pamu_stash.h
@@ -7,6 +7,8 @@
#ifndef __FSL_PAMU_STASH_H
#define __FSL_PAMU_STASH_H
+struct iommu_domain;
+
/* cache stash targets */
enum pamu_stash_target {
PAMU_ATTR_CACHE_L1 = 1,
@@ -14,14 +16,6 @@ enum pamu_stash_target {
PAMU_ATTR_CACHE_L3,
};
-/*
- * This attribute allows configuring stashig specific parameters
- * in the PAMU hardware.
- */
-
-struct pamu_stash_attribute {
- u32 cpu; /* cpu number */
- u32 cache; /* cache to stash to: L1,L2,L3 */
-};
+int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu);
#endif /* __FSL_PAMU_STASH_H */
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index bc76970b6ee5..debe8c4f7062 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -12,7 +12,7 @@
#ifdef __ASSEMBLY__
-/* Based off of objdump optput from glibc */
+/* Based off of objdump output from glibc */
#define MCOUNT_SAVE_FRAME \
stwu r1,-48(r1); \
@@ -52,7 +52,7 @@ extern void _mcount(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
- /* reloction of mcount call site is the same as the address */
+ /* relocation of mcount call site is the same as the address */
return addr;
}
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index e93ee3202e4c..b3001f8b2c1e 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -33,9 +33,8 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
{
int oldval = 0, ret;
- if (!access_ok(uaddr, sizeof(u32)))
+ if (!user_access_begin(uaddr, sizeof(u32)))
return -EFAULT;
- allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
switch (op) {
case FUTEX_OP_SET:
@@ -56,10 +55,10 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
default:
ret = -ENOSYS;
}
+ user_access_end();
*oval = oldval;
- prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
return ret;
}
@@ -70,11 +69,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
int ret = 0;
u32 prev;
- if (!access_ok(uaddr, sizeof(u32)))
+ if (!user_access_begin(uaddr, sizeof(u32)))
return -EFAULT;
- allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
-
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
@@ -93,8 +90,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
: "cc", "memory");
+ user_access_end();
+
*uval = prev;
- prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
return ret;
}
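For context, the open-coded asm above sits inside the canonical user_access_begin() window; the generic shape of that pattern, as a sketch (peek_user_word() is hypothetical, not taken from this patch), is:

	/* sketch: a minimal KUAP-aware user access window */
	static int peek_user_word(u32 __user *uaddr, u32 *val)
	{
		if (!user_access_begin(uaddr, sizeof(u32)))
			return -EFAULT;
		unsafe_get_user(*val, uaddr, fault);
		user_access_end();
		return 0;
	fault:
		user_access_end();
		return -EFAULT;
	}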
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index 4cb9efa2eb21..242204e12993 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -16,7 +16,7 @@
.section ".head.data.\name\()","a",@progbits
.endm
.macro use_ftsec name
- .section ".head.text.\name\()"
+ .section ".head.text.\name\()","ax",@progbits
.endm
/*
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index ed6086d57b22..9bcf345cb208 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -294,6 +294,13 @@
#define H_RESIZE_HPT_COMMIT 0x370
#define H_REGISTER_PROC_TBL 0x37C
#define H_SIGNAL_SYS_RESET 0x380
+#define H_ALLOCATE_VAS_WINDOW 0x388
+#define H_MODIFY_VAS_WINDOW 0x38C
+#define H_DEALLOCATE_VAS_WINDOW 0x390
+#define H_QUERY_VAS_WINDOW 0x394
+#define H_QUERY_VAS_CAPABILITIES 0x398
+#define H_QUERY_NX_CAPABILITIES 0x39C
+#define H_GET_NX_FAULT 0x3A0
#define H_INT_GET_SOURCE_INFO 0x3A8
#define H_INT_SET_SOURCE_CONFIG 0x3AC
#define H_INT_GET_SOURCE_CONFIG 0x3B0
@@ -315,7 +322,8 @@
#define H_SCM_HEALTH 0x400
#define H_SCM_PERFORMANCE_STATS 0x418
#define H_RPT_INVALIDATE 0x448
-#define MAX_HCALL_OPCODE H_RPT_INVALIDATE
+#define H_SCM_FLUSH 0x44C
+#define MAX_HCALL_OPCODE H_SCM_FLUSH
/* Scope args for H_SCM_UNBIND_ALL */
#define H_UNBIND_SCOPE_ALL (0x1)
@@ -389,8 +397,12 @@
#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
+#define H_CPU_BEHAV_FAVOUR_SECURITY_H (1ull << 60) // IBM bit 3
#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
#define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6
+#define H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY (1ull << 56) // IBM bit 7
+#define H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS (1ull << 55) // IBM bit 8
+#define H_CPU_BEHAV_NO_STF_BARRIER (1ull << 54) // IBM bit 9
/* Flag values used in H_REGISTER_PROC_TBL hcall */
#define PROC_TABLE_OP_MASK 0x18
@@ -411,9 +423,9 @@
#define H_RPTI_TYPE_NESTED 0x0001 /* Invalidate nested guest partition-scope */
#define H_RPTI_TYPE_TLB 0x0002 /* Invalidate TLB */
#define H_RPTI_TYPE_PWC 0x0004 /* Invalidate Page Walk Cache */
-/* Invalidate Process Table Entries if H_RPTI_TYPE_NESTED is clear */
+/* Invalidate caching of Process Table Entries if H_RPTI_TYPE_NESTED is clear */
#define H_RPTI_TYPE_PRT 0x0008
-/* Invalidate Partition Table Entries if H_RPTI_TYPE_NESTED is set */
+/* Invalidate caching of Partition Table Entries if H_RPTI_TYPE_NESTED is set */
#define H_RPTI_TYPE_PAT 0x0008
#define H_RPTI_TYPE_ALL (H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | \
H_RPTI_TYPE_PRT)
@@ -446,6 +458,9 @@
*/
long plpar_hcall_norets(unsigned long opcode, ...);
+/* Variant which does not do hcall tracing */
+long plpar_hcall_norets_notrace(unsigned long opcode, ...);
+
/**
* plpar_hcall: - Make a pseries hypervisor call
* @opcode: The hypervisor call to make.
diff --git a/arch/powerpc/include/asm/hvconsole.h b/arch/powerpc/include/asm/hvconsole.h
index 999ed5ac9053..ccb2034506f0 100644
--- a/arch/powerpc/include/asm/hvconsole.h
+++ b/arch/powerpc/include/asm/hvconsole.h
@@ -24,5 +24,8 @@
extern int hvc_get_chars(uint32_t vtermno, char *buf, int count);
extern int hvc_put_chars(uint32_t vtermno, const char *buf, int count);
+/* Provided by HVC VIO */
+void hvc_vio_init_early(void);
+
#endif /* __KERNEL__ */
#endif /* _PPC64_HVCONSOLE_H */
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 56a98936a6a9..21cc571ea9c2 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -18,8 +18,17 @@
* PACA flags in paca->irq_happened.
*
 * These bits are set when interrupts occur while soft-disabled
- * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
- * is set whenever we manually hard disable.
+ * and allow a proper replay.
+ *
+ * The PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
+ * always in synch with the MSR[EE] state, except:
+ * - A window in interrupt entry, where hardware disables MSR[EE] and that
+ * must be "reconciled" with the soft mask state.
+ * - NMI interrupts that hit in awkward places, until they fix the state.
+ * - When local irqs are being enabled and state is being fixed up.
+ * - When returning from an interrupt there are some windows where this
+ * can become out of synch, but gets fixed before the RFI or before
+ * executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
*/
#define PACA_IRQ_HARD_DIS 0x01
#define PACA_IRQ_DBELL 0x02
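As a hedged sketch (this pairing appears elsewhere in the tree, not in this hunk), a manual hard disable keeps the flag and MSR[EE] consistent like so:

	/* sketch: hard disable and record it so a later replay sees the state */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;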
@@ -389,7 +398,15 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}
-static inline void may_hard_irq_enable(void) { }
+static inline bool may_hard_irq_enable(void)
+{
+ return false;
+}
+
+static inline void do_hard_irq_enable(void)
+{
+ BUILD_BUG();
+}
static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
diff --git a/arch/powerpc/include/asm/hydra.h b/arch/powerpc/include/asm/hydra.h
index ae02eb53d6ef..d024447283a0 100644
--- a/arch/powerpc/include/asm/hydra.h
+++ b/arch/powerpc/include/asm/hydra.h
@@ -94,8 +94,6 @@ extern volatile struct Hydra __iomem *Hydra;
#define HYDRA_INT_EXT7 18 /* Power Off Request */
#define HYDRA_INT_SPARE 19
-extern int hydra_init(void);
-
#endif /* __KERNEL__ */
#endif /* _ASMPPC_HYDRA_H */
diff --git a/arch/powerpc/include/asm/ima.h b/arch/powerpc/include/asm/ima.h
deleted file mode 100644
index ead488cf3981..000000000000
--- a/arch/powerpc/include/asm/ima.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_IMA_H
-#define _ASM_POWERPC_IMA_H
-
-struct kimage;
-
-int ima_get_kexec_buffer(void **addr, size_t *size);
-int ima_free_kexec_buffer(void);
-
-#ifdef CONFIG_IMA
-void remove_ima_buffer(void *fdt, int chosen_node);
-#else
-static inline void remove_ima_buffer(void *fdt, int chosen_node) {}
-#endif
-
-#ifdef CONFIG_IMA_KEXEC
-int arch_ima_add_kexec_buffer(struct kimage *image, unsigned long load_addr,
- size_t size);
-
-int setup_ima_buffer(const struct kimage *image, void *fdt, int chosen_node);
-#else
-static inline int setup_ima_buffer(const struct kimage *image, void *fdt,
- int chosen_node)
-{
- remove_ima_buffer(fdt, chosen_node);
- return 0;
-}
-#endif /* CONFIG_IMA_KEXEC */
-
-#endif /* _ASM_POWERPC_IMA_H */
diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h
index cc73c1267572..b11c0e2f9639 100644
--- a/arch/powerpc/include/asm/inst.h
+++ b/arch/powerpc/include/asm/inst.h
@@ -4,6 +4,41 @@
#include <asm/ppc-opcode.h>
+#ifdef CONFIG_PPC64
+
+#define ___get_user_instr(gu_op, dest, ptr) \
+({ \
+ long __gui_ret; \
+ u32 __user *__gui_ptr = (u32 __user *)ptr; \
+ struct ppc_inst __gui_inst; \
+ unsigned int __prefix, __suffix; \
+ \
+ __chk_user_ptr(ptr); \
+ __gui_ret = gu_op(__prefix, __gui_ptr); \
+ if (__gui_ret == 0) { \
+ if ((__prefix >> 26) == OP_PREFIX) { \
+ __gui_ret = gu_op(__suffix, __gui_ptr + 1); \
+ __gui_inst = ppc_inst_prefix(__prefix, __suffix); \
+ } else { \
+ __gui_inst = ppc_inst(__prefix); \
+ } \
+ if (__gui_ret == 0) \
+ (dest) = __gui_inst; \
+ } \
+ __gui_ret; \
+})
+#else /* !CONFIG_PPC64 */
+#define ___get_user_instr(gu_op, dest, ptr) \
+({ \
+ __chk_user_ptr(ptr); \
+ gu_op((dest).val, (u32 __user *)(ptr)); \
+})
+#endif /* CONFIG_PPC64 */
+
+#define get_user_instr(x, ptr) ___get_user_instr(get_user, x, ptr)
+
+#define __get_user_instr(x, ptr) ___get_user_instr(__get_user, x, ptr)
+
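A hedged usage sketch (the fault-handler context is hypothetical): fetching a possibly-prefixed instruction at a faulting user NIP looks like:

	/* sketch: read a (possibly prefixed) instruction from user memory */
	struct ppc_inst instr;

	if (get_user_instr(instr, (void __user *)regs->nip))
		return -EFAULT;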
/*
* Instruction data type for POWER
*/
@@ -25,9 +60,9 @@ static inline int ppc_inst_primary_opcode(struct ppc_inst x)
return ppc_inst_val(x) >> 26;
}
-#ifdef CONFIG_PPC64
-#define ppc_inst(x) ((struct ppc_inst){ .val = (x), .suffix = 0xff })
+#define ppc_inst(x) ((struct ppc_inst){ .val = (x) })
+#ifdef CONFIG_PPC64
#define ppc_inst_prefix(x, y) ((struct ppc_inst){ .val = (x), .suffix = (y) })
static inline u32 ppc_inst_suffix(struct ppc_inst x)
@@ -35,66 +70,43 @@ static inline u32 ppc_inst_suffix(struct ppc_inst x)
return x.suffix;
}
-static inline bool ppc_inst_prefixed(struct ppc_inst x)
-{
- return (ppc_inst_primary_opcode(x) == 1) && ppc_inst_suffix(x) != 0xff;
-}
+#else
+#define ppc_inst_prefix(x, y) ppc_inst(x)
-static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
+static inline u32 ppc_inst_suffix(struct ppc_inst x)
{
- return ppc_inst_prefix(swab32(ppc_inst_val(x)),
- swab32(ppc_inst_suffix(x)));
+ return 0;
}
-static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr)
-{
- u32 val, suffix;
-
- val = *(u32 *)ptr;
- if ((val >> 26) == OP_PREFIX) {
- suffix = *((u32 *)ptr + 1);
- return ppc_inst_prefix(val, suffix);
- } else {
- return ppc_inst(val);
- }
-}
+#endif /* CONFIG_PPC64 */
-static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
+static inline struct ppc_inst ppc_inst_read(const u32 *ptr)
{
- return *(u64 *)&x == *(u64 *)&y;
+ if (IS_ENABLED(CONFIG_PPC64) && (*ptr >> 26) == OP_PREFIX)
+ return ppc_inst_prefix(*ptr, *(ptr + 1));
+ else
+ return ppc_inst(*ptr);
}
-#else
-
-#define ppc_inst(x) ((struct ppc_inst){ .val = x })
-
static inline bool ppc_inst_prefixed(struct ppc_inst x)
{
- return false;
-}
-
-static inline u32 ppc_inst_suffix(struct ppc_inst x)
-{
- return 0;
+ return IS_ENABLED(CONFIG_PPC64) && ppc_inst_primary_opcode(x) == OP_PREFIX;
}
static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
{
- return ppc_inst(swab32(ppc_inst_val(x)));
-}
-
-static inline struct ppc_inst ppc_inst_read(const struct ppc_inst *ptr)
-{
- return *ptr;
+ return ppc_inst_prefix(swab32(ppc_inst_val(x)), swab32(ppc_inst_suffix(x)));
}
static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
{
- return ppc_inst_val(x) == ppc_inst_val(y);
+ if (ppc_inst_val(x) != ppc_inst_val(y))
+ return false;
+ if (!ppc_inst_prefixed(x))
+ return true;
+ return ppc_inst_suffix(x) == ppc_inst_suffix(y);
}
-#endif /* CONFIG_PPC64 */
-
static inline int ppc_inst_len(struct ppc_inst x)
{
return ppc_inst_prefixed(x) ? 8 : 4;
@@ -104,22 +116,23 @@ static inline int ppc_inst_len(struct ppc_inst x)
* Return the address of the next instruction, if the instruction @value was
* located at @location.
*/
-static inline struct ppc_inst *ppc_inst_next(void *location, struct ppc_inst *value)
+static inline u32 *ppc_inst_next(u32 *location, u32 *value)
{
struct ppc_inst tmp;
tmp = ppc_inst_read(value);
- return location + ppc_inst_len(tmp);
+ return (void *)location + ppc_inst_len(tmp);
}
-static inline u64 ppc_inst_as_u64(struct ppc_inst x)
+static inline unsigned long ppc_inst_as_ulong(struct ppc_inst x)
{
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
- return (u64)ppc_inst_suffix(x) << 32 | ppc_inst_val(x);
-#else
- return (u64)ppc_inst_val(x) << 32 | ppc_inst_suffix(x);
-#endif
+ if (IS_ENABLED(CONFIG_PPC32))
+ return ppc_inst_val(x);
+ else if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+ return (u64)ppc_inst_suffix(x) << 32 | ppc_inst_val(x);
+ else
+ return (u64)ppc_inst_val(x) << 32 | ppc_inst_suffix(x);
}
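With ppc_inst_read() and ppc_inst_next() now taking plain u32 pointers, a walk over code that may contain 8-byte prefixed instructions becomes (hedged sketch; start and end are hypothetical bounds):

	/* sketch: step through instructions, honouring prefixed (8-byte) forms */
	u32 *p = start;

	while (p < end) {
		struct ppc_inst insn = ppc_inst_read(p);

		/* ... examine insn ... */
		p = ppc_inst_next(p, p);
	}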
#define PPC_INST_STR_LEN sizeof("00000000 00000000")
@@ -141,10 +154,6 @@ static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], struct ppc_ins
__str; \
})
-int probe_user_read_inst(struct ppc_inst *inst,
- struct ppc_inst __user *nip);
-
-int probe_kernel_read_inst(struct ppc_inst *inst,
- struct ppc_inst *src);
+int copy_inst_from_kernel_nofault(struct ppc_inst *inst, u32 *src);
#endif /* _ASM_POWERPC_INST_H */
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index aedfba29e43a..d4bdf7d274ac 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -2,6 +2,70 @@
#ifndef _ASM_POWERPC_INTERRUPT_H
#define _ASM_POWERPC_INTERRUPT_H
+/* BookE/4xx */
+#define INTERRUPT_CRITICAL_INPUT 0x100
+
+/* BookE */
+#define INTERRUPT_DEBUG 0xd00
+#ifdef CONFIG_BOOKE
+#define INTERRUPT_PERFMON 0x260
+#define INTERRUPT_DOORBELL 0x280
+#endif
+
+/* BookS/4xx/8xx */
+#define INTERRUPT_MACHINE_CHECK 0x200
+
+/* BookS/8xx */
+#define INTERRUPT_SYSTEM_RESET 0x100
+
+/* BookS */
+#define INTERRUPT_DATA_SEGMENT 0x380
+#define INTERRUPT_INST_SEGMENT 0x480
+#define INTERRUPT_TRACE 0xd00
+#define INTERRUPT_H_DATA_STORAGE 0xe00
+#define INTERRUPT_HMI 0xe60
+#define INTERRUPT_H_FAC_UNAVAIL 0xf80
+#ifdef CONFIG_PPC_BOOK3S
+#define INTERRUPT_DOORBELL 0xa00
+#define INTERRUPT_PERFMON 0xf00
+#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20
+#endif
+
+/* BookE/BookS/4xx/8xx */
+#define INTERRUPT_DATA_STORAGE 0x300
+#define INTERRUPT_INST_STORAGE 0x400
+#define INTERRUPT_EXTERNAL 0x500
+#define INTERRUPT_ALIGNMENT 0x600
+#define INTERRUPT_PROGRAM 0x700
+#define INTERRUPT_SYSCALL 0xc00
+#define INTERRUPT_TRACE 0xd00
+
+/* BookE/BookS/44x */
+#define INTERRUPT_FP_UNAVAIL 0x800
+
+/* BookE/BookS/44x/8xx */
+#define INTERRUPT_DECREMENTER 0x900
+
+#ifndef INTERRUPT_PERFMON
+#define INTERRUPT_PERFMON 0x0
+#endif
+
+/* 8xx */
+#define INTERRUPT_SOFT_EMU_8xx 0x1000
+#define INTERRUPT_INST_TLB_MISS_8xx 0x1100
+#define INTERRUPT_DATA_TLB_MISS_8xx 0x1200
+#define INTERRUPT_INST_TLB_ERROR_8xx 0x1300
+#define INTERRUPT_DATA_TLB_ERROR_8xx 0x1400
+#define INTERRUPT_DATA_BREAKPOINT_8xx 0x1c00
+#define INTERRUPT_INST_BREAKPOINT_8xx 0x1d00
+
+/* 603 */
+#define INTERRUPT_INST_TLB_MISS_603 0x1000
+#define INTERRUPT_DATA_LOAD_TLB_MISS_603 0x1100
+#define INTERRUPT_DATA_STORE_TLB_MISS_603 0x1200
+
+#ifndef __ASSEMBLY__
+
#include <linux/context_tracking.h>
#include <linux/hardirq.h>
#include <asm/cputime.h>
@@ -9,10 +73,52 @@
#include <asm/kprobes.h>
#include <asm/runlatch.h>
-struct interrupt_state {
-#ifdef CONFIG_PPC_BOOK3E_64
- enum ctx_state ctx_state;
+#ifdef CONFIG_PPC_BOOK3S_64
+extern char __end_soft_masked[];
+bool search_kernel_soft_mask_table(unsigned long addr);
+unsigned long search_kernel_restart_table(unsigned long addr);
+
+DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
+
+static inline bool is_implicit_soft_masked(struct pt_regs *regs)
+{
+ if (regs->msr & MSR_PR)
+ return false;
+
+ if (regs->nip >= (unsigned long)__end_soft_masked)
+ return false;
+
+ return search_kernel_soft_mask_table(regs->nip);
+}
+
+static inline void srr_regs_clobbered(void)
+{
+ local_paca->srr_valid = 0;
+ local_paca->hsrr_valid = 0;
+}
+#else
+static inline bool is_implicit_soft_masked(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline void srr_regs_clobbered(void)
+{
+}
#endif
+
+static inline void nap_adjust_return(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC_970_NAP
+ if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
+ /* Can avoid a test-and-clear because NMIs do not call this */
+ clear_thread_local_flags(_TLF_NAPPING);
+ regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
+ }
+#endif
+}
+
+struct interrupt_state {
};
static inline void booke_restore_dbcr0(void)
@@ -29,10 +135,19 @@ static inline void booke_restore_dbcr0(void)
static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
- /*
- * Book3E reconciles irq soft mask in asm
- */
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC32
+ if (!arch_irq_disabled_regs(regs))
+ trace_hardirqs_off();
+
+ if (user_mode(regs)) {
+ kuep_lock();
+ account_cpu_user_entry();
+ } else {
+ kuap_save_and_lock(regs);
+ }
+#endif
+
+#ifdef CONFIG_PPC64
if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
trace_hardirqs_off();

local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -48,16 +163,21 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
* CT_WARN_ON comes here via program_check_exception,
* so avoid recursion.
*/
- if (TRAP(regs) != 0x700)
+ if (TRAP(regs) != INTERRUPT_PROGRAM) {
CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
+ BUG_ON(is_implicit_soft_masked(regs));
+ }
+#ifdef CONFIG_PPC_BOOK3S
+ /* Move this under a debugging check */
+ if (arch_irq_disabled_regs(regs))
+ BUG_ON(search_kernel_restart_table(regs->nip));
+#endif
}
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
#endif
-#ifdef CONFIG_PPC_BOOK3E_64
- state->ctx_state = exception_enter();
- if (user_mode(regs))
- account_cpu_user_entry();
-#endif
+ booke_restore_dbcr0();
}
/*
@@ -76,23 +196,6 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
*/
static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
-#ifdef CONFIG_PPC_BOOK3E_64
- exception_exit(state->ctx_state);
-#endif
-
- /*
- * Book3S exits to user via interrupt_exit_user_prepare(), which does
- * context tracking, which is a cleaner way to handle PREEMPT=y
- * and avoid context entry/exit in e.g., preempt_schedule_irq()),
- * which is likely to be where the core code wants to end up.
- *
- * The above comment explains why we can't do the
- *
- * if (user_mode(regs))
- * user_exit_irqoff();
- *
- * sequence here.
- */
}
static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
@@ -109,26 +212,50 @@ static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
+ /*
+ * Adjust at exit so the main handler sees the true NIA. This must
+ * come before irq_exit() because irq_exit can enable interrupts, and
+ * if another interrupt is taken before nap_adjust_return has run
+ * here, then that interrupt would return directly to idle nap return.
+ */
+ nap_adjust_return(regs);
+
irq_exit();
interrupt_exit_prepare(regs, state);
}
struct interrupt_nmi_state {
#ifdef CONFIG_PPC64
-#ifdef CONFIG_PPC_BOOK3S_64
u8 irq_soft_mask;
u8 irq_happened;
-#endif
u8 ftrace_enabled;
+ u64 softe;
#endif
};
+static inline bool nmi_disables_ftrace(struct pt_regs *regs)
+{
+ /* Allow DEC and PMI to be traced when they are soft-NMI */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+ if (TRAP(regs) == INTERRUPT_DECREMENTER)
+ return false;
+ if (TRAP(regs) == INTERRUPT_PERFMON)
+ return false;
+ }
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
+ if (TRAP(regs) == INTERRUPT_PERFMON)
+ return false;
+ }
+
+ return true;
+}
+
static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
{
#ifdef CONFIG_PPC64
-#ifdef CONFIG_PPC_BOOK3S_64
state->irq_soft_mask = local_paca->irq_soft_mask;
state->irq_happened = local_paca->irq_happened;
+ state->softe = regs->softe;
/*
* Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
@@ -138,10 +265,17 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ if (is_implicit_soft_masked(regs)) {
+ // Adjust regs->softe to match the implicit soft-mask, so
+ // arch_irq_disabled_regs(regs) behaves as expected.
+ regs->softe = IRQS_ALL_DISABLED;
+ }
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
+
/* Don't do any per-CPU operations until interrupt state is fixed */
-#endif
- /* Allow DEC and PMI to be traced when they are soft-NMI */
- if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260) {
+
+ if (nmi_disables_ftrace(regs)) {
state->ftrace_enabled = this_cpu_get_ftrace_enabled();
this_cpu_set_ftrace_enabled(0);
}
@@ -164,17 +298,29 @@ static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
radix_enabled() || (mfmsr() & MSR_DR))
nmi_exit();
+ /*
+ * nmi does not call nap_adjust_return because nmi should not create
+ * new work to do (must use irq_work for that).
+ */
+
#ifdef CONFIG_PPC64
- if (TRAP(regs) != 0x900 && TRAP(regs) != 0xf00 && TRAP(regs) != 0x260)
+#ifdef CONFIG_PPC_BOOK3S
+ if (arch_irq_disabled_regs(regs)) {
+ unsigned long rst = search_kernel_restart_table(regs->nip);
+ if (rst)
+ regs_set_return_ip(regs, rst);
+ }
+#endif
+
+ if (nmi_disables_ftrace(regs))
this_cpu_set_ftrace_enabled(state->ftrace_enabled);
-#ifdef CONFIG_PPC_BOOK3S_64
/* Check we didn't change the pending interrupt mask. */
WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
+ regs->softe = state->softe;
local_paca->irq_happened = state->irq_happened;
local_paca->irq_soft_mask = state->irq_soft_mask;
#endif
-#endif
}
/*
@@ -387,6 +533,7 @@ DECLARE_INTERRUPT_HANDLER(SMIException);
DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
DECLARE_INTERRUPT_HANDLER(unknown_exception);
DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
+DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
DECLARE_INTERRUPT_HANDLER(RunModeException);
DECLARE_INTERRUPT_HANDLER(single_step_exception);
@@ -410,8 +557,7 @@ DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
DECLARE_INTERRUPT_HANDLER(CacheLockingException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
-DECLARE_INTERRUPT_HANDLER(unrecoverable_exception);
-DECLARE_INTERRUPT_HANDLER(WatchdogException);
+DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);
/* slb.c */
@@ -422,7 +568,7 @@ DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);
/* fault.c */
-DECLARE_INTERRUPT_HANDLER_RET(do_page_fault);
+DECLARE_INTERRUPT_HANDLER(do_page_fault);
DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);
/* process.c */
@@ -437,6 +583,8 @@ DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
+void __noreturn unrecoverable_exception(struct pt_regs *regs);
+
void replay_system_reset(void);
void replay_soft_interrupts(void);
@@ -446,4 +594,6 @@ static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
local_irq_enable();
}
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_POWERPC_INTERRUPT_H */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 273edd208ec5..f130783c8301 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -663,11 +663,6 @@ static inline void name at \
#define xlate_dev_mem_ptr(p) __va(p)
/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
-/*
* We don't do relaxed operations yet, at least not with this semantic
*/
#define readb_relaxed(addr) readb(addr)
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index f3f264e441a7..4982f3711fc3 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -6,7 +6,6 @@
/*
*/
-#include <linux/irqdomain.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
@@ -23,8 +22,8 @@ extern atomic_t ppc_n_lost_interrupts;
/* Total number of virq in the platform */
#define NR_IRQS CONFIG_NR_IRQS
-/* Same thing, used by the generic IRQ code */
-#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS
+/* Number of irqs reserved for a legacy isa controller */
+#define NR_IRQS_LEGACY 16
extern irq_hw_number_t virq_to_hw(unsigned int virq);
@@ -53,8 +52,6 @@ extern void *mcheckirq_ctx[NR_CPUS];
extern void *hardirq_ctx[NR_CPUS];
extern void *softirq_ctx[NR_CPUS];
-void call_do_softirq(void *sp);
-void call_do_irq(struct pt_regs *regs, void *sp);
extern void do_IRQ(struct pt_regs *regs);
extern void __init init_IRQ(void);
extern void __do_irq(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 09297ec9fa52..93ce3ec25387 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -20,7 +20,8 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
asm_volatile_goto("1:\n\t"
"nop # arch_static_branch\n\t"
".pushsection __jump_table, \"aw\"\n\t"
- JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+ ".long 1b - ., %l[l_yes] - .\n\t"
+ JUMP_ENTRY_TYPE "%c0 - .\n\t"
".popsection \n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
@@ -34,7 +35,8 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
asm_volatile_goto("1:\n\t"
"b %l[l_yes] # arch_static_branch_jump\n\t"
".pushsection __jump_table, \"aw\"\n\t"
- JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+ ".long 1b - ., %l[l_yes] - .\n\t"
+ JUMP_ENTRY_TYPE "%c0 - .\n\t"
".popsection \n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
@@ -43,23 +45,12 @@ l_yes:
return true;
}
-#ifdef CONFIG_PPC64
-typedef u64 jump_label_t;
-#else
-typedef u32 jump_label_t;
-#endif
-
-struct jump_entry {
- jump_label_t code;
- jump_label_t target;
- jump_label_t key;
-};
-
#else
#define ARCH_STATIC_BRANCH(LABEL, KEY) \
1098: nop; \
.pushsection __jump_table, "aw"; \
- FTR_ENTRY_LONG 1098b, LABEL, KEY; \
+ .long 1098b - ., LABEL - .; \
+ FTR_ENTRY_LONG KEY - .; \
.popsection
#endif
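Dropping the arch-private struct works because entries are now emitted as 32-bit relative offsets, matching the generic layout used with CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE (shown here for reference; this definition lives in generic code, not in this patch):

	/* generic relative form: each field is an offset from its own address */
	struct jump_entry {
		s32 code;	/* branch/nop site, relative */
		s32 target;	/* jump target, relative */
		long key;	/* struct static_key address, relative */
	};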
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
index 7355ed05e65e..3c478e5ef24c 100644
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -19,7 +19,7 @@
#define KASAN_SHADOW_SCALE_SHIFT 3
-#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_MODULES) && defined(CONFIG_STRICT_KERNEL_RWX)
+#ifdef CONFIG_MODULES
#define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
#else
#define KASAN_KERN_START PAGE_OFFSET
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 9ab344d29a54..88d0d7cf3a79 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -107,15 +107,7 @@ struct kimage_arch {
unsigned long backup_start;
void *backup_buf;
-
- unsigned long elfcorehdr_addr;
- unsigned long elf_headers_sz;
- void *elf_headers;
-
-#ifdef CONFIG_IMA_KEXEC
- phys_addr_t ima_buffer_addr;
- size_t ima_buffer_size;
-#endif
+ void *fdt;
};
char *setup_kdump_cmdline(struct kimage *image, char *cmdline,
@@ -123,10 +115,6 @@ char *setup_kdump_cmdline(struct kimage *image, char *cmdline,
int setup_purgatory(struct kimage *image, const void *slave_code,
const void *fdt, unsigned long kernel_load_addr,
unsigned long fdt_load_addr);
-int setup_new_fdt(const struct kimage *image, void *fdt,
- unsigned long initrd_load_addr, unsigned long initrd_len,
- const char *cmdline);
-int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size);
#ifdef CONFIG_PPC64
struct kexec_buf;
@@ -136,7 +124,7 @@ int load_crashdump_segments_ppc64(struct kimage *image,
int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
const void *fdt, unsigned long kernel_load_addr,
unsigned long fdt_load_addr);
-unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image);
+unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image);
int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
unsigned long initrd_load_addr,
unsigned long initrd_len, const char *cmdline);
diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
new file mode 100644
index 000000000000..a9846b68c6b9
--- /dev/null
+++ b/arch/powerpc/include/asm/kfence.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * powerpc KFENCE support.
+ *
+ * Copyright (C) 2020 CS GROUP France
+ */
+
+#ifndef __ASM_POWERPC_KFENCE_H
+#define __ASM_POWERPC_KFENCE_H
+
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+
+static inline bool arch_kfence_init_pool(void)
+{
+ return true;
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ pte_t *kpte = virt_to_kpte(addr);
+
+ if (protect) {
+ pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ } else {
+ pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);
+ }
+
+ return true;
+}
+
+#endif /* __ASM_POWERPC_KFENCE_H */
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index 7ec21af49a45..1df763002726 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -5,14 +5,6 @@
#define KUAP_READ 1
#define KUAP_WRITE 2
#define KUAP_READ_WRITE (KUAP_READ | KUAP_WRITE)
-/*
- * For prevent_user_access() only.
- * Use the current saved situation instead of the to/from/size params.
- * Used on book3s/32
- */
-#define KUAP_CURRENT_READ 4
-#define KUAP_CURRENT_WRITE 8
-#define KUAP_CURRENT (KUAP_CURRENT_READ | KUAP_CURRENT_WRITE)
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/kup.h>
@@ -28,15 +20,6 @@
#ifdef __ASSEMBLY__
#ifndef CONFIG_PPC_KUAP
-.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
-.endm
-
-.macro kuap_restore sp, current, gpr1, gpr2, gpr3
-.endm
-
-.macro kuap_check current, gpr
-.endm
-
.macro kuap_check_amr gpr1, gpr2
.endm
@@ -55,6 +38,11 @@ void setup_kuep(bool disabled);
static inline void setup_kuep(bool disabled) { }
#endif /* CONFIG_PPC_KUEP */
+#ifndef CONFIG_PPC_BOOK3S_32
+static inline void kuep_lock(void) { }
+static inline void kuep_unlock(void) { }
+#endif
+
#ifdef CONFIG_PPC_KUAP
void setup_kuap(bool disabled);
#else
@@ -66,7 +54,15 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
return false;
}
-static inline void kuap_check_amr(void) { }
+static inline void kuap_assert_locked(void) { }
+static inline void kuap_save_and_lock(struct pt_regs *regs) { }
+static inline void kuap_user_restore(struct pt_regs *regs) { }
+static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
+
+static inline unsigned long kuap_get_and_assert_locked(void)
+{
+ return 0;
+}
/*
* book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
@@ -76,8 +72,7 @@ static inline void kuap_check_amr(void) { }
#ifndef CONFIG_PPC_BOOK3S_64
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir) { }
-static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir) { }
+static inline void prevent_user_access(unsigned long dir) { }
static inline unsigned long prevent_user_access_return(void) { return 0UL; }
static inline void restore_user_access(unsigned long flags) { }
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -89,53 +84,53 @@ static __always_inline void setup_kup(void)
setup_kuap(disable_kuap);
}
-static inline void allow_read_from_user(const void __user *from, unsigned long size)
+static __always_inline void allow_read_from_user(const void __user *from, unsigned long size)
{
barrier_nospec();
allow_user_access(NULL, from, size, KUAP_READ);
}
-static inline void allow_write_to_user(void __user *to, unsigned long size)
+static __always_inline void allow_write_to_user(void __user *to, unsigned long size)
{
allow_user_access(to, NULL, size, KUAP_WRITE);
}
-static inline void allow_read_write_user(void __user *to, const void __user *from,
- unsigned long size)
+static __always_inline void allow_read_write_user(void __user *to, const void __user *from,
+ unsigned long size)
{
barrier_nospec();
allow_user_access(to, from, size, KUAP_READ_WRITE);
}
-static inline void prevent_read_from_user(const void __user *from, unsigned long size)
+static __always_inline void prevent_read_from_user(const void __user *from, unsigned long size)
{
- prevent_user_access(NULL, from, size, KUAP_READ);
+ prevent_user_access(KUAP_READ);
}
-static inline void prevent_write_to_user(void __user *to, unsigned long size)
+static __always_inline void prevent_write_to_user(void __user *to, unsigned long size)
{
- prevent_user_access(to, NULL, size, KUAP_WRITE);
+ prevent_user_access(KUAP_WRITE);
}
-static inline void prevent_read_write_user(void __user *to, const void __user *from,
- unsigned long size)
+static __always_inline void prevent_read_write_user(void __user *to, const void __user *from,
+ unsigned long size)
{
- prevent_user_access(to, from, size, KUAP_READ_WRITE);
+ prevent_user_access(KUAP_READ_WRITE);
}
-static inline void prevent_current_access_user(void)
+static __always_inline void prevent_current_access_user(void)
{
- prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT);
+ prevent_user_access(KUAP_READ_WRITE);
}
-static inline void prevent_current_read_from_user(void)
+static __always_inline void prevent_current_read_from_user(void)
{
- prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_READ);
+ prevent_user_access(KUAP_READ);
}
-static inline void prevent_current_write_to_user(void)
+static __always_inline void prevent_current_write_to_user(void)
{
- prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_WRITE);
+ prevent_user_access(KUAP_WRITE);
}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index a3633560493b..fbbf3cec92e9 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -147,6 +147,7 @@
#define KVM_GUEST_MODE_SKIP 2
#define KVM_GUEST_MODE_GUEST_HV 3
#define KVM_GUEST_MODE_HOST_HV 4
+#define KVM_GUEST_MODE_HV_P9 5 /* ISA >= v3.0 path */
#define KVM_INST_FETCH_FAILED -1
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 2f5f919f6cd3..caaa0f592d8e 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -210,12 +210,12 @@ extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
unsigned int lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
-extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn);
-extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn);
-extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn);
+extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn);
+extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn);
+extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
@@ -258,6 +258,8 @@ extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
struct kvm_memory_slot *memslot,
unsigned long *map);
+extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
+ unsigned long lpcr);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
@@ -305,6 +307,9 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
+long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 9bb9bb370b53..eaf3a562bf1e 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -153,10 +153,18 @@ static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
return radix;
}
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr);
+
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
#endif
/*
+ * Invalid HDSISR value which is used to indicate when HW has not set the reg.
+ * Used to work around an erratum.
+ */
+#define HDSISR_CANARY 0x7fff
+
+/*
* We use a lock bit in HPTE dword 0 to synchronize updates and
* accesses to each HPTE, and another bit to indicate non-present
* HPTEs.
diff --git a/arch/powerpc/include/asm/kvm_guest.h b/arch/powerpc/include/asm/kvm_guest.h
index 2fca299f7e19..c63105d2c9e7 100644
--- a/arch/powerpc/include/asm/kvm_guest.h
+++ b/arch/powerpc/include/asm/kvm_guest.h
@@ -16,10 +16,10 @@ static inline bool is_kvm_guest(void)
return static_branch_unlikely(&kvm_guest);
}
-bool check_kvm_guest(void);
+int check_kvm_guest(void);
#else
static inline bool is_kvm_guest(void) { return false; }
-static inline bool check_kvm_guest(void) { return false; }
+static inline int check_kvm_guest(void) { return 0; }
#endif
#endif /* _ASM_POWERPC_KVM_GUEST_H_ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 05fb00d37609..9f52f282b1aa 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -51,18 +51,12 @@
/* PPC-specific vcpu->requests bit members */
#define KVM_REQ_WATCHDOG KVM_ARCH_REQ(0)
#define KVM_REQ_EPR_EXIT KVM_ARCH_REQ(1)
+#define KVM_REQ_PENDING_TIMER KVM_ARCH_REQ(2)
#include <linux/mmu_notifier.h>
#define KVM_ARCH_WANT_MMU_NOTIFIER
-extern int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end,
- unsigned flags);
-extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
-extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-
#define HPTEG_CACHE_NUM (1 << 15)
#define HPTEG_HASH_BITS_PTE 13
#define HPTEG_HASH_BITS_PTE_LONG 12
@@ -87,12 +81,13 @@ struct kvmppc_book3s_shadow_vcpu;
struct kvm_nested_guest;
struct kvm_vm_stat {
- ulong remote_tlb_flush;
- ulong num_2M_pages;
- ulong num_1G_pages;
+ struct kvm_vm_stat_generic generic;
+ u64 num_2M_pages;
+ u64 num_1G_pages;
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
u64 sum_exits;
u64 mmio_exits;
u64 signal_exits;
@@ -108,14 +103,8 @@ struct kvm_vcpu_stat {
u64 emulated_inst_exits;
u64 dec_exits;
u64 ext_intr_exits;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
u64 halt_wait_ns;
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
u64 halt_successful_wait;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
u64 dbell_exits;
u64 gdbell_exits;
u64 ld;
@@ -304,7 +293,6 @@ struct kvm_arch {
u8 fwnmi_enabled;
u8 secure_guest;
u8 svm_enabled;
- bool threads_indep;
bool nested_enable;
bool dawr1_enabled;
pgd_t *pgtable;
@@ -690,7 +678,12 @@ struct kvm_vcpu_arch {
ulong fault_dar;
u32 fault_dsisr;
unsigned long intr_msr;
- ulong fault_gpa; /* guest real address of page fault (POWER9) */
+ /*
+ * POWER9 and later: fault_gpa contains the guest real address of the
+ * page fault for a radix guest, or the segment descriptor (equivalent
+ * to the result of slbmfev on the SLB entry that translated the EA)
+ * for hash guests.
+ */
+ ulong fault_gpa;
#endif
#ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 8aacd76bb702..2d88944f9f34 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -129,6 +129,7 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
+extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
@@ -281,11 +282,10 @@ struct kvmppc_ops {
const struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
- int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
- unsigned long end);
- int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
- int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
- void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
+ bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
+ bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
+ bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
+ bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
void (*free_memslot)(struct kvm_memory_slot *slot);
int (*init_vm)(struct kvm *kvm);
void (*destroy_vm)(struct kvm *kvm);
@@ -607,6 +607,7 @@ extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
+extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
@@ -639,6 +640,8 @@ static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{ return 0; }
+static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
+ { return 0; }
#endif
#ifdef CONFIG_KVM_XIVE
@@ -656,8 +659,6 @@ extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
-extern void kvmppc_xive_init_module(void);
-extern void kvmppc_xive_exit_module(void);
extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu);
@@ -672,6 +673,8 @@ extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
@@ -681,8 +684,6 @@ static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
-extern void kvmppc_xive_native_init_module(void);
-extern void kvmppc_xive_native_exit_module(void);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
@@ -696,8 +697,6 @@ static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
-static inline void kvmppc_xive_init_module(void) { }
-static inline void kvmppc_xive_exit_module(void) { }
static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
@@ -712,14 +711,14 @@ static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { retur
static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
+static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
+static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
-static inline void kvmppc_xive_native_init_module(void) { }
-static inline void kvmppc_xive_native_exit_module(void) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
union kvmppc_one_reg *val)
{ return 0; }
@@ -755,7 +754,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
unsigned int yield_count);
-long kvmppc_h_random(struct kvm_vcpu *vcpu);
+long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
@@ -767,8 +766,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
- unsigned long pte_index, unsigned long avpn,
- unsigned long va);
+ unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index ae25e6e72997..4fe018cc207b 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -16,7 +16,7 @@ static inline void klp_arch_set_pc(struct ftrace_regs *fregs, unsigned long ip)
{
struct pt_regs *regs = ftrace_get_regs(fregs);
- regs->nip = ip;
+ regs_set_return_ip(regs, ip);
}
#define klp_get_ftrace_location klp_get_ftrace_location
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 80b27f5d9648..27016b98ecb2 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -220,7 +220,7 @@ enum {
#elif defined(CONFIG_44x)
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_44x
#endif
-#if defined(CONFIG_E200) || defined(CONFIG_E500)
+#ifdef CONFIG_E500
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_FSL_E
#endif
@@ -228,7 +228,7 @@ enum {
#define MMU_FTRS_ALWAYS 0
#endif
-static inline bool early_mmu_has_feature(unsigned long feature)
+static __always_inline bool early_mmu_has_feature(unsigned long feature)
{
if (MMU_FTRS_ALWAYS & feature)
return true;
@@ -286,7 +286,7 @@ static inline void mmu_feature_keys_init(void)
}
-static inline bool mmu_has_feature(unsigned long feature)
+static __always_inline bool mmu_has_feature(unsigned long feature)
{
return early_mmu_has_feature(feature);
}
@@ -324,7 +324,6 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
}
#endif /* !CONFIG_DEBUG_VM */
-#ifdef CONFIG_PPC_RADIX_MMU
static inline bool radix_enabled(void)
{
return mmu_has_feature(MMU_FTR_TYPE_RADIX);
@@ -334,17 +333,6 @@ static inline bool early_radix_enabled(void)
{
return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
-#else
-static inline bool radix_enabled(void)
-{
- return false;
-}
-
-static inline bool early_radix_enabled(void)
-{
- return false;
-}
-#endif
#ifdef CONFIG_STRICT_KERNEL_RWX
static inline bool strict_kernel_rwx_enabled(void)
@@ -357,6 +345,11 @@ static inline bool strict_kernel_rwx_enabled(void)
return false;
}
#endif
+
+static inline bool strict_module_rwx_enabled(void)
+{
+ return IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && strict_kernel_rwx_enabled();
+}
#endif /* !__ASSEMBLY__ */
/* The kernel uses the constants below to index into the page sizes array.
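With radix_enabled() now defined unconditionally and the feature checks __always_inline, callers can write a single predicate and let constant folding remove dead branches. A sketch of the common pattern; example_flush_mm is a hypothetical caller:

/* Hypothetical caller: the usual radix-vs-hash dispatch. */
static void example_flush_mm(struct mm_struct *mm)
{
	if (radix_enabled())		/* MMU_FTR_TYPE_RADIX */
		radix__flush_tlb_mm(mm);
	else
		hash__flush_tlb_mm(mm);
}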
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 652ce85f9410..9ba6b585337f 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -57,7 +57,6 @@ static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
-extern void set_context(unsigned long id, pgd_t *pgd);
#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
@@ -122,12 +121,6 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
}
#endif
-#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
-extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
-#else
-static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
-#endif
-
extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
@@ -222,6 +215,18 @@ static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
+void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end);
+#else
+static inline void do_h_rpt_invalidate_prt(unsigned long pid,
+ unsigned long lpid,
+ unsigned long type,
+ unsigned long pg_sizes,
+ unsigned long start,
+ unsigned long end) { }
+#endif
extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk);
@@ -263,7 +268,7 @@ extern void arch_exit_mmap(struct mm_struct *mm);
static inline void arch_unmap(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
- unsigned long vdso_base = (unsigned long)mm->context.vdso - PAGE_SIZE;
+ unsigned long vdso_base = (unsigned long)mm->context.vdso;
if (start <= vdso_base && vdso_base < end)
mm->context.vdso = NULL;
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index 6cda76b57c5d..4c6c6dbd182f 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -18,7 +18,7 @@
* flags field of the struct page
*/
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
extern struct pglist_data *node_data[];
/*
@@ -41,7 +41,7 @@ u64 memory_hotplug_max(void);
#else
#define memory_hotplug_max() memblock_end_of_DRAM()
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#endif /* CONFIG_NUMA */
#ifdef CONFIG_FA_DUMP
#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
#endif
diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
index 39be9aea86db..64b6c608eca4 100644
--- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
@@ -66,10 +66,9 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
}
#ifdef CONFIG_PPC_4K_PAGES
-static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable)
+static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
- size_t size = huge_page_size(hstate_vma(vma));
+ size_t size = 1UL << shift;
if (size == SZ_16K)
return __pte(pte_val(entry) & ~_PAGE_HUGE);
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 17a4a616436f..882a0bc7887a 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -7,48 +7,86 @@
#ifdef CONFIG_PPC_KUAP
-#ifdef __ASSEMBLY__
-
-.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
- lis \gpr2, MD_APG_KUAP@h /* only APG0 and APG1 are used */
- mfspr \gpr1, SPRN_MD_AP
- mtspr SPRN_MD_AP, \gpr2
- stw \gpr1, STACK_REGS_KUAP(\sp)
-.endm
-
-.macro kuap_restore sp, current, gpr1, gpr2, gpr3
- lwz \gpr1, STACK_REGS_KUAP(\sp)
- mtspr SPRN_MD_AP, \gpr1
-.endm
-
-.macro kuap_check current, gpr
-#ifdef CONFIG_PPC_KUAP_DEBUG
- mfspr \gpr, SPRN_MD_AP
- rlwinm \gpr, \gpr, 16, 0xffff
-999: twnei \gpr, MD_APG_KUAP@h
- EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
-#endif
-.endm
-
-#else /* !__ASSEMBLY__ */
+#ifndef __ASSEMBLY__
+
+#include <linux/jump_label.h>
#include <asm/reg.h>
+extern struct static_key_false disable_kuap_key;
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return static_branch_unlikely(&disable_kuap_key);
+}
+
+static inline void kuap_save_and_lock(struct pt_regs *regs)
+{
+ if (kuap_is_disabled())
+ return;
+
+ regs->kuap = mfspr(SPRN_MD_AP);
+ mtspr(SPRN_MD_AP, MD_APG_KUAP);
+}
+
+static inline void kuap_user_restore(struct pt_regs *regs)
+{
+}
+
+static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+{
+ if (kuap_is_disabled())
+ return;
+
+ mtspr(SPRN_MD_AP, regs->kuap);
+}
+
+static inline unsigned long kuap_get_and_assert_locked(void)
+{
+ unsigned long kuap;
+
+ if (kuap_is_disabled())
+ return MD_APG_INIT;
+
+ kuap = mfspr(SPRN_MD_AP);
+
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ WARN_ON_ONCE(kuap >> 16 != MD_APG_KUAP >> 16);
+
+ return kuap;
+}
+
+static inline void kuap_assert_locked(void)
+{
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && !kuap_is_disabled())
+ kuap_get_and_assert_locked();
+}
+
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, MD_APG_INIT);
}
-static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
+static inline void prevent_user_access(unsigned long dir)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
static inline unsigned long prevent_user_access_return(void)
{
- unsigned long flags = mfspr(SPRN_MD_AP);
+ unsigned long flags;
+
+ if (kuap_is_disabled())
+ return MD_APG_INIT;
+
+ flags = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
@@ -57,12 +95,18 @@ static inline unsigned long prevent_user_access_return(void)
static inline void restore_user_access(unsigned long flags)
{
+ if (kuap_is_disabled())
+ return;
+
mtspr(SPRN_MD_AP, flags);
}
static inline bool
bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
+ if (kuap_is_disabled())
+ return false;
+
return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000);
}
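All of the 8xx helpers above reduce to SPRN_MD_AP writes guarded by the disable_kuap_key static key. A sketch of the save/restore pairing for code that must close the user window temporarily; the function and its contents are invented for illustration:

/* Illustration only: close user access, then restore the prior state. */
static void example_with_kuap_locked(void)
{
	unsigned long flags;

	flags = prevent_user_access_return();	/* MD_APG_INIT if KUAP is off */
	/* ... user memory is inaccessible here ... */
	restore_user_access(flags);		/* write the saved MD_AP back */
}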
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
index 2d92a39d8f2e..43ceca128531 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
@@ -113,6 +113,7 @@ typedef struct {
/* patch sites */
extern s32 patch__tlb_44x_hwater_D, patch__tlb_44x_hwater_I;
+extern s32 patch__tlb_44x_kuep, patch__tlb_47x_kuep;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 478249959baa..997cec973406 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -172,9 +172,13 @@
#define mmu_linear_psize MMU_PAGE_8M
+#define MODULES_VADDR (PAGE_OFFSET - SZ_256M)
+#define MODULES_END PAGE_OFFSET
+
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
+#include <linux/sizes.h>
void mmu_pin_tlb(unsigned long top, bool readonly);
@@ -222,6 +226,48 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
BUG();
}
+static inline bool arch_vmap_try_size(unsigned long addr, unsigned long end, u64 pfn,
+ unsigned int max_page_shift, unsigned long size)
+{
+ if (end - addr < size)
+ return false;
+
+ if ((1UL << max_page_shift) < size)
+ return false;
+
+ if (!IS_ALIGNED(addr, size))
+ return false;
+
+ if (!IS_ALIGNED(PFN_PHYS(pfn), size))
+ return false;
+
+ return true;
+}
+
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+ u64 pfn, unsigned int max_page_shift)
+{
+ if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_512K))
+ return SZ_512K;
+ if (PAGE_SIZE == SZ_16K)
+ return SZ_16K;
+ if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_16K))
+ return SZ_16K;
+ return PAGE_SIZE;
+}
+#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
+
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+ if (size >= SZ_512K)
+ return 19;
+ else if (size >= SZ_16K)
+ return 14;
+ else
+ return PAGE_SHIFT;
+}
+#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
+
/* patch sites */
extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1;
extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
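arch_vmap_pte_range_map_size() lets generic vmap code use the 8xx's 512K and 16K page sizes when alignment permits. A hedged walk-through with invented values:

/*
 * Hypothetical inputs: addr and PFN_PHYS(pfn) 512K-aligned, end - addr
 * >= SZ_512K, max_page_shift >= 19: the helper returns SZ_512K.
 * Misaligned or shorter ranges fall back to SZ_16K, then PAGE_SIZE.
 */
unsigned long sz = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);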
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 96522f7f0618..f06ae00f2a65 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -54,7 +54,6 @@ extern int icache_44x_need_flush;
#define PGD_MASKED_BITS 0
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 6cb8aa357191..53fbfdfac93d 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -6,12 +6,12 @@
* the ppc64 non-hashed page table.
*/
+#include <linux/sizes.h>
+
#include <asm/nohash/64/pgtable-4k.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>
-#define FIRST_USER_ADDRESS 0UL
-
/*
* Size of EA range mapped by our pagetables.
*/
@@ -54,7 +54,8 @@
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
#define IOREMAP_START (ioremap_bot)
-#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE)
+#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
+#define FIXADDR_SIZE SZ_32M
/*
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 9986ac34b8e2..6ea9001de9a9 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -28,9 +28,6 @@ extern struct device_node *opal_node;
/* API functions */
int64_t opal_invalid_call(void);
-int64_t opal_npu_destroy_context(uint64_t phb_id, uint64_t pid, uint64_t bdf);
-int64_t opal_npu_init_context(uint64_t phb_id, int pasid, uint64_t msr,
- uint64_t bdf);
int64_t opal_npu_map_lpar(uint64_t phb_id, uint64_t bdf, uint64_t lparid,
uint64_t lpcr);
int64_t opal_npu_spa_setup(uint64_t phb_id, uint32_t bdfn,
@@ -307,7 +304,7 @@ int opal_secvar_enqueue_update(const char *key, uint64_t key_len, u8 *data,
s64 opal_mpipl_update(enum opal_mpipl_ops op, u64 src, u64 dest, u64 size);
s64 opal_mpipl_register_tag(enum opal_mpipl_tags tag, u64 addr);
-s64 opal_mpipl_query_tag(enum opal_mpipl_tags tag, u64 *addr);
+s64 opal_mpipl_query_tag(enum opal_mpipl_tags tag, __be64 *addr);
s64 opal_signal_system_reset(s32 cpu);
s64 opal_quiesce(u64 shutdown_type, s32 cpu);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index ec18ac818e3a..dc05a862e72a 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -149,11 +149,9 @@ struct paca_struct {
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_BOOK3S
- mm_context_id_t mm_ctx_id;
#ifdef CONFIG_PPC_MM_SLICES
unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
- unsigned long mm_ctx_slb_addr_limit;
#else
u16 mm_ctx_user_psize;
u16 mm_ctx_sllp;
@@ -167,9 +165,16 @@ struct paca_struct {
u64 kstack; /* Saved Kernel stack addr */
u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */
u64 saved_msr; /* MSR saved here by enter_rtas */
+#ifdef CONFIG_PPC64
+ u64 exit_save_r1; /* Syscall/interrupt R1 save */
+#endif
#ifdef CONFIG_PPC_BOOK3E
u16 trap_save; /* Used when bad stack is encountered */
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ u8 hsrr_valid; /* HSRRs set for HRFID */
+ u8 srr_valid; /* SRRs set for RFID */
+#endif
u8 irq_soft_mask; /* mask for irq soft masking */
u8 irq_happened; /* irq happened while soft-disabled */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
index 5d1726bb28e7..bcb7b5f917be 100644
--- a/arch/powerpc/include/asm/paravirt.h
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -28,19 +28,35 @@ static inline u32 yield_count_of(int cpu)
return be32_to_cpu(yield_count);
}
+/*
+ * Spinlock code confers and prods, so don't trace the hcalls because the
+ * tracing code takes spinlocks which can cause recursion deadlocks.
+ *
+ * These calls are made while the lock is not held: the lock slowpath yields if
+ * it cannot acquire the lock, and the unlock slowpath might prod if a waiter
+ * has yielded. So this may not be a problem for simple spin locks because the
+ * tracing does not technically recurse on the lock, but we avoid it anyway.
+ *
+ * However the queued spin lock contended path is more strictly ordered: the
+ * H_CONFER hcall is made after the task has queued itself on the lock, so then
+ * recursing on that lock will cause the task to then queue up again behind the
+ * first instance (or worse: queued spinlocks use tricks that assume a context
+ * never waits on more than one spinlock, so such recursion may cause random
+ * corruption in the lock code).
+ */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
- plpar_hcall_norets(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
+ plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}
static inline void prod_cpu(int cpu)
{
- plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
+ plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}
static inline void yield_to_any(void)
{
- plpar_hcall_norets(H_CONFER, -1, 0);
+ plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
#else
static inline bool is_shared_processor(void)
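The comment above is the rationale for switching to the _notrace hcall variants. For context, a simplified sketch of the waiter pattern these calls back; this is not the actual slowpath code:

/* Simplified waiter loop (illustration only). */
while (!arch_spin_trylock(&lock)) {
	u32 yc = yield_count_of(owner_cpu);

	if (yc & 1)			/* odd: owner is preempted */
		yield_to_preempted(owner_cpu, yc);	/* H_CONFER, untraced */
}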
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index d2a2a14e56f9..74424c14515c 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -126,7 +126,6 @@ struct pci_controller {
#endif /* CONFIG_PPC64 */
void *private_data;
- struct npu *npu;
};
/* These are used for config access before all the PCI probing
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 6436f0b41539..d1f53260725c 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -119,11 +119,4 @@ extern void pcibios_scan_phb(struct pci_controller *hose);
#endif /* __KERNEL__ */
-extern struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev);
-extern struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index);
-extern int pnv_npu2_init(struct pci_controller *hose);
-extern int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
- unsigned long msr);
-extern int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev);
-
#endif /* __ASM_POWERPC_PCI_H */
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 00e7e671bb4b..f4c3428e816b 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -43,7 +43,7 @@ struct power_pmu {
u64 alt[]);
void (*get_mem_data_src)(union perf_mem_data_src *dsrc,
u32 flags, struct pt_regs *regs);
- void (*get_mem_weight)(u64 *weight);
+ void (*get_mem_weight)(u64 *weight, u64 type);
unsigned long group_constraint_mask;
unsigned long group_constraint_val;
u64 (*bhrb_filter_map)(u64 branch_sample_type);
@@ -67,6 +67,12 @@ struct power_pmu {
* the pmu supports extended perf regs capability
*/
int capabilities;
+ /*
+ * Function to check the event code for reserved values.
+ * It takes a struct perf_event as input, since the event
+ * code could be spread across attr.config*.
+ */
+ int (*check_attr_config)(struct perf_event *ev);
};
/*
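A hedged sketch of what a PMU back end might do in the new hook; the reserved-bits mask is invented:

/* Illustrative implementation; the mask below is made up. */
static int example_check_attr_config(struct perf_event *ev)
{
	u64 code = ev->attr.config;

	if (code & 0xf0ULL)		/* hypothetical reserved field */
		return -EINVAL;

	return 0;
}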
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 6dd78a2dc03a..3360cad78ace 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -70,9 +70,4 @@ extern struct kmem_cache *pgtable_cache[];
#include <asm/nohash/pgalloc.h>
#endif
-static inline pgtable_t pmd_pgtable(pmd_t pmd)
-{
- return (pgtable_t)pmd_page_vaddr(pmd);
-}
-
#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 4eed82172e33..d564d0ecd4cd 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -41,7 +41,9 @@ struct mm_struct;
#ifndef __ASSEMBLY__
-#include <asm/tlbflush.h>
+#ifndef MAX_PTRS_PER_PGD
+#define MAX_PTRS_PER_PGD PTRS_PER_PGD
+#endif
/* Keep these as a macros to avoid include dependency mess */
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -74,6 +76,7 @@ extern unsigned long empty_zero_page[];
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
+void poking_init(void);
extern unsigned long ioremap_bot;
@@ -154,6 +157,12 @@ static inline bool p4d_is_leaf(p4d_t p4d)
}
#endif
+#define pmd_pgtable pmd_pgtable
+static inline pgtable_t pmd_pgtable(pmd_t pmd)
+{
+ return (pgtable_t)pmd_page_vaddr(pmd);
+}
+
#ifdef CONFIG_PPC64
#define is_ioremap_addr is_ioremap_addr
static inline bool is_ioremap_addr(const void *x)
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index ece84a430701..83e0f701ebc6 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -28,7 +28,11 @@ static inline void set_cede_latency_hint(u8 latency_hint)
static inline long cede_processor(void)
{
- return plpar_hcall_norets(H_CEDE);
+ /*
+ * We cannot call tracepoints inside RCU idle regions, which
+ * means we must not trace H_CEDE.
+ */
+ return plpar_hcall_norets_notrace(H_CEDE);
}
static inline long extended_cede_processor(unsigned long latency_hint)
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index ed161ef2b3ca..bede76dd3db7 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -76,6 +76,40 @@
#define __REGA0_R30 30
#define __REGA0_R31 31
+/* For use with PPC_RAW_() macros */
+#define _R0 0
+#define _R1 1
+#define _R2 2
+#define _R3 3
+#define _R4 4
+#define _R5 5
+#define _R6 6
+#define _R7 7
+#define _R8 8
+#define _R9 9
+#define _R10 10
+#define _R11 11
+#define _R12 12
+#define _R13 13
+#define _R14 14
+#define _R15 15
+#define _R16 16
+#define _R17 17
+#define _R18 18
+#define _R19 19
+#define _R20 20
+#define _R21 21
+#define _R22 22
+#define _R23 23
+#define _R24 24
+#define _R25 25
+#define _R26 26
+#define _R27 27
+#define _R28 28
+#define _R29 29
+#define _R30 30
+#define _R31 31
+
#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
#define IMM_DS(i) ((uintptr_t)(i) & 0xfffc)
#define IMM_DQ(i) ((uintptr_t)(i) & 0xfff0)
@@ -222,13 +256,11 @@
#define PPC_INST_LWSYNC 0x7c2004ac
#define PPC_INST_SYNC 0x7c0004ac
#define PPC_INST_SYNC_MASK 0xfc0007fe
-#define PPC_INST_ISYNC 0x4c00012c
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
#define PPC_INST_MTMSRD 0x7c000164
-#define PPC_INST_NOP 0x60000000
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
#define PPC_INST_RFEBB 0x4c000124
@@ -241,10 +273,10 @@
#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
-#define PPC_INST_SC 0x44000002
#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
#define PPC_INST_STRING_GEN_MASK 0xfc00067e
+#define PPC_INST_SETB 0x7c000100
#define PPC_INST_STSWI 0x7c0005aa
#define PPC_INST_STSWX 0x7c00052a
#define PPC_INST_TRECHKPT 0x7c0007dd
@@ -252,19 +284,11 @@
#define PPC_INST_TSR 0x7c0005dd
#define PPC_INST_LD 0xe8000000
#define PPC_INST_STD 0xf8000000
-#define PPC_INST_MFLR 0x7c0802a6
-#define PPC_INST_MTCTR 0x7c0903a6
-#define PPC_INST_ADDI 0x38000000
#define PPC_INST_ADDIS 0x3c000000
#define PPC_INST_ADD 0x7c000214
-#define PPC_INST_BLR 0x4e800020
-#define PPC_INST_BCTR 0x4e800420
-#define PPC_INST_BCTRL 0x4e800421
#define PPC_INST_DIVD 0x7c0003d2
-#define PPC_INST_RLDICR 0x78000004
-#define PPC_INST_ORI 0x60000000
-#define PPC_INST_ORIS 0x64000000
#define PPC_INST_BRANCH 0x48000000
+#define PPC_INST_BL 0x48000001
#define PPC_INST_BRANCH_COND 0x40800000
/* Prefixes */
@@ -322,6 +346,8 @@
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI((v) + 0x8000)
+#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
+#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
/*
* Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
@@ -382,6 +408,10 @@
#define PPC_RAW_STBCIX(s, a, b) (0x7c0007aa | __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
#define PPC_RAW_DCBFPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (4 << 21))
#define PPC_RAW_DCBSTPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (6 << 21))
+#define PPC_RAW_SC() (0x44000002)
+#define PPC_RAW_SYNC() (0x7c0004ac)
+#define PPC_RAW_ISYNC() (0x4c00012c)
+
/*
* Define what the VSX XX1 form instructions will look like, then add
* the 128 bit load store instructions based on that.
@@ -403,10 +433,10 @@
#define PPC_RAW_STXVP(xsp, a, i) (0x18000001 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_DQ(i))
#define PPC_RAW_LXVPX(xtp, a, b) (0x7c00029a | __PPC_XTP(xtp) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_STXVPX(xsp, a, b) (0x7c00039a | __PPC_XSP(xsp) | ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_RAW_PLXVP(xtp, i, a, pr) \
- ((PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i)) << 32 | (0xe8000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_D1(i)))
-#define PPC_RAW_PSTXVP(xsp, i, a, pr) \
- ((PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i)) << 32 | (0xf8000000 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_D1(i)))
+#define PPC_RAW_PLXVP_P(xtp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
+#define PPC_RAW_PLXVP_S(xtp, i, a, pr) (0xe8000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_D1(i))
+#define PPC_RAW_PSTXVP_P(xsp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
+#define PPC_RAW_PSTXVP_S(xsp, i, a, pr) (0xf8000000 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_D1(i))
#define PPC_RAW_NAP (0x4c000364)
#define PPC_RAW_SLEEP (0x4c0003a4)
#define PPC_RAW_WINKLE (0x4c0003e4)
@@ -437,19 +467,26 @@
#define PPC_RAW_STFDX(s, a, b) (0x7c0005ae | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_LVX(t, a, b) (0x7c0000ce | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_STVX(s, a, b) (0x7c0001ce | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ADDE(t, a, b) (0x7c000114 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ADDZE(t, a) (0x7c000194 | ___PPC_RT(t) | ___PPC_RA(a))
+#define PPC_RAW_ADDME(t, a) (0x7c0001d4 | ___PPC_RT(t) | ___PPC_RA(a))
#define PPC_RAW_ADD(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
#define PPC_RAW_ADDC(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_ADDC_DOT(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
-#define PPC_RAW_NOP() (PPC_INST_NOP)
-#define PPC_RAW_BLR() (PPC_INST_BLR)
+#define PPC_RAW_NOP() PPC_RAW_ORI(0, 0, 0)
+#define PPC_RAW_BLR() (0x4e800020)
#define PPC_RAW_BLRL() (0x4e800021)
#define PPC_RAW_MTLR(r) (0x7c0803a6 | ___PPC_RT(r))
-#define PPC_RAW_BCTR() (PPC_INST_BCTR)
-#define PPC_RAW_MTCTR(r) (PPC_INST_MTCTR | ___PPC_RT(r))
-#define PPC_RAW_ADDI(d, a, i) (PPC_INST_ADDI | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_MFLR(t) (0x7c0802a6 | ___PPC_RT(t))
+#define PPC_RAW_BCTR() (0x4e800420)
+#define PPC_RAW_BCTRL() (0x4e800421)
+#define PPC_RAW_MTCTR(r) (0x7c0903a6 | ___PPC_RT(r))
+#define PPC_RAW_ADDI(d, a, i) (0x38000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_LI(r, i) PPC_RAW_ADDI(r, 0, i)
-#define PPC_RAW_ADDIS(d, a, i) (PPC_INST_ADDIS | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_ADDIS(d, a, i) (0x3c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_ADDIC(d, a, i) (0x30000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_ADDIC_DOT(d, a, i) (0x34000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_RAW_LIS(r, i) PPC_RAW_ADDIS(r, 0, i)
#define PPC_RAW_STDX(r, base, b) (0x7c00012a | ___PPC_RS(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_STDU(r, base, i) (0xf8000001 | ___PPC_RS(r) | ___PPC_RA(base) | ((i) & 0xfffc))
@@ -472,6 +509,10 @@
#define PPC_RAW_CMPLW(a, b) (0x7c000040 | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_CMPLD(a, b) (0x7c200040 | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_SUB(d, a, b) (0x7c000050 | ___PPC_RT(d) | ___PPC_RB(a) | ___PPC_RA(b))
+#define PPC_RAW_SUBFC(d, a, b) (0x7c000010 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_SUBFE(d, a, b) (0x7c000110 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_SUBFIC(d, a, i) (0x20000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_SUBFZE(d, a) (0x7c000190 | ___PPC_RT(d) | ___PPC_RA(a))
#define PPC_RAW_MULD(d, a, b) (0x7c0001d2 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_MULW(d, a, b) (0x7c0001d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_MULHWU(d, a, b) (0x7c000016 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
@@ -484,11 +525,13 @@
#define PPC_RAW_DIVDEU_DOT(t, a, b) (0x7c000312 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
#define PPC_RAW_AND(d, a, b) (0x7c000038 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_ANDI(d, a, i) (0x70000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_ANDIS(d, a, i) (0x74000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_AND_DOT(d, a, b) (0x7c000039 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_OR(d, a, b) (0x7c000378 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_MR(d, a) PPC_RAW_OR(d, a, a)
-#define PPC_RAW_ORI(d, a, i) (PPC_INST_ORI | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
-#define PPC_RAW_ORIS(d, a, i) (PPC_INST_ORIS | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_ORI(d, a, i) (0x60000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_ORIS(d, a, i) (0x64000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_NOR(d, a, b) (0x7c0000f8 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
#define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
#define PPC_RAW_XORIS(d, a, i) (0x6c000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
@@ -506,7 +549,7 @@
(0x54000001 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
#define PPC_RAW_RLWIMI(d, a, i, mb, me) (0x50000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
#define PPC_RAW_RLDICL(d, a, i, mb) (0x78000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_MB64(mb))
-#define PPC_RAW_RLDICR(d, a, i, me) (PPC_INST_RLDICR | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me))
+#define PPC_RAW_RLDICR(d, a, i, me) (0x78000004 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me))
/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
#define PPC_RAW_SLWI(d, a, i) PPC_RAW_RLWINM(d, a, i, 0, 31-(i))
@@ -520,6 +563,8 @@
#define PPC_RAW_NEG(d, a) (0x7c0000d0 | ___PPC_RT(d) | ___PPC_RA(a))
#define PPC_RAW_MFSPR(d, spr) (0x7c0002a6 | ___PPC_RT(d) | __PPC_SPR(spr))
+#define PPC_RAW_MTSPR(spr, d) (0x7c0003a6 | ___PPC_RS(d) | __PPC_SPR(spr))
+#define PPC_RAW_EIEIO() (0x7c0006ac)
/* Deal with instructions that older assemblers aren't aware of */
#define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH)
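With the _Rn names and raw builders above, instruction sequences can be assembled without magic numbers. A small sketch; the emitted sequence is arbitrary:

/* Illustration: addi r3,r1,16 ; mtctr r12 ; bctrl */
u32 example_insns[] = {
	PPC_RAW_ADDI(_R3, _R1, 16),
	PPC_RAW_MTCTR(_R12),
	PPC_RAW_BCTRL(),
};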
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 3dceb64fc9af..116c1519728a 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -16,36 +16,6 @@
#define SZL (BITS_PER_LONG/8)
/*
- * Stuff for accurate CPU time accounting.
- * These macros handle transitions between user and system state
- * in exception entry and exit and accumulate time to the
- * user_time and system_time fields in the paca.
- */
-
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
-#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
-#else
-#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb) \
- MFTB(ra); /* get timebase */ \
- PPC_LL rb, ACCOUNT_STARTTIME_USER(ptr); \
- PPC_STL ra, ACCOUNT_STARTTIME(ptr); \
- subf rb,rb,ra; /* subtract start value */ \
- PPC_LL ra, ACCOUNT_USER_TIME(ptr); \
- add ra,ra,rb; /* add on to user time */ \
- PPC_STL ra, ACCOUNT_USER_TIME(ptr); \
-
-#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb) \
- MFTB(ra); /* get timebase */ \
- PPC_LL rb, ACCOUNT_STARTTIME(ptr); \
- PPC_STL ra, ACCOUNT_STARTTIME_USER(ptr); \
- subf rb,rb,ra; /* subtract start value */ \
- PPC_LL ra, ACCOUNT_SYSTEM_TIME(ptr); \
- add ra,ra,rb; /* add on to system time */ \
- PPC_STL ra, ACCOUNT_SYSTEM_TIME(ptr)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
-/*
* Macros for storing registers into and loading registers from
* exception frames.
*/
@@ -792,6 +762,21 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
stringify_in_c(.long (_target) - . ;) \
stringify_in_c(.previous)
+#define SOFT_MASK_TABLE(_start, _end) \
+ stringify_in_c(.section __soft_mask_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
+ stringify_in_c(.previous)
+
+#define RESTART_TABLE(_start, _end, _target) \
+ stringify_in_c(.section __restart_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
+ stringify_in_c(.llong (_target);) \
+ stringify_in_c(.previous)
+
#ifdef CONFIG_PPC_FSL_BOOK3E
#define BTB_FLUSH(reg) \
lis reg,BUCSR_INIT@h; \
diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h
index 84dd1addd434..c5d984700d24 100644
--- a/arch/powerpc/include/asm/probes.h
+++ b/arch/powerpc/include/asm/probes.h
@@ -34,14 +34,14 @@ typedef u32 ppc_opcode_t;
/* Enable single stepping for the current task */
static inline void enable_single_step(struct pt_regs *regs)
{
- regs->msr |= MSR_SINGLESTEP;
+ regs_set_return_msr(regs, regs->msr | MSR_SINGLESTEP);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * We turn off Critical Input Exception (CE) to ensure that the single
* step will be for the instruction we have the probe on; if we don't,
* it is possible we'd get the single step reported for CE.
*/
- regs->msr &= ~MSR_CE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_CE);
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
#ifdef CONFIG_PPC_47x
isync();
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 8acc3590c971..f348e564f7dd 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -144,15 +144,12 @@ struct thread_struct {
#endif
#ifdef CONFIG_PPC32
void *pgdir; /* root of page-table tree */
- unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
#ifdef CONFIG_PPC_RTAS
unsigned long rtas_sp; /* stack pointer for when in RTAS */
#endif
-#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
unsigned long kuap; /* opened segments for user access */
#endif
-#ifdef CONFIG_VMAP_STACK
unsigned long srr0;
unsigned long srr1;
unsigned long dar;
@@ -161,7 +158,7 @@ struct thread_struct {
unsigned long r0, r3, r4, r5, r6, r8, r9, r11;
unsigned long lr, ctr;
#endif
-#endif
+#endif /* CONFIG_PPC32 */
/* Debug Registers */
struct debug_reg debug;
#ifdef CONFIG_PPC_FPU_REGS
@@ -279,10 +276,17 @@ struct thread_struct {
#define SPEFSCR_INIT
#endif
-#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+#define INIT_THREAD { \
+ .ksp = INIT_SP, \
+ .pgdir = swapper_pg_dir, \
+ .kuap = ~0UL, /* KUAP_NONE */ \
+ .fpexc_mode = MSR_FE0 | MSR_FE1, \
+ SPEFSCR_INIT \
+}
+#elif defined(CONFIG_PPC32)
#define INIT_THREAD { \
.ksp = INIT_SP, \
- .ksp_limit = INIT_SP_LIMIT, \
.pgdir = swapper_pg_dir, \
.fpexc_mode = MSR_FE0 | MSR_FE1, \
SPEFSCR_INIT \
@@ -343,17 +347,6 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
#define spin_end() HMT_medium()
-#define spin_until_cond(cond) \
-do { \
- if (unlikely(!(cond))) { \
- spin_begin(); \
- do { \
- spin_cpu_relax(); \
- } while (!(cond)); \
- spin_end(); \
- } \
-} while (0)
-
#endif
/* Check that a certain kernel stack pointer is valid in task_struct p */
@@ -393,6 +386,7 @@ extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
extern unsigned long isa206_idle_insn_mayloss(unsigned long type);
#ifdef CONFIG_PPC_970_NAP
extern void power4_idle_nap(void);
+void power4_idle_nap_return(void);
#endif
extern unsigned long cpuidle_disable;
@@ -417,6 +411,8 @@ extern int fix_alignment(struct pt_regs *);
#define NET_IP_ALIGN 0
#endif
+int do_mathemu(struct pt_regs *regs);
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PROCESSOR_H */
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index e646c7f218bc..8a0d8fb35328 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -71,6 +71,7 @@ struct ps3_dma_region_ops;
* @bus_addr: The 'translated' bus address of the region.
* @len: The length in bytes of the region.
* @offset: The offset from the start of memory of the region.
+ * @dma_mask: Device dma_mask.
 * @ioid: The IOID of the device that owns this region
* @chunk_list: Opaque variable used by the ioc page manager.
* @region_ops: struct ps3_dma_region_ops - dma region operations
@@ -85,6 +86,7 @@ struct ps3_dma_region {
enum ps3_dma_region_type region_type;
unsigned long len;
unsigned long offset;
+ u64 dma_mask;
/* driver variables (set by ps3_dma_region_create) */
unsigned long bus_addr;
@@ -232,7 +234,7 @@ enum lv1_result {
static inline const char* ps3_result(int result)
{
-#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT)
+#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT) || defined(CONFIG_PS3_VERBOSE_RESULT)
switch (result) {
case LV1_SUCCESS:
return "LV1_SUCCESS (0)";
diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h
index 33fa5dd8ee6a..714a35f0d425 100644
--- a/arch/powerpc/include/asm/pte-walk.h
+++ b/arch/powerpc/include/asm/pte-walk.h
@@ -31,6 +31,35 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
pgd_t *pgdir = init_mm.pgd;
return __find_linux_pte(pgdir, ea, NULL, hshift);
}
+
+/*
+ * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
+ * physical address, without taking locks. This can be used in real-mode.
+ */
+static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
+{
+ pte_t *ptep;
+ phys_addr_t pa;
+ int hugepage_shift;
+
+ /*
+ * init_mm does not free page tables, and does not do THP. It may
+ * have huge pages from huge vmalloc / ioremap etc.
+ */
+ ptep = find_init_mm_pte(addr, &hugepage_shift);
+ if (WARN_ON(!ptep))
+ return 0;
+
+ pa = PFN_PHYS(pte_pfn(*ptep));
+
+ if (!hugepage_shift)
+ hugepage_shift = PAGE_SHIFT;
+
+ pa |= addr & ((1ul << hugepage_shift) - 1);
+
+ return pa;
+}
+
/*
* This is what we should always use. Any other lockless page table lookup needs
* careful audit against THP split.
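A sketch of the intended use of ppc_find_vmap_phys(), translating a vmalloc'd buffer for a consumer that runs with the MMU off; example_firmware_call is a made-up name:

/* Hypothetical caller: hand a vmalloc buffer's physical address down. */
void *buf = vmalloc(PAGE_SIZE);
phys_addr_t pa = ppc_find_vmap_phys((unsigned long)buf);

if (pa)			/* 0 means the lookup failed (and WARNed) */
	example_firmware_call(pa);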
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 975ba260006a..3e5d470a6155 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -19,6 +19,7 @@
#ifndef _ASM_POWERPC_PTRACE_H
#define _ASM_POWERPC_PTRACE_H
+#include <linux/err.h>
#include <uapi/asm/ptrace.h>
#include <asm/asm-const.h>
@@ -47,11 +48,12 @@ struct pt_regs
unsigned long result;
};
};
-
+#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_KUAP)
union {
struct {
#ifdef CONFIG_PPC64
unsigned long ppr;
+ unsigned long exit_result;
#endif
union {
#ifdef CONFIG_PPC_KUAP
@@ -67,6 +69,7 @@ struct pt_regs
};
unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
};
+#endif
};
#endif
@@ -121,54 +124,61 @@ struct pt_regs
#endif /* __powerpc64__ */
#ifndef __ASSEMBLY__
+#include <asm/paca.h>
-static inline unsigned long instruction_pointer(struct pt_regs *regs)
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+long do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_leave(struct pt_regs *regs);
+
+static inline void set_return_regs_changed(void)
{
- return regs->nip;
+#ifdef CONFIG_PPC_BOOK3S_64
+ local_paca->hsrr_valid = 0;
+ local_paca->srr_valid = 0;
+#endif
}
-static inline void instruction_pointer_set(struct pt_regs *regs,
- unsigned long val)
+static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
{
- regs->nip = val;
+ regs->nip = ip;
+ set_return_regs_changed();
}
-static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
{
- return regs->gpr[1];
+ regs->msr = msr;
+ set_return_regs_changed();
}
-static inline unsigned long frame_pointer(struct pt_regs *regs)
+static inline void regs_add_return_ip(struct pt_regs *regs, long offset)
{
- return 0;
+ regs_set_return_ip(regs, regs->nip + offset);
}
-#ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
-
-long do_syscall_trace_enter(struct pt_regs *regs);
-void do_syscall_trace_leave(struct pt_regs *regs);
+static inline unsigned long instruction_pointer(struct pt_regs *regs)
+{
+ return regs->nip;
+}
-#define kernel_stack_pointer(regs) ((regs)->gpr[1])
-static inline int is_syscall_success(struct pt_regs *regs)
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
{
- return !(regs->ccr & 0x10000000);
+ regs_set_return_ip(regs, val);
}
-static inline long regs_return_value(struct pt_regs *regs)
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
- if (is_syscall_success(regs))
- return regs->gpr[3];
- else
- return -regs->gpr[3];
+ return regs->gpr[1];
}
-static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+static inline unsigned long frame_pointer(struct pt_regs *regs)
{
- regs->gpr[3] = rc;
+ return 0;
}
#ifdef __powerpc64__
@@ -185,44 +195,27 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
#define current_pt_regs() \
((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1)
+/*
+ * The 4 low bits (0xf) are available as flags to overload the trap word,
+ * because interrupt vectors have minimum alignment of 0x10. TRAP_FLAGS_MASK
+ * must cover the bits used as flags, including bit 0 which is used as the
+ * "norestart" bit.
+ */
#ifdef __powerpc64__
-#ifdef CONFIG_PPC_BOOK3S
-#define TRAP_FLAGS_MASK 0x10
-#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
-#define FULL_REGS(regs) true
-#define SET_FULL_REGS(regs) do { } while (0)
-#else
-#define TRAP_FLAGS_MASK 0x11
-#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
-#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
-#define SET_FULL_REGS(regs) ((regs)->trap |= 1)
-#endif
-#define CHECK_FULL_REGS(regs) BUG_ON(!FULL_REGS(regs))
-#define NV_REG_POISON 0xdeadbeefdeadbeefUL
+#define TRAP_FLAGS_MASK 0x1
#else
/*
- * We use the least-significant bit of the trap field to indicate
- * whether we have saved the full set of registers, or only a
- * partial set. A 1 there means the partial set.
- * On 4xx we use the next bit to indicate whether the exception
+ * On 4xx we use bit 1 in the trap word to indicate whether the exception
* is a critical exception (1 means it is).
*/
-#define TRAP_FLAGS_MASK 0x1F
-#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
-#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
-#define SET_FULL_REGS(regs) ((regs)->trap |= 1)
+#define TRAP_FLAGS_MASK 0xf
#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0)
#define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0)
#define IS_DEBUG_EXC(regs) (((regs)->trap & 8) != 0)
-#define NV_REG_POISON 0xdeadbeef
-#define CHECK_FULL_REGS(regs) \
-do { \
- if ((regs)->trap & 1) \
- printk(KERN_CRIT "%s: partial register set\n", __func__); \
-} while (0)
#endif /* __powerpc64__ */
+#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
-static inline void set_trap(struct pt_regs *regs, unsigned long val)
+static __always_inline void set_trap(struct pt_regs *regs, unsigned long val)
{
regs->trap = (regs->trap & TRAP_FLAGS_MASK) | (val & ~TRAP_FLAGS_MASK);
}
@@ -244,12 +237,37 @@ static inline bool trap_is_syscall(struct pt_regs *regs)
static inline bool trap_norestart(struct pt_regs *regs)
{
- return regs->trap & 0x10;
+ return regs->trap & 0x1;
+}
+
+static __always_inline void set_trap_norestart(struct pt_regs *regs)
+{
+ regs->trap |= 0x1;
+}
+
+#define kernel_stack_pointer(regs) ((regs)->gpr[1])
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+ if (trap_is_scv(regs))
+ return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
+ else
+ return !(regs->ccr & 0x10000000);
}
-static inline void set_trap_norestart(struct pt_regs *regs)
+static inline long regs_return_value(struct pt_regs *regs)
{
- regs->trap |= 0x10;
+ if (trap_is_scv(regs))
+ return regs->gpr[3];
+
+ if (is_syscall_success(regs))
+ return regs->gpr[3];
+ else
+ return -regs->gpr[3];
+}
+
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+ regs->gpr[3] = rc;
}
#define arch_has_single_step() (1)
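Two patterns from the reworked ptrace.h are worth spelling out: return-register writes must go through the setters so the paca srr/hsrr validity flags get cleared, and scv syscalls report failure as a negative value in r3 rather than via CR0[SO]. A hedged sketch; example_skip_insn is illustrative:

/* Illustration only: advance the return NIP past one instruction. */
static void example_skip_insn(struct pt_regs *regs)
{
	/* Not regs->nip += 4: that would leave srr_valid/hsrr_valid stale. */
	regs_add_return_ip(regs, 4);
}

/*
 * Error reporting, as encoded in is_syscall_success():
 *   scv: r3 in the -MAX_ERRNO..-1 range (IS_ERR_VALUE) means failure
 *   sc:  CR0[SO], i.e. regs->ccr & 0x10000000, means failure
 */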
diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
index b752d34517b3..b676c4fb90fd 100644
--- a/arch/powerpc/include/asm/qspinlock.h
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -37,27 +37,13 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
u32 val = 0;
- if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
+ if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
return;
queued_spin_lock_slowpath(lock, val);
}
#define queued_spin_lock queued_spin_lock
-#define smp_mb__after_spinlock() smp_mb()
-
-static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
-{
- /*
- * This barrier was added to simple spinlocks by commit 51d7d5205d338,
- * but it should now be possible to remove it, as arm64 has done with
- * commit c6f5d02b6a0f.
- */
- smp_mb();
- return atomic_read(&lock->val);
-}
-#define queued_spin_is_locked queued_spin_is_locked
-
#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define SPIN_THRESHOLD (1<<15) /* not tuned */
@@ -86,6 +72,13 @@ static inline void pv_spinlocks_init(void)
#endif
+/*
+ * Queued spinlocks rely heavily on smp_cond_load_relaxed() to busy-wait,
+ * which was found to have performance problems if implemented with
+ * the preferred spin_begin()/spin_end() SMT priority pattern. Use the
+ * generic version instead.
+ */
+
#include <asm-generic/qspinlock.h>
#endif /* _ASM_POWERPC_QSPINLOCK_H */
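The new comment contrasts the generic smp_cond_load_relaxed() busy-wait with the SMT-priority pattern, essentially the spin_until_cond() helper removed from processor.h earlier in this diff. A sketch of that pattern for reference:

/* The SMT-priority wait the comment refers to (illustrative). */
#define example_smt_wait(cond)			\
do {						\
	spin_begin();		/* HMT_low()    */ \
	while (!(cond))				\
		spin_cpu_relax();		\
	spin_end();		/* HMT_medium() */ \
} while (0)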
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index da103e92c112..be85cf156a1f 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -124,7 +124,7 @@
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
#else
-#define MSR_TM_ACTIVE(x) 0
+#define MSR_TM_ACTIVE(x) ((void)(x), 0)
#endif
#if defined(CONFIG_PPC_BOOK3S_64)
@@ -393,6 +393,7 @@
#define SPRN_PMMAR 0x356 /* Power Management Memory Activity Register */
#define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */
#define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */
+#define SPRN_TRIG2 0x372
#define SPRN_PMCR 0x374 /* Power Management Control Register */
#define SPRN_RWMR 0x375 /* Region-Weighting Mode Register */
@@ -441,6 +442,7 @@
#define LPCR_VRMA_LP1 ASM_CONST(0x0000800000000000)
#define LPCR_RMLS 0x1C000000 /* Implementation dependent RMO limit sel */
#define LPCR_RMLS_SH 26
+#define LPCR_HAIL ASM_CONST(0x0000000004000000) /* HV AIL (ISAv3.1) */
#define LPCR_ILE ASM_CONST(0x0000000002000000) /* !HV irqs set MSR:LE */
#define LPCR_AIL ASM_CONST(0x0000000001800000) /* Alternate interrupt location */
#define LPCR_AIL_0 ASM_CONST(0x0000000000000000) /* MMU off exception offset 0x0 */
@@ -1393,8 +1395,7 @@ static inline void mtmsr_isync(unsigned long val)
: "r" ((unsigned long)(v)) \
: "memory")
#endif
-#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",0" : \
- : : "memory")
+#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",2" : : : "memory")
static inline void wrtee(unsigned long val)
{
@@ -1435,8 +1436,6 @@ static inline void mtsr(u32 val, u32 idx)
}
#endif
-#define proc_trap() asm volatile("trap")
-
extern unsigned long current_stack_frame(void);
register unsigned long current_stack_pointer asm("r1");
@@ -1447,16 +1446,6 @@ extern void scom970_write(unsigned int address, unsigned long value);
struct pt_regs;
extern void ppc_save_regs(struct pt_regs *regs);
-
-static inline void update_power8_hid0(unsigned long hid0)
-{
- /*
- * The HID0 update on Power8 should at the very least be
- * preceded by a SYNC instruction followed by an ISYNC
- * instruction
- */
- asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
-}
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_REG_H */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 658448ca5b8a..9dc97d2f9d27 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -19,8 +19,8 @@
#define RTAS_UNKNOWN_SERVICE (-1)
#define RTAS_INSTANTIATE_MAX (1ULL<<30) /* Don't instantiate rtas at/above this value */
-/* Buffer size for ppc_rtas system call. */
-#define RTAS_RMOBUF_MAX (64 * 1024)
+/* Memory set aside for sys_rtas to use with calls that need a work area. */
+#define RTAS_USER_REGION_SIZE (64 * 1024)
/* RTAS return status codes */
#define RTAS_BUSY -2 /* RTAS Busy */
@@ -357,7 +357,7 @@ extern void rtas_take_timebase(void);
static inline int page_is_rtas_user_buf(unsigned long pfn)
{
unsigned long paddr = (pfn << PAGE_SHIFT);
- if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_RMOBUF_MAX))
+ if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_USER_REGION_SIZE))
return 1;
return 0;
}
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index b774a4477d5f..792eefaf230b 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -92,6 +92,9 @@ static inline bool security_ftr_enabled(u64 feature)
// The L1-D cache should be flushed after user accesses from the kernel
#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull
+// The STF flush should be executed on privilege state switch
+#define SEC_FTR_STF_BARRIER 0x0000000000010000ull
+
// Features enabled by default
#define SEC_FTR_DEFAULT \
(SEC_FTR_L1D_FLUSH_HV | \
@@ -99,6 +102,7 @@ static inline bool security_ftr_enabled(u64 feature)
SEC_FTR_BNDS_CHK_SPEC_BAR | \
SEC_FTR_L1D_FLUSH_ENTRY | \
SEC_FTR_L1D_FLUSH_UACCESS | \
+ SEC_FTR_STF_BARRIER | \
SEC_FTR_FAVOUR_SECURITY)
#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
new file mode 100644
index 000000000000..b040094f7920
--- /dev/null
+++ b/arch/powerpc/include/asm/set_memory.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SET_MEMORY_H
+#define _ASM_POWERPC_SET_MEMORY_H
+
+#define SET_MEMORY_RO 0
+#define SET_MEMORY_RW 1
+#define SET_MEMORY_NX 2
+#define SET_MEMORY_X 3
+
+int change_memory_attr(unsigned long addr, int numpages, long action);
+
+static inline int set_memory_ro(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_RO);
+}
+
+static inline int set_memory_rw(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_RW);
+}
+
+static inline int set_memory_nx(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_NX);
+}
+
+static inline int set_memory_x(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_X);
+}
+
+int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot);
+
+#endif
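
A usage sketch for the new helpers (not from this patch; buf is a hypothetical page-aligned kernel allocation):

/* Sketch: flip one page read-only, then restore write access. */
unsigned long addr = (unsigned long)buf;	/* page-aligned, hypothetical */

set_memory_ro(addr, 1);		/* 1 page: read-only */
/* ... phase during which the page must not be modified ... */
set_memory_rw(addr, 1);		/* writable again */
set_memory_nx(addr, 1);		/* and never executable */
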
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index e89bfebd4e00..6c1a7d217d1a 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -10,7 +10,6 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned int rtas_data;
extern unsigned long long memory_limit;
extern bool init_mem_is_free;
-extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
struct device_node;
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
index 5b862de29dff..552f325412cc 100644
--- a/arch/powerpc/include/asm/simple_spinlock.h
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -38,8 +38,7 @@ static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- smp_mb();
- return !arch_spin_value_unlocked(*lock);
+ return !arch_spin_value_unlocked(READ_ONCE(*lock));
}
/*
@@ -282,7 +281,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
#define arch_read_relax(lock) rw_yield(lock)
#define arch_write_relax(lock) rw_yield(lock)
-/* See include/linux/spinlock.h */
-#define smp_mb__after_spinlock() smp_mb()
-
#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 7a13bc20f0a0..03b3d010cbab 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -31,6 +31,7 @@ extern u32 *cpu_to_phys_id;
extern bool coregroup_enabled;
extern int cpu_to_chip_id(int cpu);
+extern int *chip_id_lookup_table;
#ifdef CONFIG_SMP
@@ -121,6 +122,11 @@ static inline struct cpumask *cpu_sibling_mask(int cpu)
return per_cpu(cpu_sibling_map, cpu);
}
+static inline struct cpumask *cpu_core_mask(int cpu)
+{
+ return per_cpu(cpu_core_map, cpu);
+}
+
static inline struct cpumask *cpu_l2_cache_mask(int cpu)
{
return per_cpu(cpu_l2_cache_map, cpu);
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 6ec72282888d..bd75872a6334 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -10,6 +10,9 @@
#include <asm/simple_spinlock.h>
#endif
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock() smp_mb()
+
#ifndef CONFIG_PARAVIRT_SPINLOCKS
static inline void pv_spinlocks_init(void) { }
#endif
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
index 972ed0df154d..1df867c2e054 100644
--- a/arch/powerpc/include/asm/sstep.h
+++ b/arch/powerpc/include/asm/sstep.h
@@ -13,12 +13,11 @@ struct pt_regs;
* we don't allow putting a breakpoint on an mtmsrd instruction.
* Similarly we don't allow breakpoints on rfid instructions.
* These macros tell us if an instruction is a mtmsrd or rfid.
- * Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
- * and an mtmsrd (64-bit).
+ * Note that these return true for both mtmsr/rfi (32-bit)
+ * and mtmsrd/rfid (64-bit).
*/
#define IS_MTMSRD(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124)
-#define IS_RFID(instr) ((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000024)
-#define IS_RFI(instr) ((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000064)
+#define IS_RFID(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x4c000024)
enum instruction_type {
COMPUTE, /* arith/logical/CR op, etc. */
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index fdab93428372..9d1fbd8be1c7 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -71,6 +71,16 @@ static inline void disable_kernel_vsx(void)
{
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
+#else
+static inline void enable_kernel_vsx(void)
+{
+ BUILD_BUG();
+}
+
+static inline void disable_kernel_vsx(void)
+{
+ BUILD_BUG();
+}
#endif
#ifdef CONFIG_SPE
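
The new !CONFIG_VSX stubs use BUILD_BUG() so that any call the compiler cannot prove dead fails at build time. Callers are therefore expected to guard usage, roughly like this sketch:

/* Sketch: keep the VSX path compile-time dead when VSX is not configured. */
if (IS_ENABLED(CONFIG_VSX) && cpu_has_feature(CPU_FTR_VSX)) {
	enable_kernel_vsx();
	/* ... VSX-accelerated work ... */
	disable_kernel_vsx();
} else {
	/* scalar fallback */
}
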
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index fd1b518eed17..ba0f88f3a30d 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -41,11 +41,17 @@ static inline void syscall_rollback(struct task_struct *task,
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
- /*
- * If the system call failed,
- * regs->gpr[3] contains a positive ERRORCODE.
- */
- return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+ if (trap_is_scv(regs)) {
+ unsigned long error = regs->gpr[3];
+
+ return IS_ERR_VALUE(error) ? error : 0;
+ } else {
+ /*
+ * If the system call failed,
+ * regs->gpr[3] contains a positive ERRORCODE.
+ */
+ return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+ }
}
static inline long syscall_get_return_value(struct task_struct *task,
@@ -58,18 +64,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
- /*
- * In the general case it's not obvious that we must deal with CCR
- * here, as the syscall exit path will also do that for us. However
- * there are some places, eg. the signal code, which check ccr to
- * decide if the value in r3 is actually an error.
- */
- if (error) {
- regs->ccr |= 0x10000000L;
- regs->gpr[3] = error;
+ if (trap_is_scv(regs)) {
+ regs->gpr[3] = (long) error ?: val;
} else {
- regs->ccr &= ~0x10000000L;
- regs->gpr[3] = val;
+ /*
+ * In the general case it's not obvious that we must deal with
+ * CCR here, as the syscall exit path will also do that for us.
+ * However there are some places, eg. the signal code, which
+ * check ccr to decide if the value in r3 is actually an error.
+ */
+ if (error) {
+ regs->ccr |= 0x10000000L;
+ regs->gpr[3] = error;
+ } else {
+ regs->ccr &= ~0x10000000L;
+ regs->gpr[3] = val;
+ }
}
}
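
The setter diverges the same way the getter does: error injection under scv is just a negative r3, while sc needs CR0.SO plus a positive errno. A hedged sketch of forcing -EPERM (sketch_force_eperm is illustrative, not part of the patch):

static void sketch_force_eperm(struct pt_regs *regs)
{
	if (trap_is_scv(regs)) {
		regs->gpr[3] = -EPERM;		/* scv: negative errno in r3 */
	} else {
		regs->ccr |= 0x10000000L;	/* sc: mark failure in CR0.SO */
		regs->gpr[3] = EPERM;		/* positive errno in r3 */
	}
}
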
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 386d576673a1..b4ec6c7dd72e 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -38,7 +38,6 @@
#ifndef __ASSEMBLY__
#include <linux/cache.h>
#include <asm/processor.h>
-#include <asm/page.h>
#include <asm/accounting.h>
#define SLB_PRELOAD_NR 16U
@@ -152,6 +151,12 @@ void arch_setup_new_exec(void);
#ifndef __ASSEMBLY__
+static inline void clear_thread_local_flags(unsigned int flags)
+{
+ struct thread_info *ti = current_thread_info();
+ ti->local_flags &= ~flags;
+}
+
static inline bool test_thread_local_flags(unsigned int flags)
{
struct thread_info *ti = current_thread_info();
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 8dd3cdb25338..8c2c3dd4ddba 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -97,6 +97,18 @@ extern void div128_by_32(u64 dividend_high, u64 dividend_low,
extern void secondary_cpu_time_init(void);
extern void __init time_init(void);
+#ifdef CONFIG_PPC64
+static inline unsigned long test_irq_work_pending(void)
+{
+ unsigned long x;
+
+ asm volatile("lbz %0,%1(13)"
+ : "=r" (x)
+ : "i" (offsetof(struct paca_struct, irq_work_pending)));
+ return x;
+}
+#endif
+
DECLARE_PER_CPU(u64, decrementers_next_tb);
/* Convert timebase ticks to nanoseconds */
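
The new helper reads paca_struct.irq_work_pending through r13 (the 64-bit paca pointer) with a single lbz, computing the displacement with offsetof. Ignoring the guarantee of going through r13 without materializing another pointer, a plain-C equivalent would be roughly this sketch:

/* Sketch only: the asm version pins the access to r13 + offset. */
register struct paca_struct *local_paca_sketch asm("r13");

static inline unsigned long sketch_irq_work_pending(void)
{
	return local_paca_sketch->irq_work_pending;
}
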
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 3beeb030cd78..e4db64c0e184 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -126,7 +126,7 @@ static inline int cpu_to_coregroup_id(int cpu)
#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
-#define topology_core_cpumask(cpu) (cpu_cpu_mask(cpu))
+#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
#endif
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 78e2a3990eab..22c79ab40006 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -43,129 +43,39 @@ static inline bool __access_ok(unsigned long addr, unsigned long size)
* exception handling means that it's no longer "just"...)
*
*/
-#define get_user(x, ptr) \
- __get_user_check((x), (ptr), sizeof(*(ptr)))
-#define put_user(x, ptr) \
- __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-
-#define __get_user(x, ptr) \
- __get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
-#define __put_user(x, ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-
-#define __get_user_allowed(x, ptr) \
- __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
-
-#define __get_user_inatomic(x, ptr) \
- __get_user_nosleep((x), (ptr), sizeof(*(ptr)))
-#define __put_user_inatomic(x, ptr) \
- __put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-
-#ifdef CONFIG_PPC64
-
-#define ___get_user_instr(gu_op, dest, ptr) \
-({ \
- long __gui_ret = 0; \
- unsigned long __gui_ptr = (unsigned long)ptr; \
- struct ppc_inst __gui_inst; \
- unsigned int __prefix, __suffix; \
- __gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr); \
- if (__gui_ret == 0) { \
- if ((__prefix >> 26) == OP_PREFIX) { \
- __gui_ret = gu_op(__suffix, \
- (unsigned int __user *)__gui_ptr + 1); \
- __gui_inst = ppc_inst_prefix(__prefix, \
- __suffix); \
- } else { \
- __gui_inst = ppc_inst(__prefix); \
- } \
- if (__gui_ret == 0) \
- (dest) = __gui_inst; \
- } \
- __gui_ret; \
-})
-
-#define get_user_instr(x, ptr) \
- ___get_user_instr(get_user, x, ptr)
-
-#define __get_user_instr(x, ptr) \
- ___get_user_instr(__get_user, x, ptr)
-
-#define __get_user_instr_inatomic(x, ptr) \
- ___get_user_instr(__get_user_inatomic, x, ptr)
-
-#else /* !CONFIG_PPC64 */
-#define get_user_instr(x, ptr) \
- get_user((x).val, (u32 __user *)(ptr))
-
-#define __get_user_instr(x, ptr) \
- __get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true)
-
-#define __get_user_instr_inatomic(x, ptr) \
- __get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32))
-
-#endif /* CONFIG_PPC64 */
-
-extern long __put_user_bad(void);
-
-#define __put_user_size(x, ptr, size, retval) \
-do { \
- __label__ __pu_failed; \
- \
- retval = 0; \
- allow_write_to_user(ptr, size); \
- __put_user_size_goto(x, ptr, size, __pu_failed); \
- prevent_write_to_user(ptr, size); \
- break; \
- \
-__pu_failed: \
- retval = -EFAULT; \
- prevent_write_to_user(ptr, size); \
-} while (0)
-
-#define __put_user_nocheck(x, ptr, size) \
+#define __put_user(x, ptr) \
({ \
long __pu_err; \
__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- __typeof__(*(ptr)) __pu_val = (x); \
- __typeof__(size) __pu_size = (size); \
+ __typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x); \
+ __typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr)); \
\
- if (!is_kernel_addr((unsigned long)__pu_addr)) \
- might_fault(); \
- __chk_user_ptr(__pu_addr); \
- __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
+ might_fault(); \
+ do { \
+ __label__ __pu_failed; \
+ \
+ allow_write_to_user(__pu_addr, __pu_size); \
+ __put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed); \
+ prevent_write_to_user(__pu_addr, __pu_size); \
+ __pu_err = 0; \
+ break; \
+ \
+__pu_failed: \
+ prevent_write_to_user(__pu_addr, __pu_size); \
+ __pu_err = -EFAULT; \
+ } while (0); \
\
__pu_err; \
})
-#define __put_user_check(x, ptr, size) \
+#define put_user(x, ptr) \
({ \
- long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- __typeof__(*(ptr)) __pu_val = (x); \
- __typeof__(size) __pu_size = (size); \
- \
- might_fault(); \
- if (access_ok(__pu_addr, __pu_size)) \
- __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
+ __typeof__(*(ptr)) __user *_pu_addr = (ptr); \
\
- __pu_err; \
+ access_ok(_pu_addr, sizeof(*(ptr))) ? \
+ __put_user(x, _pu_addr) : -EFAULT; \
})
-#define __put_user_nosleep(x, ptr, size) \
-({ \
- long __pu_err; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- __typeof__(*(ptr)) __pu_val = (x); \
- __typeof__(size) __pu_size = (size); \
- \
- __chk_user_ptr(__pu_addr); \
- __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
- \
- __pu_err; \
-})
-
-
/*
* We don't tell gcc that we are accessing memory, but this is OK
* because we do not write to any memory gcc knows about, so there
@@ -198,25 +108,17 @@ __pu_failed: \
#define __put_user_size_goto(x, ptr, size, label) \
do { \
+ __typeof__(*(ptr)) __user *__pus_addr = (ptr); \
+ \
switch (size) { \
- case 1: __put_user_asm_goto(x, ptr, label, "stb"); break; \
- case 2: __put_user_asm_goto(x, ptr, label, "sth"); break; \
- case 4: __put_user_asm_goto(x, ptr, label, "stw"); break; \
- case 8: __put_user_asm2_goto(x, ptr, label); break; \
- default: __put_user_bad(); \
+ case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break; \
+ case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break; \
+ case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break; \
+ case 8: __put_user_asm2_goto(x, __pus_addr, label); break; \
+ default: BUILD_BUG(); \
} \
} while (0)
-#define __unsafe_put_user_goto(x, ptr, size, label) \
-do { \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- __chk_user_ptr(ptr); \
- __put_user_size_goto((x), __pu_addr, (size), label); \
-} while (0)
-
-
-extern long __get_user_bad(void);
-
/*
 * This does an atomic 128-byte aligned load from userspace.
 * Up to the caller to do enable_kernel_vmx() before calling!
@@ -234,6 +136,59 @@ extern long __get_user_bad(void);
: "=r" (err) \
: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+#define __get_user_asm_goto(x, addr, label, op) \
+ asm_volatile_goto( \
+ "1: "op"%U1%X1 %0, %1 # get_user\n" \
+ EX_TABLE(1b, %l2) \
+ : "=r" (x) \
+ : "m"UPD_CONSTR (*addr) \
+ : \
+ : label)
+
+#ifdef __powerpc64__
+#define __get_user_asm2_goto(x, addr, label) \
+ __get_user_asm_goto(x, addr, label, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2_goto(x, addr, label) \
+ asm_volatile_goto( \
+ "1: lwz%X1 %0, %1\n" \
+ "2: lwz%X1 %L0, %L1\n" \
+ EX_TABLE(1b, %l2) \
+ EX_TABLE(2b, %l2) \
+ : "=&r" (x) \
+ : "m" (*addr) \
+ : \
+ : label)
+#endif /* __powerpc64__ */
+
+#define __get_user_size_goto(x, ptr, size, label) \
+do { \
+ BUILD_BUG_ON(size > sizeof(x)); \
+ switch (size) { \
+ case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break; \
+ case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break; \
+ case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break; \
+ case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label); break; \
+ default: x = 0; BUILD_BUG(); \
+ } \
+} while (0)
+
+#define __get_user_size_allowed(x, ptr, size, retval) \
+do { \
+ __label__ __gus_failed; \
+ \
+ __get_user_size_goto(x, ptr, size, __gus_failed); \
+ retval = 0; \
+ break; \
+__gus_failed: \
+ x = 0; \
+ retval = -EFAULT; \
+} while (0)
+
+#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
#define __get_user_asm(x, addr, err, op) \
__asm__ __volatile__( \
"1: "op"%U2%X2 %1, %2 # get_user\n" \
@@ -271,25 +226,27 @@ extern long __get_user_bad(void);
#define __get_user_size_allowed(x, ptr, size, retval) \
do { \
retval = 0; \
- __chk_user_ptr(ptr); \
- if (size > sizeof(x)) \
- (x) = __get_user_bad(); \
+ BUILD_BUG_ON(size > sizeof(x)); \
switch (size) { \
case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break; \
case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break; \
case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break; \
case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break; \
- default: (x) = __get_user_bad(); \
+ default: x = 0; BUILD_BUG(); \
} \
} while (0)
-#define __get_user_size(x, ptr, size, retval) \
+#define __get_user_size_goto(x, ptr, size, label) \
do { \
- allow_read_from_user(ptr, size); \
- __get_user_size_allowed(x, ptr, size, retval); \
- prevent_read_from_user(ptr, size); \
+ long __gus_retval; \
+ \
+ __get_user_size_allowed(x, ptr, size, __gus_retval); \
+ if (__gus_retval) \
+ goto label; \
} while (0)
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
/*
* This is a type: either unsigned long, if the argument fits into
* that type, or otherwise unsigned long long.
@@ -297,86 +254,36 @@ do { \
#define __long_type(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
-#define __get_user_nocheck(x, ptr, size, do_allow) \
+#define __get_user(x, ptr) \
({ \
long __gu_err; \
__long_type(*(ptr)) __gu_val; \
__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- __typeof__(size) __gu_size = (size); \
+ __typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr)); \
\
- __chk_user_ptr(__gu_addr); \
- if (do_allow && !is_kernel_addr((unsigned long)__gu_addr)) \
- might_fault(); \
- if (do_allow) \
- __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
- else \
- __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
+ might_fault(); \
+ allow_read_from_user(__gu_addr, __gu_size); \
+ __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
+ prevent_read_from_user(__gu_addr, __gu_size); \
(x) = (__typeof__(*(ptr)))__gu_val; \
\
__gu_err; \
})
-#define __get_user_check(x, ptr, size) \
+#define get_user(x, ptr) \
({ \
- long __gu_err = -EFAULT; \
- __long_type(*(ptr)) __gu_val = 0; \
- __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- __typeof__(size) __gu_size = (size); \
- \
- might_fault(); \
- if (access_ok(__gu_addr, __gu_size)) \
- __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ __typeof__(*(ptr)) __user *_gu_addr = (ptr); \
\
- __gu_err; \
+ access_ok(_gu_addr, sizeof(*(ptr))) ? \
+ __get_user(x, _gu_addr) : \
+ ((x) = (__force __typeof__(*(ptr)))0, -EFAULT); \
})
-#define __get_user_nosleep(x, ptr, size) \
-({ \
- long __gu_err; \
- __long_type(*(ptr)) __gu_val; \
- __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- __typeof__(size) __gu_size = (size); \
- \
- __chk_user_ptr(__gu_addr); \
- __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
- \
- __gu_err; \
-})
-
-
/* more complex routines */
extern unsigned long __copy_tofrom_user(void __user *to,
const void __user *from, unsigned long size);
-#ifdef CONFIG_ARCH_HAS_COPY_MC
-unsigned long __must_check
-copy_mc_generic(void *to, const void *from, unsigned long size);
-
-static inline unsigned long __must_check
-copy_mc_to_kernel(void *to, const void *from, unsigned long size)
-{
- return copy_mc_generic(to, from, size);
-}
-#define copy_mc_to_kernel copy_mc_to_kernel
-
-static inline unsigned long __must_check
-copy_mc_to_user(void __user *to, const void *from, unsigned long n)
-{
- if (likely(check_copy_size(from, n, true))) {
- if (access_ok(to, n)) {
- allow_write_to_user(to, n);
- n = copy_mc_generic((void *)to, from, n);
- prevent_write_to_user(to, n);
- }
- }
-
- return n;
-}
-#endif
-
#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
@@ -414,26 +321,51 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
unsigned long __arch_clear_user(void __user *addr, unsigned long size);
-static inline unsigned long clear_user(void __user *addr, unsigned long size)
+static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
- unsigned long ret = size;
+ unsigned long ret;
+
might_fault();
- if (likely(access_ok(addr, size))) {
- allow_write_to_user(addr, size);
- ret = __arch_clear_user(addr, size);
- prevent_write_to_user(addr, size);
- }
+ allow_write_to_user(addr, size);
+ ret = __arch_clear_user(addr, size);
+ prevent_write_to_user(addr, size);
return ret;
}
-static inline unsigned long __clear_user(void __user *addr, unsigned long size)
+static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
- return clear_user(addr, size);
+ return likely(access_ok(addr, size)) ? __clear_user(addr, size) : size;
}
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+unsigned long __must_check
+copy_mc_generic(void *to, const void *from, unsigned long size);
+
+static inline unsigned long __must_check
+copy_mc_to_kernel(void *to, const void *from, unsigned long size)
+{
+ return copy_mc_generic(to, from, size);
+}
+#define copy_mc_to_kernel copy_mc_to_kernel
+
+static inline unsigned long __must_check
+copy_mc_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (likely(check_copy_size(from, n, true))) {
+ if (access_ok(to, n)) {
+ allow_write_to_user(to, n);
+ n = copy_mc_generic((void *)to, from, n);
+ prevent_write_to_user(to, n);
+ }
+ }
+
+ return n;
+}
+#endif
+
extern long __copy_from_user_flushcache(void *dst, const void __user *src,
unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
@@ -482,10 +414,37 @@ user_write_access_begin(const void __user *ptr, size_t len)
#define user_write_access_begin user_write_access_begin
#define user_write_access_end prevent_current_write_to_user
-#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
-#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
+#define unsafe_get_user(x, p, e) do { \
+ __long_type(*(p)) __gu_val; \
+ __typeof__(*(p)) __user *__gu_addr = (p); \
+ \
+ __get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e); \
+ (x) = (__typeof__(*(p)))__gu_val; \
+} while (0)
+
#define unsafe_put_user(x, p, e) \
- __unsafe_put_user_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
+ __put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
+
+#define unsafe_copy_from_user(d, s, l, e) \
+do { \
+ u8 *_dst = (u8 *)(d); \
+ const u8 __user *_src = (const u8 __user *)(s); \
+ size_t _len = (l); \
+ int _i; \
+ \
+ for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
+ unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e); \
+ if (_len & 4) { \
+ unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e); \
+ _i += 4; \
+ } \
+ if (_len & 2) { \
+ unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e); \
+ _i += 2; \
+ } \
+ if (_len & 1) \
+ unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e); \
+} while (0)
#define unsafe_copy_to_user(d, s, l, e) \
do { \
@@ -494,9 +453,9 @@ do { \
size_t _len = (l); \
int _i; \
\
- for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long)) \
- unsafe_put_user(*(long*)(_src + _i), (long __user *)(_dst + _i), e); \
- if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) { \
+ for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
+ unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e); \
+ if (_len & 4) { \
unsafe_put_user(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
_i += 4; \
} \
@@ -511,14 +470,8 @@ do { \
#define HAVE_GET_KERNEL_NOFAULT
#define __get_kernel_nofault(dst, src, type, err_label) \
-do { \
- int __kr_err; \
- \
- __get_user_size_allowed(*((type *)(dst)), (__force type __user *)(src),\
- sizeof(type), __kr_err); \
- if (unlikely(__kr_err)) \
- goto err_label; \
-} while (0)
+ __get_user_size_goto(*((type *)(dst)), \
+ (__force type __user *)(src), sizeof(type), err_label)
#define __put_kernel_nofault(dst, src, type, err_label) \
__put_user_size_goto(*((type *)(src)), \
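
The CONFIG_CC_HAS_ASM_GOTO_OUTPUT branch depends on compilers that accept output operands in asm goto, so a faulting load can jump straight to an error label instead of threading an error code through. A stripped-down sketch of the shape (a generic load; the exception-table plumbing is elided and noted in the comment):

/* Sketch: load *addr into val, or branch to the fault label.
 * The real macros add EX_TABLE entries so a fault lands on %l2. */
#define sketch_get_u32(val, addr, fault_label)			\
	asm_volatile_goto(					\
		"1:	lwz %0, 0(%1)\n"			\
		/* EX_TABLE(1b, %l2) registers 1: -> fault_label */ \
		: "=r" (val)					\
		: "r" (addr)					\
		:						\
		: fault_label)
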
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h
deleted file mode 100644
index ce69c5eff95e..000000000000
--- a/arch/powerpc/include/asm/unaligned.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_UNALIGNED_H
-#define _ASM_POWERPC_UNALIGNED_H
-
-#ifdef __KERNEL__
-
-/*
- * The PowerPC can do unaligned accesses itself based on its endian mode.
- */
-#include <linux/unaligned/access_ok.h>
-#include <linux/unaligned/generic.h>
-
-#ifdef __LITTLE_ENDIAN__
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-#else
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-#endif
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_UNALIGNED_H */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 700fcdac2e3c..b541c690a31c 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -40,6 +40,7 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#ifdef CONFIG_PPC32
#define __ARCH_WANT_OLD_STAT
+#define __ARCH_WANT_SYS_OLD_SELECT
#endif
#ifdef CONFIG_PPC64
#define __ARCH_WANT_SYS_TIME
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index 5bf65f5d44a9..fe683371336f 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -24,8 +24,8 @@ typedef ppc_opcode_t uprobe_opcode_t;
struct arch_uprobe {
union {
- struct ppc_inst insn;
- struct ppc_inst ixol;
+ u32 insn[2];
+ u32 ixol[2];
};
};
diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h
index e33f80b0ea81..57573d9c1e09 100644
--- a/arch/powerpc/include/asm/vas.h
+++ b/arch/powerpc/include/asm/vas.h
@@ -5,8 +5,10 @@
#ifndef _ASM_POWERPC_VAS_H
#define _ASM_POWERPC_VAS_H
-
-struct vas_window;
+#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
+#include <asm/icswx.h>
+#include <uapi/asm/vas-api.h>
/*
* Min and max FIFO sizes are based on Version 1.05 Section 3.1.4.25
@@ -49,6 +51,64 @@ enum vas_cop_type {
};
/*
+ * User space VAS windows are opened by tasks and take references
+ * to pid and mm until windows are closed.
+ * Stores pid, mm, and tgid for each window.
+ */
+struct vas_user_win_ref {
+ struct pid *pid; /* PID of owner */
+ struct pid *tgid; /* Thread group ID of owner */
+ struct mm_struct *mm; /* Linux process mm_struct */
+};
+
+/*
+ * Common VAS window struct on PowerNV and PowerVM
+ */
+struct vas_window {
+ u32 winid;
+ u32 wcreds_max; /* Window credits */
+ enum vas_cop_type cop;
+ struct vas_user_win_ref task_ref;
+ char *dbgname;
+ struct dentry *dbgdir;
+};
+
+/*
+ * User space window operations used for PowerNV and PowerVM
+ */
+struct vas_user_win_ops {
+ struct vas_window * (*open_win)(int vas_id, u64 flags,
+ enum vas_cop_type);
+ u64 (*paste_addr)(struct vas_window *);
+ int (*close_win)(struct vas_window *);
+};
+
+static inline void put_vas_user_win_ref(struct vas_user_win_ref *ref)
+{
+ /* Drop references to pid, tgid, and mm */
+ put_pid(ref->pid);
+ put_pid(ref->tgid);
+ if (ref->mm)
+ mmdrop(ref->mm);
+}
+
+static inline void vas_user_win_add_mm_context(struct vas_user_win_ref *ref)
+{
+ mm_context_add_vas_window(ref->mm);
+ /*
+ * Even a process that has no foreign real address mapping can
+ * use an unpaired COPY instruction (to no real effect). Issue
+ * CP_ABORT to clear any pending COPY and prevent a covert
+ * channel.
+ *
+ * __switch_to() will issue CP_ABORT on future context switches
+	 * if the process / thread has any open VAS window (use
+	 * current->mm->context.vas_windows).
+ */
+ asm volatile(PPC_CP_ABORT);
+}
+
+/*
* Receive window attributes specified by the (in-kernel) owner of window.
*/
struct vas_rx_win_attr {
@@ -100,6 +160,7 @@ struct vas_tx_win_attr {
bool rx_win_ord_mode;
};
+#ifdef CONFIG_PPC_POWERNV
/*
* Helper to map a chip id to VAS id.
 * For POWER9, this is a 1:1 mapping. In the future this may be a 1:N
@@ -162,6 +223,43 @@ int vas_copy_crb(void *crb, int offset);
*/
int vas_paste_crb(struct vas_window *win, int offset, bool re);
+int vas_register_api_powernv(struct module *mod, enum vas_cop_type cop_type,
+ const char *name);
+void vas_unregister_api_powernv(void);
+#endif
+
+#ifdef CONFIG_PPC_PSERIES
+
+/* VAS Capabilities */
+#define VAS_GZIP_QOS_FEAT 0x1
+#define VAS_GZIP_DEF_FEAT 0x2
+#define VAS_GZIP_QOS_FEAT_BIT PPC_BIT(VAS_GZIP_QOS_FEAT) /* Bit 1 */
+#define VAS_GZIP_DEF_FEAT_BIT PPC_BIT(VAS_GZIP_DEF_FEAT) /* Bit 2 */
+
+/* NX Capabilities */
+#define VAS_NX_GZIP_FEAT 0x1
+#define VAS_NX_GZIP_FEAT_BIT PPC_BIT(VAS_NX_GZIP_FEAT) /* Bit 1 */
+
+/*
+ * These structs are used to retrieve overall VAS capabilities that
+ * the hypervisor provides.
+ */
+struct hv_vas_all_caps {
+ __be64 descriptor;
+ __be64 feat_type;
+} __packed __aligned(0x1000);
+
+struct vas_all_caps {
+ u64 descriptor;
+ u64 feat_type;
+};
+
+int h_query_vas_capabilities(const u64 hcall, u8 query_type, u64 result);
+int vas_register_api_pseries(struct module *mod,
+ enum vas_cop_type cop_type, const char *name);
+void vas_unregister_api_pseries(void);
+#endif
+
/*
* Register / unregister coprocessor type to VAS API which will be exported
* to user space. Applications can use this API to open / close window
@@ -171,7 +269,12 @@ int vas_paste_crb(struct vas_window *win, int offset, bool re);
* used for others in future.
*/
int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
- const char *name);
+ const char *name,
+ const struct vas_user_win_ops *vops);
void vas_unregister_coproc_api(void);
+int get_vas_user_win_ref(struct vas_user_win_ref *task_ref);
+void vas_update_csb(struct coprocessor_request_block *crb,
+ struct vas_user_win_ref *task_ref);
+void vas_dump_crb(struct coprocessor_request_block *crb);
#endif /* __ASM_POWERPC_VAS_H */
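
A sketch of the reference lifecycle vas_user_win_ref implies: references are taken when a task opens a window and dropped via put_vas_user_win_ref() at close. The get_task_pid()/mmgrab() pairing below is the natural counterpart, not code from this patch:

static void sketch_win_ref_get(struct vas_user_win_ref *ref)
{
	ref->pid = get_task_pid(current, PIDTYPE_PID);
	ref->tgid = get_task_pid(current, PIDTYPE_TGID);
	ref->mm = current->mm;
	mmgrab(ref->mm);		/* keep the mm_struct alive */
}

static void sketch_win_ref_put(struct vas_user_win_ref *ref)
{
	put_vas_user_win_ref(ref);	/* drops pid, tgid and mm */
}
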
diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h
index 77c635c2c90d..1faff0be1111 100644
--- a/arch/powerpc/include/asm/vdso/gettimeofday.h
+++ b/arch/powerpc/include/asm/vdso/gettimeofday.h
@@ -2,6 +2,8 @@
#ifndef _ASM_POWERPC_VDSO_GETTIMEOFDAY_H
#define _ASM_POWERPC_VDSO_GETTIMEOFDAY_H
+#include <asm/page.h>
+
#ifdef __ASSEMBLY__
#include <asm/ppc_asm.h>
@@ -154,6 +156,14 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
const struct vdso_data *__arch_get_vdso_data(void);
+#ifdef CONFIG_TIME_NS
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
+{
+ return (void *)vd + PAGE_SIZE;
+}
+#endif
+
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
return true;
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index 3f958ecf2beb..a585c8e538ff 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -107,9 +107,7 @@ extern struct vdso_arch_data *vdso_data;
bcl 20, 31, .+4
999:
mflr \ptr
-#if CONFIG_PPC_PAGE_SHIFT > 14
addis \ptr, \ptr, (_vdso_datapage - 999b)@ha
-#endif
addi \ptr, \ptr, (_vdso_datapage - 999b)@l
.endm
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
index 0cf52746531b..e7479a4abf96 100644
--- a/arch/powerpc/include/asm/vio.h
+++ b/arch/powerpc/include/asm/vio.h
@@ -113,7 +113,8 @@ struct vio_driver {
const char *name;
const struct vio_device_id *id_table;
int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
- int (*remove)(struct vio_dev *dev);
+ void (*remove)(struct vio_dev *dev);
+ void (*shutdown)(struct vio_dev *dev);
/* A driver must have a get_desired_dma() function to
* be loaded in a CMO environment if it uses DMA.
*/
diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h
index b992dfaaa161..4c69ece52a31 100644
--- a/arch/powerpc/include/asm/vmalloc.h
+++ b/arch/powerpc/include/asm/vmalloc.h
@@ -1,4 +1,24 @@
#ifndef _ASM_POWERPC_VMALLOC_H
#define _ASM_POWERPC_VMALLOC_H
+#include <asm/mmu.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#define arch_vmap_pud_supported arch_vmap_pud_supported
+static inline bool arch_vmap_pud_supported(pgprot_t prot)
+{
+ /* HPT does not cope with large pages in the vmalloc area */
+ return radix_enabled();
+}
+
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
+static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+{
+ return radix_enabled();
+}
+
+#endif
+
#endif /* _ASM_POWERPC_VMALLOC_H */
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 8e903b3f9c24..d9cf192368ad 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -65,8 +65,12 @@ struct icp_ops {
extern const struct icp_ops *icp_ops;
+#ifdef CONFIG_PPC_ICS_NATIVE
/* Native ICS */
extern int ics_native_init(void);
+#else
+static inline int ics_native_init(void) { return -ENODEV; }
+#endif
/* RTAS ICS */
#ifdef CONFIG_PPC_ICS_RTAS
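
The stub keeps call sites #ifdef-free: when native ICS support is not built, ics_native_init() simply reports -ENODEV and the caller can try the next backend. A sketch (the RTAS fallback pairing is illustrative, not from this patch):

/* Sketch: probe ICS backends unconditionally. */
if (ics_native_init() != 0)
	ics_rtas_init();	/* illustrative fallback, CONFIG_PPC_ICS_RTAS */
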
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index 9a312b975ca8..aa094a8655b0 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -102,6 +102,7 @@ void xive_flush_interrupt(void);
/* xmon hook */
void xmon_xive_do_dump(int cpu);
int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d);
+void xmon_xive_get_irq_all(void);
/* APIs used by KVM */
u32 xive_native_default_eq_shift(void);
diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h
index cc79856896a1..4ba87de32be0 100644
--- a/arch/powerpc/include/uapi/asm/errno.h
+++ b/arch/powerpc/include/uapi/asm/errno.h
@@ -2,6 +2,7 @@
#ifndef _ASM_POWERPC_ERRNO_H
#define _ASM_POWERPC_ERRNO_H
+#undef EDEADLOCK
#include <asm-generic/errno.h>
#undef EDEADLOCK
diff --git a/arch/powerpc/include/uapi/asm/papr_pdsm.h b/arch/powerpc/include/uapi/asm/papr_pdsm.h
index 50ef95e2f5b1..82488b1e7276 100644
--- a/arch/powerpc/include/uapi/asm/papr_pdsm.h
+++ b/arch/powerpc/include/uapi/asm/papr_pdsm.h
@@ -77,6 +77,9 @@
/* Indicate that the 'dimm_fuel_gauge' field is valid */
#define PDSM_DIMM_HEALTH_RUN_GAUGE_VALID 1
+/* Indicate that the 'dimm_dsc' field is valid */
+#define PDSM_DIMM_DSC_VALID 2
+
/*
* Struct exchanged between kernel & ndctl in for PAPR_PDSM_HEALTH
* Various flags indicate the health status of the dimm.
@@ -105,6 +108,9 @@ struct nd_papr_pdsm_health {
/* Extension flag PDSM_DIMM_HEALTH_RUN_GAUGE_VALID */
__u16 dimm_fuel_gauge;
+
+ /* Extension flag PDSM_DIMM_DSC_VALID */
+ __u64 dimm_dsc;
};
__u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
};
diff --git a/arch/powerpc/include/uapi/asm/posix_types.h b/arch/powerpc/include/uapi/asm/posix_types.h
index f698400e4bb0..9c0342312544 100644
--- a/arch/powerpc/include/uapi/asm/posix_types.h
+++ b/arch/powerpc/include/uapi/asm/posix_types.h
@@ -12,11 +12,6 @@
typedef unsigned long __kernel_old_dev_t;
#define __kernel_old_dev_t __kernel_old_dev_t
#else
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-#define __kernel_size_t __kernel_size_t
-
typedef short __kernel_ipc_pid_t;
#define __kernel_ipc_pid_t __kernel_ipc_pid_t
#endif
diff --git a/arch/powerpc/include/uapi/asm/vas-api.h b/arch/powerpc/include/uapi/asm/vas-api.h
index ebd4b2424785..7c81301ecdba 100644
--- a/arch/powerpc/include/uapi/asm/vas-api.h
+++ b/arch/powerpc/include/uapi/asm/vas-api.h
@@ -13,11 +13,15 @@
#define VAS_MAGIC 'v'
#define VAS_TX_WIN_OPEN _IOW(VAS_MAGIC, 0x20, struct vas_tx_win_open_attr)
+/* Flags to VAS TX open window ioctl */
+/* To allocate a window with QoS credit, otherwise use default credit */
+#define VAS_TX_WIN_FLAG_QOS_CREDIT 0x0000000000000001
+
struct vas_tx_win_open_attr {
__u32 version;
__s16 vas_id; /* specific instance of vas or -1 for default */
__u16 reserved1;
- __u64 flags; /* Future use */
+ __u64 flags;
__u64 reserved2[6];
};
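
A userspace sketch of requesting a QoS credit with the new flag; the device path is a plausible example for the NX-GZIP coprocessor and the version value is an assumption, neither is defined by this header:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/vas-api.h>

int sketch_open_qos_window(void)
{
	struct vas_tx_win_open_attr attr;
	int fd = open("/dev/crypto/nx-gzip", O_RDWR);	/* assumed path */

	if (fd < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.version = 1;				/* assumed ABI version */
	attr.vas_id = -1;				/* default VAS instance */
	attr.flags = VAS_TX_WIN_FLAG_QOS_CREDIT;

	if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* paste address can now be mmap()ed via fd */
}
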
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 6084fa499aa3..f66b63e81c3b 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -191,3 +191,7 @@ $(obj)/prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o FORCE
targets += prom_init_check
clean-files := vmlinux.lds
+
+# Force dependency (incbin is bad)
+$(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg
+$(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index c7797eb958c7..bbb4181621dd 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -107,7 +107,6 @@ static struct aligninfo spe_aligninfo[32] = {
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
struct ppc_inst ppc_instr)
{
- int ret;
union {
u64 ll;
u32 w[2];
@@ -127,11 +126,6 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
nb = spe_aligninfo[instr].len;
flags = spe_aligninfo[instr].flags;
- /* Verify the address of the operand */
- if (unlikely(user_mode(regs) &&
- !access_ok(addr, nb)))
- return -EFAULT;
-
/* userland only */
if (unlikely(!user_mode(regs)))
return 0;
@@ -169,26 +163,27 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
}
} else {
temp.ll = data.ll = 0;
- ret = 0;
p = addr;
+ if (!user_read_access_begin(addr, nb))
+ return -EFAULT;
+
switch (nb) {
case 8:
- ret |= __get_user_inatomic(temp.v[0], p++);
- ret |= __get_user_inatomic(temp.v[1], p++);
- ret |= __get_user_inatomic(temp.v[2], p++);
- ret |= __get_user_inatomic(temp.v[3], p++);
+ unsafe_get_user(temp.v[0], p++, Efault_read);
+ unsafe_get_user(temp.v[1], p++, Efault_read);
+ unsafe_get_user(temp.v[2], p++, Efault_read);
+ unsafe_get_user(temp.v[3], p++, Efault_read);
fallthrough;
case 4:
- ret |= __get_user_inatomic(temp.v[4], p++);
- ret |= __get_user_inatomic(temp.v[5], p++);
+ unsafe_get_user(temp.v[4], p++, Efault_read);
+ unsafe_get_user(temp.v[5], p++, Efault_read);
fallthrough;
case 2:
- ret |= __get_user_inatomic(temp.v[6], p++);
- ret |= __get_user_inatomic(temp.v[7], p++);
- if (unlikely(ret))
- return -EFAULT;
+ unsafe_get_user(temp.v[6], p++, Efault_read);
+ unsafe_get_user(temp.v[7], p++, Efault_read);
}
+ user_read_access_end();
switch (instr) {
case EVLDD:
@@ -255,31 +250,41 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
/* Store result to memory or update registers */
if (flags & ST) {
- ret = 0;
p = addr;
+
+ if (!user_write_access_begin(addr, nb))
+ return -EFAULT;
+
switch (nb) {
case 8:
- ret |= __put_user_inatomic(data.v[0], p++);
- ret |= __put_user_inatomic(data.v[1], p++);
- ret |= __put_user_inatomic(data.v[2], p++);
- ret |= __put_user_inatomic(data.v[3], p++);
+ unsafe_put_user(data.v[0], p++, Efault_write);
+ unsafe_put_user(data.v[1], p++, Efault_write);
+ unsafe_put_user(data.v[2], p++, Efault_write);
+ unsafe_put_user(data.v[3], p++, Efault_write);
fallthrough;
case 4:
- ret |= __put_user_inatomic(data.v[4], p++);
- ret |= __put_user_inatomic(data.v[5], p++);
+ unsafe_put_user(data.v[4], p++, Efault_write);
+ unsafe_put_user(data.v[5], p++, Efault_write);
fallthrough;
case 2:
- ret |= __put_user_inatomic(data.v[6], p++);
- ret |= __put_user_inatomic(data.v[7], p++);
+ unsafe_put_user(data.v[6], p++, Efault_write);
+ unsafe_put_user(data.v[7], p++, Efault_write);
}
- if (unlikely(ret))
- return -EFAULT;
+ user_write_access_end();
} else {
*evr = data.w[0];
regs->gpr[reg] = data.w[1];
}
return 1;
+
+Efault_read:
+ user_read_access_end();
+ return -EFAULT;
+
+Efault_write:
+ user_write_access_end();
+ return -EFAULT;
}
#endif /* CONFIG_SPE */
@@ -299,13 +304,12 @@ int fix_alignment(struct pt_regs *regs)
struct instruction_op op;
int r, type;
- /*
- * We require a complete register set, if not, then our assembly
- * is broken
- */
- CHECK_FULL_REGS(regs);
+ if (is_kernel_addr(regs->nip))
+ r = copy_inst_from_kernel_nofault(&instr, (void *)regs->nip);
+ else
+ r = __get_user_instr(instr, (void __user *)regs->nip);
- if (unlikely(__get_user_instr(instr, (void __user *)regs->nip)))
+ if (unlikely(r))
return -EFAULT;
if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
/* We don't handle PPC little-endian any more... */
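
The conversion replaces per-access __get_user_inatomic() error accumulation with one user-access window and fault labels. The pattern, reduced to a single load (a sketch using the same primitives as the patch; sketch_read_user_u32 is illustrative):

static int sketch_read_user_u32(u32 __user *p, u32 *out)
{
	u32 v;

	if (!user_read_access_begin(p, sizeof(*p)))
		return -EFAULT;
	unsafe_get_user(v, p, Efault);		/* a fault jumps to Efault */
	user_read_access_end();
	*out = v;
	return 0;

Efault:
	user_read_access_end();
	return -EFAULT;
}
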
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index f3a662201a9f..a47eefa09bcb 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -86,12 +86,7 @@ int main(void)
OFFSET(PACA_CANARY, paca_struct, canary);
#endif
#endif
- OFFSET(MMCONTEXTID, mm_struct, context.id);
-#ifdef CONFIG_PPC64
- DEFINE(SIGSEGV, SIGSEGV);
- DEFINE(NMI_MASK, NMI_MASK);
-#else
- OFFSET(KSP_LIMIT, thread_struct, ksp_limit);
+#ifdef CONFIG_PPC32
#ifdef CONFIG_PPC_RTAS
OFFSET(RTAS_SP, thread_struct, rtas_sp);
#endif
@@ -120,7 +115,6 @@ int main(void)
#ifdef CONFIG_ALTIVEC
OFFSET(THREAD_VRSTATE, thread_struct, vr_state.vr);
OFFSET(THREAD_VRSAVEAREA, thread_struct, vr_save_area);
- OFFSET(THREAD_VRSAVE, thread_struct, vrsave);
OFFSET(THREAD_USED_VR, thread_struct, used_vr);
OFFSET(VRSTATE_VSCR, thread_vr_state, vscr);
OFFSET(THREAD_LOAD_VEC, thread_struct, load_vec);
@@ -132,7 +126,6 @@ int main(void)
OFFSET(KSP_VSID, thread_struct, ksp_vsid);
#else /* CONFIG_PPC64 */
OFFSET(PGDIR, thread_struct, pgdir);
-#ifdef CONFIG_VMAP_STACK
OFFSET(SRR0, thread_struct, srr0);
OFFSET(SRR1, thread_struct, srr1);
OFFSET(DAR, thread_struct, dar);
@@ -149,26 +142,18 @@ int main(void)
OFFSET(THLR, thread_struct, lr);
OFFSET(THCTR, thread_struct, ctr);
#endif
-#endif
#ifdef CONFIG_SPE
OFFSET(THREAD_EVR0, thread_struct, evr[0]);
OFFSET(THREAD_ACC, thread_struct, acc);
- OFFSET(THREAD_SPEFSCR, thread_struct, spefscr);
OFFSET(THREAD_USED_SPE, thread_struct, used_spe);
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- OFFSET(THREAD_DBCR0, thread_struct, debug.dbcr0);
-#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
OFFSET(THREAD_KVM_SVCPU, thread_struct, kvm_shadow_vcpu);
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
OFFSET(THREAD_KVM_VCPU, thread_struct, kvm_vcpu);
#endif
-#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
- OFFSET(KUAP, thread_struct, kuap);
-#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
OFFSET(PACATMSCRATCH, paca_struct, tm_scratch);
@@ -188,19 +173,12 @@ int main(void)
sizeof(struct pt_regs) + 16);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
- OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
- OFFSET(TI_PREEMPT, thread_info, preempt_count);
#ifdef CONFIG_PPC64
OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
OFFSET(DCACHEL1LOGBLOCKSIZE, ppc64_caches, l1d.log_block_size);
- OFFSET(DCACHEL1BLOCKSPERPAGE, ppc64_caches, l1d.blocks_per_page);
- OFFSET(ICACHEL1BLOCKSIZE, ppc64_caches, l1i.block_size);
- OFFSET(ICACHEL1LOGBLOCKSIZE, ppc64_caches, l1i.log_block_size);
- OFFSET(ICACHEL1BLOCKSPERPAGE, ppc64_caches, l1i.blocks_per_page);
/* paca */
- DEFINE(PACA_SIZE, sizeof(struct paca_struct));
OFFSET(PACAPACAINDEX, paca_struct, paca_index);
OFFSET(PACAPROCSTART, paca_struct, cpu_start);
OFFSET(PACAKSAVE, paca_struct, kstack);
@@ -212,18 +190,13 @@ int main(void)
OFFSET(PACATOC, paca_struct, kernel_toc);
OFFSET(PACAKBASE, paca_struct, kernelbase);
OFFSET(PACAKMSR, paca_struct, kernel_msr);
+#ifdef CONFIG_PPC_BOOK3S_64
+ OFFSET(PACAHSRR_VALID, paca_struct, hsrr_valid);
+ OFFSET(PACASRR_VALID, paca_struct, srr_valid);
+#endif
OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);
-#ifdef CONFIG_PPC_BOOK3S
- OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
-#ifdef CONFIG_PPC_MM_SLICES
- OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
- OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
- OFFSET(PACA_SLB_ADDR_LIMIT, paca_struct, mm_ctx_slb_addr_limit);
- DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
-#endif /* CONFIG_PPC_MM_SLICES */
-#endif
#ifdef CONFIG_PPC_BOOK3E
OFFSET(PACAPGD, paca_struct, pgd);
@@ -244,21 +217,9 @@ int main(void)
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_BOOK3S_64
- OFFSET(PACASLBCACHE, paca_struct, slb_cache);
- OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr);
- OFFSET(PACASTABRR, paca_struct, stab_rr);
- OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp);
-#ifdef CONFIG_PPC_MM_SLICES
- OFFSET(MMUPSIZESLLP, mmu_psize_def, sllp);
-#else
- OFFSET(PACACONTEXTSLLP, paca_struct, mm_ctx_sllp);
-#endif /* CONFIG_PPC_MM_SLICES */
OFFSET(PACA_EXGEN, paca_struct, exgen);
OFFSET(PACA_EXMC, paca_struct, exmc);
OFFSET(PACA_EXNMI, paca_struct, exnmi);
-#ifdef CONFIG_PPC_PSERIES
- OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr);
-#endif
OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
@@ -267,9 +228,7 @@ int main(void)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);
#endif
- OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx);
OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count);
- OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx);
#endif /* CONFIG_PPC_BOOK3S_64 */
OFFSET(PACAEMERGSP, paca_struct, emergency_sp);
#ifdef CONFIG_PPC_BOOK3S_64
@@ -285,21 +244,14 @@ int main(void)
OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default);
- OFFSET(ACCOUNT_STARTTIME, paca_struct, accounting.starttime);
- OFFSET(ACCOUNT_STARTTIME_USER, paca_struct, accounting.starttime_user);
- OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime);
- OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime);
+#ifdef CONFIG_PPC64
+ OFFSET(PACA_EXIT_SAVE_R1, paca_struct, exit_save_r1);
+#endif
#ifdef CONFIG_PPC_BOOK3E
OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
#endif
OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso);
#else /* CONFIG_PPC64 */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- OFFSET(ACCOUNT_STARTTIME, thread_info, accounting.starttime);
- OFFSET(ACCOUNT_STARTTIME_USER, thread_info, accounting.starttime_user);
- OFFSET(ACCOUNT_USER_TIME, thread_info, accounting.utime);
- OFFSET(ACCOUNT_SYSTEM_TIME, thread_info, accounting.stime);
-#endif
#endif /* CONFIG_PPC64 */
/* RTAS */
@@ -323,9 +275,6 @@ int main(void)
STACK_PT_REGS_OFFSET(GPR11, gpr[11]);
STACK_PT_REGS_OFFSET(GPR12, gpr[12]);
STACK_PT_REGS_OFFSET(GPR13, gpr[13]);
-#ifndef CONFIG_PPC64
- STACK_PT_REGS_OFFSET(GPR14, gpr[14]);
-#endif /* CONFIG_PPC64 */
/*
* Note: these symbols include _ because they overlap with special
* register names
@@ -359,10 +308,6 @@ int main(void)
STACK_PT_REGS_OFFSET(STACK_REGS_AMR, amr);
STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
#endif
-#ifdef CONFIG_PPC_KUAP
- STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
-#endif
-
#if defined(CONFIG_PPC32)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
@@ -381,14 +326,9 @@ int main(void)
DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
- DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
#endif
#endif
-#ifndef CONFIG_PPC64
- OFFSET(MM_PGD, mm_struct, pgd);
-#endif /* ! CONFIG_PPC64 */
-
/* About the CPU features table */
OFFSET(CPU_SPEC_FEATURES, cpu_spec, cpu_features);
OFFSET(CPU_SPEC_SETUP, cpu_spec, cpu_setup);
@@ -421,13 +361,6 @@ int main(void)
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif
-#ifdef CONFIG_PPC_BOOK3S_64
- DEFINE(PGD_TABLE_SIZE, (sizeof(pgd_t) << max(RADIX_PGD_INDEX_SIZE, H_PGD_INDEX_SIZE)));
-#else
- DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
-#endif
- DEFINE(PTE_SIZE, sizeof(pte_t));
-
#ifdef CONFIG_KVM
OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);
OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);
@@ -499,11 +432,9 @@ int main(void)
OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid);
OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr);
OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1);
- OFFSET(KVM_NEED_FLUSH, kvm, arch.need_tlb_flush.bits);
OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls);
OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
OFFSET(KVM_RADIX, kvm, arch.radix);
- OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled);
OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest);
OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
@@ -531,7 +462,6 @@ int main(void)
OFFSET(VCPU_DAWRX1, kvm_vcpu, arch.dawrx1);
OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr);
OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags);
- OFFSET(VCPU_DEC, kvm_vcpu, arch.dec);
OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires);
OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions);
OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded);
@@ -542,7 +472,6 @@ int main(void)
OFFSET(VCPU_MMCRA, kvm_vcpu, arch.mmcra);
OFFSET(VCPU_MMCRS, kvm_vcpu, arch.mmcrs);
OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
- OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc);
OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar);
OFFSET(VCPU_SDAR, kvm_vcpu, arch.sdar);
OFFSET(VCPU_SIER, kvm_vcpu, arch.sier);
@@ -551,7 +480,6 @@ int main(void)
OFFSET(VCPU_SLB_NR, kvm_vcpu, arch.slb_nr);
OFFSET(VCPU_FAULT_DSISR, kvm_vcpu, arch.fault_dsisr);
OFFSET(VCPU_FAULT_DAR, kvm_vcpu, arch.fault_dar);
- OFFSET(VCPU_FAULT_GPA, kvm_vcpu, arch.fault_gpa);
OFFSET(VCPU_INTR_MSR, kvm_vcpu, arch.intr_msr);
OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
OFFSET(VCPU_TRAP, kvm_vcpu, arch.trap);
@@ -663,10 +591,8 @@ int main(void)
HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
- HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys);
HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt);
- HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
HSTATE_FIELD(HSTATE_PTID, ptid);
HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend);
@@ -774,7 +700,6 @@ int main(void)
#endif
DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
- DEFINE(PPC_DBELL_MSGTYPE, PPC_DBELL_MSGTYPE);
#ifdef CONFIG_PPC_8xx
DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE));
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 735e89337398..5693e1c67c2b 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -35,7 +35,7 @@ void __init reserve_kdump_trampoline(void)
static void __init create_trampoline(unsigned long addr)
{
- struct ppc_inst *p = (struct ppc_inst *)addr;
+ u32 *p = (u32 *)addr;
	/* The maximum range of a single instruction branch is the current
* instruction's address + (32 MB - 4) bytes. For the trampoline we
@@ -45,8 +45,8 @@ static void __init create_trampoline(unsigned long addr)
* branch to "addr" we jump to ("addr" + 32 MB). Although it requires
* two instructions it doesn't require any registers.
*/
- patch_instruction(p, ppc_inst(PPC_INST_NOP));
- patch_branch((void *)p + 4, addr + PHYSICAL_START, 0);
+ patch_instruction(p, ppc_inst(PPC_RAW_NOP()));
+ patch_branch(p + 1, addr + PHYSICAL_START, 0);
}
void __init setup_kdump_trampoline(void)
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index cd60bc1c8701..3bbdcc86d01b 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -346,31 +346,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
*/
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
- pte_t *ptep;
- unsigned long pa;
- int hugepage_shift;
-
- /*
- * We won't find hugepages here(this is iomem). Hence we are not
- * worried about _PAGE_SPLITTING/collapse. Also we will not hit
- * page table free, because of init_mm.
- */
- ptep = find_init_mm_pte(token, &hugepage_shift);
- if (!ptep)
- return token;
-
- pa = pte_pfn(*ptep);
-
- /* On radix we can do hugepage mappings for io, so handle that */
- if (hugepage_shift) {
- pa <<= hugepage_shift;
- pa |= token & ((1ul << hugepage_shift) - 1);
- } else {
- pa <<= PAGE_SHIFT;
- pa |= token & (PAGE_SIZE - 1);
- }
-
- return pa;
+ return ppc_find_vmap_phys(token);
}
/*
@@ -779,7 +755,7 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
default:
eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED, true);
return -EINVAL;
- };
+ }
return 0;
}
@@ -1568,6 +1544,7 @@ int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
}
EXPORT_SYMBOL_GPL(eeh_pe_inject_err);
+#ifdef CONFIG_PROC_FS
static int proc_eeh_show(struct seq_file *m, void *v)
{
if (!eeh_enabled()) {
@@ -1594,6 +1571,7 @@ static int proc_eeh_show(struct seq_file *m, void *v)
return 0;
}
+#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_DEBUG_FS
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 78c430b7f9d9..0273a1349006 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -32,6 +32,7 @@
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
+#include <asm/interrupt.h>
#include "head_32.h"
@@ -48,195 +49,16 @@
*/
.align 12
-#ifdef CONFIG_BOOKE
- .globl mcheck_transfer_to_handler
-mcheck_transfer_to_handler:
- mfspr r0,SPRN_DSRR0
- stw r0,_DSRR0(r11)
- mfspr r0,SPRN_DSRR1
- stw r0,_DSRR1(r11)
- /* fall through */
-_ASM_NOKPROBE_SYMBOL(mcheck_transfer_to_handler)
-
- .globl debug_transfer_to_handler
-debug_transfer_to_handler:
- mfspr r0,SPRN_CSRR0
- stw r0,_CSRR0(r11)
- mfspr r0,SPRN_CSRR1
- stw r0,_CSRR1(r11)
- /* fall through */
-_ASM_NOKPROBE_SYMBOL(debug_transfer_to_handler)
-
- .globl crit_transfer_to_handler
-crit_transfer_to_handler:
-#ifdef CONFIG_PPC_BOOK3E_MMU
- mfspr r0,SPRN_MAS0
- stw r0,MAS0(r11)
- mfspr r0,SPRN_MAS1
- stw r0,MAS1(r11)
- mfspr r0,SPRN_MAS2
- stw r0,MAS2(r11)
- mfspr r0,SPRN_MAS3
- stw r0,MAS3(r11)
- mfspr r0,SPRN_MAS6
- stw r0,MAS6(r11)
-#ifdef CONFIG_PHYS_64BIT
- mfspr r0,SPRN_MAS7
- stw r0,MAS7(r11)
-#endif /* CONFIG_PHYS_64BIT */
-#endif /* CONFIG_PPC_BOOK3E_MMU */
-#ifdef CONFIG_44x
- mfspr r0,SPRN_MMUCR
- stw r0,MMUCR(r11)
-#endif
- mfspr r0,SPRN_SRR0
- stw r0,_SRR0(r11)
- mfspr r0,SPRN_SRR1
- stw r0,_SRR1(r11)
-
- /* set the stack limit to the current stack */
- mfspr r8,SPRN_SPRG_THREAD
- lwz r0,KSP_LIMIT(r8)
- stw r0,SAVED_KSP_LIMIT(r11)
- rlwinm r0,r1,0,0,(31 - THREAD_SHIFT)
- stw r0,KSP_LIMIT(r8)
- /* fall through */
-_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
-#endif
-
-#ifdef CONFIG_40x
- .globl crit_transfer_to_handler
-crit_transfer_to_handler:
- lwz r0,crit_r10@l(0)
- stw r0,GPR10(r11)
- lwz r0,crit_r11@l(0)
- stw r0,GPR11(r11)
- mfspr r0,SPRN_SRR0
- stw r0,crit_srr0@l(0)
- mfspr r0,SPRN_SRR1
- stw r0,crit_srr1@l(0)
-
- /* set the stack limit to the current stack */
- mfspr r8,SPRN_SPRG_THREAD
- lwz r0,KSP_LIMIT(r8)
- stw r0,saved_ksp_limit@l(0)
- rlwinm r0,r1,0,0,(31 - THREAD_SHIFT)
- stw r0,KSP_LIMIT(r8)
- /* fall through */
-_ASM_NOKPROBE_SYMBOL(crit_transfer_to_handler)
-#endif
-
-/*
- * This code finishes saving the registers to the exception frame
- * and jumps to the appropriate handler for the exception, turning
- * on address translation.
- * Note that we rely on the caller having set cr0.eq iff the exception
- * occurred in kernel mode (i.e. MSR:PR = 0).
- */
- .globl transfer_to_handler_full
-transfer_to_handler_full:
- SAVE_NVGPRS(r11)
-_ASM_NOKPROBE_SYMBOL(transfer_to_handler_full)
- /* fall through */
-
- .globl transfer_to_handler
-transfer_to_handler:
- stw r2,GPR2(r11)
- stw r12,_NIP(r11)
- stw r9,_MSR(r11)
- andi. r2,r9,MSR_PR
- mfctr r12
- mfspr r2,SPRN_XER
- stw r12,_CTR(r11)
- stw r2,_XER(r11)
- mfspr r12,SPRN_SPRG_THREAD
- tovirt_vmstack r12, r12
- beq 2f /* if from user, fix up THREAD.regs */
- addi r2, r12, -THREAD
- addi r11,r1,STACK_FRAME_OVERHEAD
- stw r11,PT_REGS(r12)
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
- /* Check to see if the dbcr0 register is set up to debug. Use the
- internal debug mode bit to do this. */
- lwz r12,THREAD_DBCR0(r12)
- andis. r12,r12,DBCR0_IDM@h
-#endif
- ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
-#ifdef CONFIG_PPC_BOOK3S_32
- kuep_lock r11, r12
-#endif
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
- beq+ 3f
- /* From user and task is ptraced - load up global dbcr0 */
- li r12,-1 /* clear all pending debug events */
- mtspr SPRN_DBSR,r12
- lis r11,global_dbcr0@ha
- tophys(r11,r11)
- addi r11,r11,global_dbcr0@l
-#ifdef CONFIG_SMP
- lwz r9,TASK_CPU(r2)
- slwi r9,r9,2
- add r11,r11,r9
-#endif
- lwz r12,0(r11)
- mtspr SPRN_DBCR0,r12
-#endif
-
- b 3f
-
-2: /* if from kernel, check interrupted DOZE/NAP mode and
- * check for stack overflow
- */
- kuap_save_and_lock r11, r12, r9, r2, r6
- addi r2, r12, -THREAD
-#ifndef CONFIG_VMAP_STACK
- lwz r9,KSP_LIMIT(r12)
- cmplw r1,r9 /* if r1 <= ksp_limit */
- ble- stack_ovf /* then the kernel stack overflowed */
-#endif
-5:
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
+ .globl prepare_transfer_to_handler
+prepare_transfer_to_handler:
+ /* if from kernel, check interrupted DOZE/NAP mode */
lwz r12,TI_LOCAL_FLAGS(r2)
mtcrf 0x01,r12
bt- 31-TLF_NAPPING,4f
bt- 31-TLF_SLEEPING,7f
-#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
- .globl transfer_to_handler_cont
-transfer_to_handler_cont:
-3:
- mflr r9
- tovirt_novmstack r2, r2 /* set r2 to current */
- tovirt_vmstack r9, r9
- lwz r11,0(r9) /* virtual address of handler */
- lwz r9,4(r9) /* where to go when done */
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
- mtspr SPRN_NRI, r0
-#endif
-#ifdef CONFIG_TRACE_IRQFLAGS
- /*
- * When tracing IRQ state (lockdep) we enable the MMU before we call
- * the IRQ tracing functions as they might access vmalloc space or
- * perform IOs for console output.
- *
- * To speed up the syscall path where interrupts stay on, let's check
- * first if we are changing the MSR value at all.
- */
- tophys_novmstack r12, r1
- lwz r12,_MSR(r12)
- andi. r12,r12,MSR_EE
- bne 1f
-
- /* MSR isn't changing, just transition directly */
-#endif
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r10
- mtlr r9
- rfi /* jump to handler, enable MMU */
-#ifdef CONFIG_40x
- b . /* Prevent prefetch past rfi */
-#endif
+ blr
-#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
4: rlwinm r12,r12,0,~_TLF_NAPPING
stw r12,TI_LOCAL_FLAGS(r2)
b power_save_ppc32_restore
@@ -246,97 +68,36 @@ transfer_to_handler_cont:
lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
rlwinm r9,r9,0,~MSR_EE
lwz r12,_LINK(r11) /* and return to address in LR */
- kuap_restore r11, r2, r3, r4, r5
lwz r2, GPR2(r11)
b fast_exception_return
-#endif
-_ASM_NOKPROBE_SYMBOL(transfer_to_handler)
-_ASM_NOKPROBE_SYMBOL(transfer_to_handler_cont)
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-1: /* MSR is changing, re-enable MMU so we can notify lockdep. We need to
- * keep interrupts disabled at this point otherwise we might risk
- * taking an interrupt before we tell lockdep they are enabled.
- */
- lis r12,reenable_mmu@h
- ori r12,r12,reenable_mmu@l
- LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
- mtspr SPRN_SRR0,r12
- mtspr SPRN_SRR1,r0
- rfi
-#ifdef CONFIG_40x
- b . /* Prevent prefetch past rfi */
-#endif
-
-reenable_mmu:
- /*
- * We save a bunch of GPRs;
- * r3 can be different from GPR3(r1) at this point, r9 and r11
- * contain the old MSR and handler address respectively,
- * r0, r4-r8, r12, CCR, CTR, XER etc... are left
- * clobbered as they aren't useful past this point.
- */
-
- stwu r1,-32(r1)
- stw r9,8(r1)
- stw r11,12(r1)
- stw r3,16(r1)
-
- /* If we are disabling interrupts (normal case), simply log it with
- * lockdep
- */
-1: bl trace_hardirqs_off
- lwz r3,16(r1)
- lwz r11,12(r1)
- lwz r9,8(r1)
- addi r1,r1,32
- mtctr r11
- mtlr r9
- bctr /* jump to handler */
-#endif /* CONFIG_TRACE_IRQFLAGS */
-
-#ifndef CONFIG_VMAP_STACK
-/*
- * On kernel stack overflow, load up an initial stack pointer
- * and call StackOverflow(regs), which should not return.
- */
-stack_ovf:
- /* sometimes we use a statically-allocated stack, which is OK. */
- lis r12,_end@h
- ori r12,r12,_end@l
- cmplw r1,r12
- ble 5b /* r1 <= &_end is OK */
- SAVE_NVGPRS(r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- lis r1,init_thread_union@ha
- addi r1,r1,init_thread_union@l
- addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
- lis r9,StackOverflow@ha
- addi r9,r9,StackOverflow@l
- LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
- mtspr SPRN_NRI, r0
-#endif
- mtspr SPRN_SRR0,r9
- mtspr SPRN_SRR1,r10
- rfi
-#ifdef CONFIG_40x
- b . /* Prevent prefetch past rfi */
-#endif
-_ASM_NOKPROBE_SYMBOL(stack_ovf)
-#endif
+_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
+#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
.globl transfer_to_syscall
transfer_to_syscall:
+ stw r11, GPR1(r1)
+ stw r11, 0(r1)
+ mflr r12
+ stw r12, _LINK(r1)
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+ rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
+#endif
+ lis r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
+ SAVE_GPR(2, r1)
+ addi r12,r12,STACK_FRAME_REGS_MARKER@l
+ stw r9,_MSR(r1)
+ li r2, INTERRUPT_SYSCALL
+ stw r12,8(r1)
+ stw r2,_TRAP(r1)
+ SAVE_GPR(0, r1)
+ SAVE_4GPRS(3, r1)
+ SAVE_2GPRS(7, r1)
+ addi r2,r10,-THREAD
SAVE_NVGPRS(r1)
-#ifdef CONFIG_PPC_BOOK3S_32
- kuep_lock r11, r12
-#endif
/* Calling convention has r9 = orig r0, r10 = regs */
addi r10,r1,STACK_FRAME_OVERHEAD
mr r9,r0
- stw r10,THREAD+PT_REGS(r2)
bl system_call_exception
ret_from_syscall:
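The stub now builds the full exception frame inline and hands control straight to C. A hedged sketch of the C entry point it branches to (the real function lives in arch/powerpc/kernel/interrupt.c; the parameter layout is inferred from the "r9 = orig r0, r10 = regs" convention noted above):

/* Sketch: syscall arguments arrive in r3-r8, the original r0 (the
 * syscall number) in the seventh slot, and pt_regs in the eighth. */
long system_call_exception(long r3, long r4, long r5, long r6,
			   long r7, long r8,
			   unsigned long r0, struct pt_regs *regs)
{
	/* ... tracing/seccomp hooks elided ... */
	syscall_fn f = (syscall_fn)sys_call_table[r0];
	return f(r3, r4, r5, r6, r7, r8);
}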
@@ -349,10 +110,6 @@ ret_from_syscall:
cmplwi cr0,r5,0
bne- 2f
#endif /* CONFIG_PPC_47x */
-#ifdef CONFIG_PPC_BOOK3S_32
- kuep_unlock r5, r7
-#endif
- kuap_check r2, r4
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
mtlr r4
@@ -412,27 +169,6 @@ ret_from_kernel_thread:
b ret_from_syscall
/*
- * Top-level page fault handling.
- * This is in assembler because if do_page_fault tells us that
- * it is a bad kernel page fault, we want to save the non-volatile
- * registers before calling bad_page_fault.
- */
- .globl handle_page_fault
-handle_page_fault:
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_page_fault
- cmpwi r3,0
- beq+ ret_from_except
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- clrrwi r0,r0,1
- stw r0,_TRAP(r1)
- mr r4,r3 /* err arg for bad_page_fault */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl __bad_page_fault
- b ret_from_except_full
-
-/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state
* of the other is restored from its kernel stack. The memory
@@ -459,33 +195,10 @@ _GLOBAL(_switch)
/* r3-r12 are caller saved -- Cort */
SAVE_NVGPRS(r1)
stw r0,_NIP(r1) /* Return to switch caller */
- mfmsr r11
- li r0,MSR_FP /* Disable floating-point */
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
- oris r0,r0,MSR_VEC@h /* Disable altivec */
- mfspr r12,SPRN_VRSAVE /* save vrsave register value */
- stw r12,THREAD+THREAD_VRSAVE(r2)
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-BEGIN_FTR_SECTION
- oris r0,r0,MSR_SPE@h /* Disable SPE */
- mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
- stw r12,THREAD+THREAD_SPEFSCR(r2)
-END_FTR_SECTION_IFSET(CPU_FTR_SPE)
-#endif /* CONFIG_SPE */
- and. r0,r0,r11 /* FP or altivec or SPE enabled? */
- beq+ 1f
- andc r11,r11,r0
- mtmsr r11
- isync
-1: stw r11,_MSR(r1)
mfcr r10
stw r10,_CCR(r1)
stw r1,KSP(r3) /* Set old stack pointer */
- kuap_check r2, r0
#ifdef CONFIG_SMP
/* We need a sync somewhere here to make sure that if the
* previous task gets rescheduled on another CPU, it sees all
@@ -502,19 +215,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
mr r3,r2
addi r2,r4,-THREAD /* Update current */
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
- lwz r0,THREAD+THREAD_VRSAVE(r2)
- mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-BEGIN_FTR_SECTION
- lwz r0,THREAD+THREAD_SPEFSCR(r2)
- mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
-END_FTR_SECTION_IFSET(CPU_FTR_SPE)
-#endif /* CONFIG_SPE */
-
lwz r0,_CCR(r1)
mtcrf 0xFF,r0
/* r3-r12 are destroyed -- Cort */
@@ -529,12 +229,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
andi. r10,r9,MSR_RI /* check for recoverable interrupt */
- beq 1f /* if not, we've got problems */
+ beq 3f /* if not, we've got problems */
#endif
2: REST_4GPRS(3, r11)
lwz r10,_CCR(r11)
- REST_GPR(1, r11)
+ REST_2GPRS(1, r11)
mtcr r10
lwz r10,_LINK(r11)
mtlr r10
@@ -556,257 +256,147 @@ fast_exception_return:
#endif
_ASM_NOKPROBE_SYMBOL(fast_exception_return)
-#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-/* check if the exception happened in a restartable section */
-1: lis r3,exc_exit_restart_end@ha
- addi r3,r3,exc_exit_restart_end@l
- cmplw r12,r3
- bge 3f
- lis r4,exc_exit_restart@ha
- addi r4,r4,exc_exit_restart@l
- cmplw r12,r4
- blt 3f
- lis r3,fee_restarts@ha
- tophys(r3,r3)
- lwz r5,fee_restarts@l(r3)
- addi r5,r5,1
- stw r5,fee_restarts@l(r3)
- mr r12,r4 /* restart at exc_exit_restart */
- b 2b
-
- .section .bss
- .align 2
-fee_restarts:
- .space 4
- .previous
-
/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
3:
li r10,-1
stw r10,_TRAP(r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- lis r10,MSR_KERNEL@h
- ori r10,r10,MSR_KERNEL@l
- bl transfer_to_handler_full
- .long unrecoverable_exception
- .long ret_from_except
-#endif
-
- .globl ret_from_except_full
-ret_from_except_full:
- REST_NVGPRS(r1)
- /* fall through */
-
- .globl ret_from_except
-ret_from_except:
- /* Hard-disable interrupts so that current_thread_info()->flags
- * can't change between when we test it and when we return
- * from the interrupt. */
- /* Note: We don't bother telling lockdep about it */
- LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
- mtmsr r10 /* disable interrupts */
-
- lwz r3,_MSR(r1) /* Returning to user mode? */
- andi. r0,r3,MSR_PR
- beq resume_kernel
-
-user_exc_return: /* r10 contains MSR_KERNEL here */
- /* Check current_thread_info()->flags */
- lwz r9,TI_FLAGS(r2)
- andi. r0,r9,_TIF_USER_WORK_MASK
- bne do_work
+ prepare_transfer_to_handler
+ bl unrecoverable_exception
+ trap /* should not get here */
-restore_user:
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- /* Check whether this process has its own DBCR0 value. The internal
- debug mode bit tells us that dbcr0 should be loaded. */
- lwz r0,THREAD+THREAD_DBCR0(r2)
- andis. r10,r0,DBCR0_IDM@h
- bnel- load_dbcr0
-#endif
- ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
-#ifdef CONFIG_PPC_BOOK3S_32
- kuep_unlock r10, r11
-#endif
+ .globl interrupt_return
+interrupt_return:
+ lwz r4,_MSR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ andi. r0,r4,MSR_PR
+ beq .Lkernel_interrupt_return
+ bl interrupt_exit_user_prepare
+ cmpwi r3,0
+ bne- .Lrestore_nvgprs
- b restore
+.Lfast_user_interrupt_return:
+ lwz r11,_NIP(r1)
+ lwz r12,_MSR(r1)
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
-/* N.B. the only way to get here is from the beq following ret_from_except. */
-resume_kernel:
- /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
- lwz r8,TI_FLAGS(r2)
- andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
- beq+ 1f
+BEGIN_FTR_SECTION
+ stwcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ lwarx r0,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
- addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
+ lwz r3,_CCR(r1)
+ lwz r4,_LINK(r1)
+ lwz r5,_CTR(r1)
+ lwz r6,_XER(r1)
+ li r0,0
- lwz r3,GPR1(r1)
- subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
- mr r4,r1 /* src: current exception frame */
- mr r1,r3 /* Reroute the trampoline frame to r1 */
+ /*
+ * Leaving a stale exception_marker on the stack can confuse
+ * the reliable stack unwinder later on. Clear it.
+ */
+ stw r0,8(r1)
+ REST_4GPRS(7, r1)
+ REST_2GPRS(11, r1)
- /* Copy from the original to the trampoline. */
- li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
- li r6,0 /* start offset: 0 */
+ mtcr r3
+ mtlr r4
mtctr r5
-2: lwzx r0,r6,r4
- stwx r0,r6,r3
- addi r6,r6,4
- bdnz 2b
-
- /* Do real store operation to complete stwu */
- lwz r5,GPR1(r1)
- stw r8,0(r5)
+ mtspr SPRN_XER,r6
- /* Clear _TIF_EMULATE_STACK_STORE flag */
- lis r11,_TIF_EMULATE_STACK_STORE@h
- addi r5,r2,TI_FLAGS
-0: lwarx r8,0,r5
- andc r8,r8,r11
- stwcx. r8,0,r5
- bne- 0b
-1:
-
-#ifdef CONFIG_PREEMPTION
- /* check current_thread_info->preempt_count */
- lwz r0,TI_PREEMPT(r2)
- cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
- bne restore_kuap
- andi. r8,r8,_TIF_NEED_RESCHED
- beq+ restore_kuap
- lwz r3,_MSR(r1)
- andi. r0,r3,MSR_EE /* interrupts off? */
- beq restore_kuap /* don't schedule if so */
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* Lockdep thinks irqs are enabled, we need to call
- * preempt_schedule_irq with IRQs off, so we inform lockdep
- * now that we -did- turn them off already
- */
- bl trace_hardirqs_off
-#endif
- bl preempt_schedule_irq
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* And now, to properly rebalance the above, we tell lockdep they
- * are being turned back on, which will happen when we return
- */
- bl trace_hardirqs_on
+ REST_4GPRS(2, r1)
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ rfi
+#ifdef CONFIG_40x
+ b . /* Prevent prefetch past rfi */
#endif
-#endif /* CONFIG_PREEMPTION */
-restore_kuap:
- kuap_restore r1, r2, r9, r10, r0
-
- /* interrupts are hard-disabled at this point */
-restore:
-#if defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
- lis r4,icache_44x_need_flush@ha
- lwz r5,icache_44x_need_flush@l(r4)
- cmplwi cr0,r5,0
- beq+ 1f
- li r6,0
- iccci r0,r0
- stw r6,icache_44x_need_flush@l(r4)
-1:
-#endif /* CONFIG_44x */
- lwz r9,_MSR(r1)
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* Lockdep doesn't know about the fact that IRQs are temporarily turned
- * off in this assembly code while peeking at TI_FLAGS() and such. However
- * we need to inform it if the exception turned interrupts off, and we
- * are about to turn them back on.
- */
- andi. r10,r9,MSR_EE
- beq 1f
- stwu r1,-32(r1)
- mflr r0
- stw r0,4(r1)
- bl trace_hardirqs_on
- addi r1, r1, 32
- lwz r9,_MSR(r1)
-1:
-#endif /* CONFIG_TRACE_IRQFLAGS */
+.Lrestore_nvgprs:
+ REST_NVGPRS(r1)
+ b .Lfast_user_interrupt_return
- lwz r0,GPR0(r1)
- lwz r2,GPR2(r1)
- REST_4GPRS(3, r1)
- REST_2GPRS(7, r1)
+.Lkernel_interrupt_return:
+ bl interrupt_exit_kernel_prepare
- lwz r10,_XER(r1)
- lwz r11,_CTR(r1)
- mtspr SPRN_XER,r10
- mtctr r11
+.Lfast_kernel_interrupt_return:
+ cmpwi cr1,r3,0
+ lwz r11,_NIP(r1)
+ lwz r12,_MSR(r1)
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
BEGIN_FTR_SECTION
- lwarx r11,0,r1
-END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
- stwcx. r0,0,r1 /* to clear the reservation */
+ stwcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ lwarx r0,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
- andi. r10,r9,MSR_RI /* check if this exception occurred */
- beql nonrecoverable /* at a bad place (MSR:RI = 0) */
+ lwz r3,_LINK(r1)
+ lwz r4,_CTR(r1)
+ lwz r5,_XER(r1)
+ lwz r6,_CCR(r1)
+ li r0,0
- lwz r10,_CCR(r1)
- lwz r11,_LINK(r1)
- mtcrf 0xFF,r10
- mtlr r11
+ REST_4GPRS(7, r1)
+ REST_2GPRS(11, r1)
+
+ mtlr r3
+ mtctr r4
+ mtspr SPRN_XER,r5
- /* Clear the exception_marker on the stack to avoid confusing stacktrace */
- li r10, 0
- stw r10, 8(r1)
/*
- * Once we put values in SRR0 and SRR1, we are in a state
- * where exceptions are not recoverable, since taking an
- * exception will trash SRR0 and SRR1. Therefore we clear the
- * MSR:RI bit to indicate this. If we do take an exception,
- * we can't return to the point of the exception but we
- * can restart the exception exit path at the label
- * exc_exit_restart below. -- paulus
+ * Leaving a stale exception_marker on the stack can confuse
+ * the reliable stack unwinder later on. Clear it.
*/
- LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
- mtmsr r10 /* clear the RI bit */
- .globl exc_exit_restart
-exc_exit_restart:
- lwz r12,_NIP(r1)
- mtspr SPRN_SRR0,r12
- mtspr SPRN_SRR1,r9
- REST_4GPRS(9, r1)
- lwz r1,GPR1(r1)
- .globl exc_exit_restart_end
-exc_exit_restart_end:
+ stw r0,8(r1)
+
+ REST_4GPRS(2, r1)
+
+ bne- cr1,1f /* emulate stack store */
+ mtcr r6
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
rfi
-_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
-_ASM_NOKPROBE_SYMBOL(exc_exit_restart_end)
+#ifdef CONFIG_40x
+ b . /* Prevent prefetch past rfi */
+#endif
-#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
- /*
- * This is a bit different on 4xx/Book-E because it doesn't have
- * the RI bit in the MSR.
- * The TLB miss handler checks if we have interrupted
- * the exception exit path and restarts it if so
- * (well maybe one day it will... :).
+1: /*
+ * Emulate stack store with update. New r1 value was already calculated
+ * and updated in our interrupt regs by emulate_loadstore, but we can't
+ * store the previous value of r1 to the stack before re-loading our
+ * registers from it; otherwise they could be clobbered. Use
+ * SPRG Scratch0 as temporary storage to hold the store
+ * data, as interrupts are disabled here so it won't be clobbered.
*/
- lwz r11,_LINK(r1)
- mtlr r11
- lwz r10,_CCR(r1)
- mtcrf 0xff,r10
- /* Clear the exception_marker on the stack to avoid confusing stacktrace */
- li r10, 0
- stw r10, 8(r1)
- REST_2GPRS(9, r1)
- .globl exc_exit_restart
-exc_exit_restart:
- lwz r11,_NIP(r1)
- lwz r12,_MSR(r1)
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
- REST_2GPRS(11, r1)
- lwz r1,GPR1(r1)
- .globl exc_exit_restart_end
-exc_exit_restart_end:
+ mtcr r6
+#ifdef CONFIG_BOOKE
+ mtspr SPRN_SPRG_WSCRATCH0, r9
+#else
+ mtspr SPRN_SPRG_SCRATCH0, r9
+#endif
+ addi r9,r1,INT_FRAME_SIZE /* get original r1 */
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ stw r9,0(r1) /* perform store component of stwu */
+#ifdef CONFIG_BOOKE
+ mfspr r9, SPRN_SPRG_RSCRATCH0
+#else
+ mfspr r9, SPRN_SPRG_SCRATCH0
+#endif
rfi
- b . /* prevent prefetch past rfi */
-_ASM_NOKPROBE_SYMBOL(exc_exit_restart)
+#ifdef CONFIG_40x
+ b . /* Prevent prefetch past rfi */
+#endif
+_ASM_NOKPROBE_SYMBOL(interrupt_return)
+
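The new common exit path above replaces the ret_from_except maze with a pair of C preparers whose return value selects the slow path. A hedged C rendering of the control flow (the helper names restore_nvgprs() and emulate_stack_store() are illustrative, not real symbols):

/* Sketch of interrupt_return's decision tree: */
void interrupt_return_sketch(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		if (interrupt_exit_user_prepare(regs))
			restore_nvgprs(regs);	/* .Lrestore_nvgprs */
		/* fast user return: SRR0/1 <- nip/msr, restore GPRs, rfi */
	} else {
		if (interrupt_exit_kernel_prepare(regs))
			emulate_stack_store(regs); /* replay stwu, 1: above */
		/* fast kernel return, clearing the exception marker */
	}
}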
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/*
* Returning from a critical interrupt in user mode doesn't need
@@ -837,8 +427,7 @@ _ASM_NOKPROBE_SYMBOL(exc_exit_restart)
REST_NVGPRS(r1); \
lwz r3,_MSR(r1); \
andi. r3,r3,MSR_PR; \
- LOAD_REG_IMMEDIATE(r10,MSR_KERNEL); \
- bne user_exc_return; \
+ bne interrupt_return; \
lwz r0,GPR0(r1); \
lwz r2,GPR2(r1); \
REST_4GPRS(3, r1); \
@@ -906,11 +495,6 @@ _ASM_NOKPROBE_SYMBOL(exc_exit_restart)
#ifdef CONFIG_40x
.globl ret_from_crit_exc
ret_from_crit_exc:
- mfspr r9,SPRN_SPRG_THREAD
- lis r10,saved_ksp_limit@ha;
- lwz r10,saved_ksp_limit@l(r10);
- tovirt(r9,r9);
- stw r10,KSP_LIMIT(r9)
lis r9,crit_srr0@ha;
lwz r9,crit_srr0@l(r9);
lis r10,crit_srr1@ha;
@@ -924,9 +508,6 @@ _ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#ifdef CONFIG_BOOKE
.globl ret_from_crit_exc
ret_from_crit_exc:
- mfspr r9,SPRN_SPRG_THREAD
- lwz r10,SAVED_KSP_LIMIT(r1)
- stw r10,KSP_LIMIT(r9)
RESTORE_xSRR(SRR0,SRR1);
RESTORE_MMU_REGS;
RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
@@ -934,9 +515,6 @@ _ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
.globl ret_from_debug_exc
ret_from_debug_exc:
- mfspr r9,SPRN_SPRG_THREAD
- lwz r10,SAVED_KSP_LIMIT(r1)
- stw r10,KSP_LIMIT(r9)
RESTORE_xSRR(SRR0,SRR1);
RESTORE_xSRR(CSRR0,CSRR1);
RESTORE_MMU_REGS;
@@ -945,9 +523,6 @@ _ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)
.globl ret_from_mcheck_exc
ret_from_mcheck_exc:
- mfspr r9,SPRN_SPRG_THREAD
- lwz r10,SAVED_KSP_LIMIT(r1)
- stw r10,KSP_LIMIT(r9)
RESTORE_xSRR(SRR0,SRR1);
RESTORE_xSRR(CSRR0,CSRR1);
RESTORE_xSRR(DSRR0,DSRR1);
@@ -955,121 +530,8 @@ ret_from_mcheck_exc:
RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
-
-/*
- * Load the DBCR0 value for a task that is being ptraced,
- * having first saved away the global DBCR0. Note that r0
- * has the dbcr0 value to set upon entry to this.
- */
-load_dbcr0:
- mfmsr r10 /* first disable debug exceptions */
- rlwinm r10,r10,0,~MSR_DE
- mtmsr r10
- isync
- mfspr r10,SPRN_DBCR0
- lis r11,global_dbcr0@ha
- addi r11,r11,global_dbcr0@l
-#ifdef CONFIG_SMP
- lwz r9,TASK_CPU(r2)
- slwi r9,r9,2
- add r11,r11,r9
-#endif
- stw r10,0(r11)
- mtspr SPRN_DBCR0,r0
- li r11,-1
- mtspr SPRN_DBSR,r11 /* clear all pending debug events */
- blr
-
- .section .bss
- .align 4
- .global global_dbcr0
-global_dbcr0:
- .space 4*NR_CPUS
- .previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
-do_work: /* r10 contains MSR_KERNEL here */
- andi. r0,r9,_TIF_NEED_RESCHED
- beq do_user_signal
-
-do_resched: /* r10 contains MSR_KERNEL here */
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
- mfmsr r10
-#endif
- ori r10,r10,MSR_EE
- mtmsr r10 /* hard-enable interrupts */
- bl schedule
-recheck:
- /* Note: And we don't tell it we are disabling them again,
- * either. Those disable/enable cycles used to peek at
- * TI_FLAGS aren't advertised.
- */
- LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
- mtmsr r10 /* disable interrupts */
- lwz r9,TI_FLAGS(r2)
- andi. r0,r9,_TIF_NEED_RESCHED
- bne- do_resched
- andi. r0,r9,_TIF_USER_WORK_MASK
- beq restore_user
-do_user_signal: /* r10 contains MSR_KERNEL here */
- ori r10,r10,MSR_EE
- mtmsr r10 /* hard-enable interrupts */
- /* save r13-r31 in the exception frame, if not already done */
- lwz r3,_TRAP(r1)
- andi. r0,r3,1
- beq 2f
- SAVE_NVGPRS(r1)
- rlwinm r3,r3,0,0,30
- stw r3,_TRAP(r1)
-2: addi r3,r1,STACK_FRAME_OVERHEAD
- mr r4,r9
- bl do_notify_resume
- REST_NVGPRS(r1)
- b recheck
-
-/*
- * We come here when we are at the end of handling an exception
- * that occurred at a place where taking an exception will lose
- * state information, such as the contents of SRR0 and SRR1.
- */
-nonrecoverable:
- lis r10,exc_exit_restart_end@ha
- addi r10,r10,exc_exit_restart_end@l
- cmplw r12,r10
- bge 3f
- lis r11,exc_exit_restart@ha
- addi r11,r11,exc_exit_restart@l
- cmplw r12,r11
- blt 3f
- lis r10,ee_restarts@ha
- lwz r12,ee_restarts@l(r10)
- addi r12,r12,1
- stw r12,ee_restarts@l(r10)
- mr r12,r11 /* restart at exc_exit_restart */
- blr
-3: /* OK, we can't recover, kill this process */
- lwz r3,_TRAP(r1)
- andi. r0,r3,1
- beq 5f
- SAVE_NVGPRS(r1)
- rlwinm r3,r3,0,0,30
- stw r3,_TRAP(r1)
-5: mfspr r2,SPRN_SPRG_THREAD
- addi r2,r2,-THREAD
- tovirt(r2,r2) /* set back r2 to current */
-4: addi r3,r1,STACK_FRAME_OVERHEAD
- bl unrecoverable_exception
- /* shouldn't return */
- b 4b
-_ASM_NOKPROBE_SYMBOL(nonrecoverable)
-
- .section .bss
- .align 2
-ee_restarts:
- .space 4
- .previous
-
/*
* PROM code for specific machines follows. Put it
* here so it's easy to add arch-specific sections later.
@@ -1088,7 +550,6 @@ _GLOBAL(enter_rtas)
lis r6,1f@ha /* physical return address for rtas */
addi r6,r6,1f@l
tophys(r6,r6)
- tophys_novmstack r7, r1
lwz r8,RTASENTRY(r4)
lwz r4,RTASBASE(r4)
mfmsr r9
@@ -1097,24 +558,25 @@ _GLOBAL(enter_rtas)
mtmsr r0 /* disable interrupts so SRR0/1 don't get trashed */
li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
mtlr r6
- stw r7, THREAD + RTAS_SP(r2)
+ stw r1, THREAD + RTAS_SP(r2)
mtspr SPRN_SRR0,r8
mtspr SPRN_SRR1,r9
rfi
-1: tophys_novmstack r9, r1
-#ifdef CONFIG_VMAP_STACK
- li r0, MSR_KERNEL & ~MSR_IR /* can take DTLB miss */
- mtmsr r0
- isync
-#endif
- lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
- lwz r9,8(r9) /* original msr value */
- addi r1,r1,INT_FRAME_SIZE
- li r0,0
- tophys_novmstack r7, r2
- stw r0, THREAD + RTAS_SP(r7)
+1:
+ lis r8, 1f@h
+ ori r8, r8, 1f@l
+ LOAD_REG_IMMEDIATE(r9,MSR_KERNEL)
mtspr SPRN_SRR0,r8
mtspr SPRN_SRR1,r9
- rfi /* return to caller */
+ rfi /* Reactivate MMU translation */
+1:
+ lwz r8,INT_FRAME_SIZE+4(r1) /* get return address */
+ lwz r9,8(r1) /* original msr value */
+ addi r1,r1,INT_FRAME_SIZE
+ li r0,0
+ stw r0, THREAD + RTAS_SP(r2)
+ mtlr r8
+ mtmsr r9
+ blr /* return to caller */
_ASM_NOKPROBE_SYMBOL(enter_rtas)
#endif /* CONFIG_PPC_RTAS */
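enter_rtas now comes back to its caller with a plain mtmsr/blr after re-enabling translation, instead of an rfi through saved SRRs. For context, a typical C-side caller of this stub, as a sketch using the generic RTAS helpers (the token name and return shape are service-dependent):

/* Sketch: rtas_call() marshals arguments into rtas_args and lands in
 * enter_rtas above with translation disabled. */
int fetch_rtas_time(void)
{
	int ret[8];	/* get-time-of-day returns several words */
	int token = rtas_token("get-time-of-day");

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENODEV;
	return rtas_call(token, 0, 8, ret);
}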
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 6c4d9e276c4d..15720f8661a1 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -32,7 +32,6 @@
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
-#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
@@ -48,387 +47,7 @@
/*
* System calls.
*/
- .section ".toc","aw"
-SYS_CALL_TABLE:
- .tc sys_call_table[TC],sys_call_table
-
-#ifdef CONFIG_COMPAT
-COMPAT_SYS_CALL_TABLE:
- .tc compat_sys_call_table[TC],compat_sys_call_table
-#endif
-
-/* This value is used to mark exception frames on the stack. */
-exception_marker:
- .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
-
.section ".text"
- .align 7
-
-#ifdef CONFIG_PPC_BOOK3S
-.macro system_call_vectored name trapnr
- .globl system_call_vectored_\name
-system_call_vectored_\name:
-_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- bne .Ltabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
- SCV_INTERRUPT_TO_KERNEL
- mr r10,r1
- ld r1,PACAKSAVE(r13)
- std r10,0(r1)
- std r11,_NIP(r1)
- std r12,_MSR(r1)
- std r0,GPR0(r1)
- std r10,GPR1(r1)
- std r2,GPR2(r1)
- ld r2,PACATOC(r13)
- mfcr r12
- li r11,0
- /* Can we avoid saving r3-r8 in common case? */
- std r3,GPR3(r1)
- std r4,GPR4(r1)
- std r5,GPR5(r1)
- std r6,GPR6(r1)
- std r7,GPR7(r1)
- std r8,GPR8(r1)
- /* Zero r9-r12, this should only be required when restoring all GPRs */
- std r11,GPR9(r1)
- std r11,GPR10(r1)
- std r11,GPR11(r1)
- std r11,GPR12(r1)
- std r9,GPR13(r1)
- SAVE_NVGPRS(r1)
- std r11,_XER(r1)
- std r11,_LINK(r1)
- std r11,_CTR(r1)
-
- li r11,\trapnr
- std r11,_TRAP(r1)
- std r12,_CCR(r1)
- addi r10,r1,STACK_FRAME_OVERHEAD
- ld r11,exception_marker@toc(r2)
- std r11,-16(r10) /* "regshere" marker */
-
-BEGIN_FTR_SECTION
- HMT_MEDIUM
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
- /*
- * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
- * would clobber syscall parameters. Also we always enter with IRQs
- * enabled and nothing pending. system_call_exception() will call
- * trace_hardirqs_off().
- *
- * scv enters with MSR[EE]=1, so don't set PACA_IRQ_HARD_DIS. The
- * entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED.
- */
-
- /* Calling convention has r9 = orig r0, r10 = regs */
- mr r9,r0
- bl system_call_exception
-
-.Lsyscall_vectored_\name\()_exit:
- addi r4,r1,STACK_FRAME_OVERHEAD
- li r5,1 /* scv */
- bl syscall_exit_prepare
-
- ld r2,_CCR(r1)
- ld r4,_NIP(r1)
- ld r5,_MSR(r1)
-
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
-BEGIN_FTR_SECTION
- HMT_MEDIUM_LOW
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
- cmpdi r3,0
- bne .Lsyscall_vectored_\name\()_restore_regs
-
- /* rfscv returns with LR->NIA and CTR->MSR */
- mtlr r4
- mtctr r5
-
- /* Could zero these as per ABI, but we may consider a stricter ABI
- * which preserves these if libc implementations can benefit, so
- * restore them for now until further measurement is done. */
- ld r0,GPR0(r1)
- ld r4,GPR4(r1)
- ld r5,GPR5(r1)
- ld r6,GPR6(r1)
- ld r7,GPR7(r1)
- ld r8,GPR8(r1)
- /* Zero volatile regs that may contain sensitive kernel data */
- li r9,0
- li r10,0
- li r11,0
- li r12,0
- mtspr SPRN_XER,r0
-
- /*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * The value of AMR only matters while we're in the kernel.
- */
- mtcr r2
- ld r2,GPR2(r1)
- ld r3,GPR3(r1)
- ld r13,GPR13(r1)
- ld r1,GPR1(r1)
- RFSCV_TO_USER
- b . /* prevent speculative execution */
-
-.Lsyscall_vectored_\name\()_restore_regs:
- li r3,0
- mtmsrd r3,1
- mtspr SPRN_SRR0,r4
- mtspr SPRN_SRR1,r5
-
- ld r3,_CTR(r1)
- ld r4,_LINK(r1)
- ld r5,_XER(r1)
-
- REST_NVGPRS(r1)
- ld r0,GPR0(r1)
- mtcr r2
- mtctr r3
- mtlr r4
- mtspr SPRN_XER,r5
- REST_10GPRS(2, r1)
- REST_2GPRS(12, r1)
- ld r1,GPR1(r1)
- RFI_TO_USER
-.endm
-
-system_call_vectored common 0x3000
-/*
- * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
- * which is tested by system_call_exception when r0 is -1 (as set by vector
- * entry code).
- */
-system_call_vectored sigill 0x7ff0
-
-
-/*
- * Entered via kernel return set up by kernel/sstep.c, must match entry regs
- */
- .globl system_call_vectored_emulate
-system_call_vectored_emulate:
-_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
- li r10,IRQS_ALL_DISABLED
- stb r10,PACAIRQSOFTMASK(r13)
- b system_call_vectored_common
-#endif
-
- .balign IFETCH_ALIGN_BYTES
- .globl system_call_common_real
-system_call_common_real:
- ld r10,PACAKMSR(r13) /* get MSR value for kernel */
- mtmsrd r10
-
- .balign IFETCH_ALIGN_BYTES
- .globl system_call_common
-system_call_common:
-_ASM_NOKPROBE_SYMBOL(system_call_common)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- bne .Ltabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
- mr r10,r1
- ld r1,PACAKSAVE(r13)
- std r10,0(r1)
- std r11,_NIP(r1)
- std r12,_MSR(r1)
- std r0,GPR0(r1)
- std r10,GPR1(r1)
- std r2,GPR2(r1)
-#ifdef CONFIG_PPC_FSL_BOOK3E
-START_BTB_FLUSH_SECTION
- BTB_FLUSH(r10)
-END_BTB_FLUSH_SECTION
-#endif
- ld r2,PACATOC(r13)
- mfcr r12
- li r11,0
- /* Can we avoid saving r3-r8 in common case? */
- std r3,GPR3(r1)
- std r4,GPR4(r1)
- std r5,GPR5(r1)
- std r6,GPR6(r1)
- std r7,GPR7(r1)
- std r8,GPR8(r1)
- /* Zero r9-r12, this should only be required when restoring all GPRs */
- std r11,GPR9(r1)
- std r11,GPR10(r1)
- std r11,GPR11(r1)
- std r11,GPR12(r1)
- std r9,GPR13(r1)
- SAVE_NVGPRS(r1)
- std r11,_XER(r1)
- std r11,_CTR(r1)
- mflr r10
-
- /*
- * This clears CR0.SO (bit 28), which is the error indication on
- * return from this system call.
- */
- rldimi r12,r11,28,(63-28)
- li r11,0xc00
- std r10,_LINK(r1)
- std r11,_TRAP(r1)
- std r12,_CCR(r1)
- addi r10,r1,STACK_FRAME_OVERHEAD
- ld r11,exception_marker@toc(r2)
- std r11,-16(r10) /* "regshere" marker */
-
- /*
- * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
- * would clobber syscall parameters. Also we always enter with IRQs
- * enabled and nothing pending. system_call_exception() will call
- * trace_hardirqs_off().
- */
- li r11,IRQS_ALL_DISABLED
- li r12,PACA_IRQ_HARD_DIS
- stb r11,PACAIRQSOFTMASK(r13)
- stb r12,PACAIRQHAPPENED(r13)
-
- /* Calling convention has r9 = orig r0, r10 = regs */
- mr r9,r0
- bl system_call_exception
-
-.Lsyscall_exit:
- addi r4,r1,STACK_FRAME_OVERHEAD
- li r5,0 /* !scv */
- bl syscall_exit_prepare
-
- ld r2,_CCR(r1)
- ld r4,_NIP(r1)
- ld r5,_MSR(r1)
- ld r6,_LINK(r1)
-
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
- mtspr SPRN_SRR0,r4
- mtspr SPRN_SRR1,r5
- mtlr r6
-
- cmpdi r3,0
- bne .Lsyscall_restore_regs
- /* Zero volatile regs that may contain sensitive kernel data */
- li r0,0
- li r4,0
- li r5,0
- li r6,0
- li r7,0
- li r8,0
- li r9,0
- li r10,0
- li r11,0
- li r12,0
- mtctr r0
- mtspr SPRN_XER,r0
-.Lsyscall_restore_regs_cont:
-
-BEGIN_FTR_SECTION
- HMT_MEDIUM_LOW
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
- /*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * The value of AMR only matters while we're in the kernel.
- */
- mtcr r2
- ld r2,GPR2(r1)
- ld r3,GPR3(r1)
- ld r13,GPR13(r1)
- ld r1,GPR1(r1)
- RFI_TO_USER
- b . /* prevent speculative execution */
-
-.Lsyscall_restore_regs:
- ld r3,_CTR(r1)
- ld r4,_XER(r1)
- REST_NVGPRS(r1)
- mtctr r3
- mtspr SPRN_XER,r4
- ld r0,GPR0(r1)
- REST_8GPRS(4, r1)
- ld r12,GPR12(r1)
- b .Lsyscall_restore_regs_cont
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-.Ltabort_syscall:
- /* Firstly we need to enable TM in the kernel */
- mfmsr r10
- li r9, 1
- rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r10, 0
-
- /* tabort, this dooms the transaction, nothing else */
- li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
- TABORT(R9)
-
- /*
- * Return directly to userspace. We have corrupted user register state,
- * but userspace will never see that register state. Execution will
- * resume after the tbegin of the aborted transaction with the
- * checkpointed register state.
- */
- li r9, MSR_RI
- andc r10, r10, r9
- mtmsrd r10, 1
- mtspr SPRN_SRR0, r11
- mtspr SPRN_SRR1, r12
- RFI_TO_USER
- b . /* prevent speculative execution */
-#endif
-
-#ifdef CONFIG_PPC_BOOK3S
-_GLOBAL(ret_from_fork_scv)
- bl schedule_tail
- REST_NVGPRS(r1)
- li r3,0 /* fork() return value */
- b .Lsyscall_vectored_common_exit
-#endif
-
-_GLOBAL(ret_from_fork)
- bl schedule_tail
- REST_NVGPRS(r1)
- li r3,0 /* fork() return value */
- b .Lsyscall_exit
-
-_GLOBAL(ret_from_kernel_thread)
- bl schedule_tail
- REST_NVGPRS(r1)
- mtctr r14
- mr r3,r15
-#ifdef PPC64_ELF_ABI_v2
- mr r12,r14
-#endif
- bctrl
- li r3,0
- b .Lsyscall_exit
-
-#ifdef CONFIG_PPC_BOOK3E
-/* Save non-volatile GPRs, if not already saved. */
-_GLOBAL(save_nvgprs)
- ld r11,_TRAP(r1)
- andi. r0,r11,1
- beqlr-
- SAVE_NVGPRS(r1)
- clrrdi r0,r11,1
- std r0,_TRAP(r1)
- blr
-_ASM_NOKPROBE_SYMBOL(save_nvgprs);
-#endif
#ifdef CONFIG_PPC_BOOK3S_64
@@ -645,151 +264,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
addi r1,r1,SWITCH_FRAME_SIZE
blr
-#ifdef CONFIG_PPC_BOOK3S
- /*
- * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
- * touched, no exit work created, then this can be used.
- */
- .balign IFETCH_ALIGN_BYTES
- .globl fast_interrupt_return
-fast_interrupt_return:
-_ASM_NOKPROBE_SYMBOL(fast_interrupt_return)
- kuap_check_amr r3, r4
- ld r5,_MSR(r1)
- andi. r0,r5,MSR_PR
- bne .Lfast_user_interrupt_return_amr
- kuap_kernel_restore r3, r4
- andi. r0,r5,MSR_RI
- li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
- bne+ .Lfast_kernel_interrupt_return
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl unrecoverable_exception
- b . /* should not get here */
-
- .balign IFETCH_ALIGN_BYTES
- .globl interrupt_return
-interrupt_return:
-_ASM_NOKPROBE_SYMBOL(interrupt_return)
- ld r4,_MSR(r1)
- andi. r0,r4,MSR_PR
- beq .Lkernel_interrupt_return
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl interrupt_exit_user_prepare
- cmpdi r3,0
- bne- .Lrestore_nvgprs
-
-.Lfast_user_interrupt_return_amr:
- kuap_user_restore r3, r4
-.Lfast_user_interrupt_return:
- ld r11,_NIP(r1)
- ld r12,_MSR(r1)
-BEGIN_FTR_SECTION
- ld r10,_PPR(r1)
- mtspr SPRN_PPR,r10
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
-
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-FTR_SECTION_ELSE
- ldarx r0,0,r1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
- ld r3,_CCR(r1)
- ld r4,_LINK(r1)
- ld r5,_CTR(r1)
- ld r6,_XER(r1)
- li r0,0
-
- REST_4GPRS(7, r1)
- REST_2GPRS(11, r1)
- REST_GPR(13, r1)
-
- mtcr r3
- mtlr r4
- mtctr r5
- mtspr SPRN_XER,r6
-
- REST_4GPRS(2, r1)
- REST_GPR(6, r1)
- REST_GPR(0, r1)
- REST_GPR(1, r1)
- RFI_TO_USER
- b . /* prevent speculative execution */
-
-.Lrestore_nvgprs:
- REST_NVGPRS(r1)
- b .Lfast_user_interrupt_return
-
- .balign IFETCH_ALIGN_BYTES
-.Lkernel_interrupt_return:
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl interrupt_exit_kernel_prepare
-
-.Lfast_kernel_interrupt_return:
- cmpdi cr1,r3,0
- ld r11,_NIP(r1)
- ld r12,_MSR(r1)
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
-
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-FTR_SECTION_ELSE
- ldarx r0,0,r1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
- ld r3,_LINK(r1)
- ld r4,_CTR(r1)
- ld r5,_XER(r1)
- ld r6,_CCR(r1)
- li r0,0
-
- REST_4GPRS(7, r1)
- REST_2GPRS(11, r1)
-
- mtlr r3
- mtctr r4
- mtspr SPRN_XER,r5
-
- /*
- * Leaving a stale exception_marker on the stack can confuse
- * the reliable stack unwinder later on. Clear it.
- */
- std r0,STACK_FRAME_OVERHEAD-16(r1)
-
- REST_4GPRS(2, r1)
-
- bne- cr1,1f /* emulate stack store */
- mtcr r6
- REST_GPR(6, r1)
- REST_GPR(0, r1)
- REST_GPR(1, r1)
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-
-1: /*
- * Emulate stack store with update. New r1 value was already calculated
- * and updated in our interrupt regs by emulate_loadstore, but we can't
- * store the previous value of r1 to the stack before re-loading our
- * registers from it, otherwise they could be clobbered. Use
- * PACA_EXGEN as temporary storage to hold the store data, as
- * interrupts are disabled here so it won't be clobbered.
- */
- mtcr r6
- std r9,PACA_EXGEN+0(r13)
- addi r9,r1,INT_FRAME_SIZE /* get original r1 */
- REST_GPR(6, r1)
- REST_GPR(0, r1)
- REST_GPR(1, r1)
- std r9,0(r1) /* perform store component of stdu */
- ld r9,PACA_EXGEN+0(r13)
-
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-#endif /* CONFIG_PPC_BOOK3S */
-
#ifdef CONFIG_PPC_RTAS
/*
* On CHRP, the Run-Time Abstraction Services (RTAS) have to be
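The 64-bit syscall entry/exit asm deleted above is consolidated elsewhere in this series (only the removal is visible here), leaving entry_64.S with the RTAS and context-switch glue. A hedged sketch of the exit decision the removed fast path was built around (syscall_exit_prepare() is the real helper; the labels in comments are the deleted ones):

/* Sketch: nonzero from syscall_exit_prepare() means a full register
 * restore (ptrace, signal work); zero takes the scrubbed fast path. */
long r = syscall_exit_prepare(regs->gpr[3], regs, scv);
if (r) {
	/* .Lsyscall_restore_regs: REST_NVGPRS and reload volatiles */
} else {
	/* fast path: zero volatile regs, RFI_TO_USER / RFSCV_TO_USER */
}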
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 2ed14d4a47f5..93b0f3ec8fb0 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -38,9 +38,9 @@ static int __init early_init_dt_scan_epapr(unsigned long node,
for (i = 0; i < (len / 4); i++) {
struct ppc_inst inst = ppc_inst(be32_to_cpu(insts[i]));
- patch_instruction((struct ppc_inst *)(epapr_hypercall_start + i), inst);
+ patch_instruction(epapr_hypercall_start + i, inst);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
- patch_instruction((struct ppc_inst *)(epapr_ev_idle_start + i), inst);
+ patch_instruction(epapr_ev_idle_start + i, inst);
#endif
}
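The explicit struct ppc_inst * casts are gone because patch_instruction() now takes the patch site as a plain instruction-word pointer, while the value being patched stays wrapped in ppc_inst(). A usage sketch (the extern declaration is an assumption):

/* Sketch: patch sites are u32 arrays; values remain struct ppc_inst. */
extern u32 epapr_ev_idle_start[];
patch_instruction(epapr_ev_idle_start, ppc_inst(PPC_RAW_NOP()));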
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index e8eb9992a270..1401787b0b93 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -26,6 +26,10 @@
#include <asm/feature-fixups.h>
#include <asm/context_tracking.h>
+/* 64e interrupt returns always use SRR registers */
+#define fast_interrupt_return fast_interrupt_return_srr
+#define interrupt_return interrupt_return_srr
+
/* XXX This will ultimately add space for a special exception save
* structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
* when taking special interrupts. For now we don't support that,
@@ -63,9 +67,6 @@
ld reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
special_reg_save:
- lbz r9,PACAIRQHAPPENED(r13)
- RECONCILE_IRQ_STATE(r3,r4)
-
/*
* We only need (or have stack space) to save this stuff if
* we interrupted the kernel.
@@ -119,15 +120,11 @@ BEGIN_FTR_SECTION
mtspr SPRN_MAS5,r10
mtspr SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
- SPECIAL_EXC_STORE(r9,IRQHAPPENED)
-
mfspr r10,SPRN_DEAR
SPECIAL_EXC_STORE(r10,DEAR)
mfspr r10,SPRN_ESR
SPECIAL_EXC_STORE(r10,ESR)
- lbz r10,PACAIRQSOFTMASK(r13)
- SPECIAL_EXC_STORE(r10,SOFTE)
ld r10,_NIP(r1)
SPECIAL_EXC_STORE(r10,CSRR0)
ld r10,_MSR(r1)
@@ -139,7 +136,8 @@ ret_from_level_except:
ld r3,_MSR(r1)
andi. r3,r3,MSR_PR
beq 1f
- b ret_from_except
+ REST_NVGPRS(r1)
+ b interrupt_return
1:
LOAD_REG_ADDR(r11,extlb_level_exc)
@@ -193,27 +191,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
- lbz r6,PACAIRQSOFTMASK(r13)
- ld r5,SOFTE(r1)
-
- /* Interrupts had better not already be enabled... */
- tweqi r6,IRQS_ENABLED
-
- andi. r6,r5,IRQS_DISABLED
- bne 1f
-
- TRACE_ENABLE_INTS
- stb r5,PACAIRQSOFTMASK(r13)
-1:
- /*
- * Restore PACAIRQHAPPENED rather than setting it based on
- * the return MSR[EE], since we could have interrupted
- * __check_irq_replay() or other inconsistent transitory
- * states that must remain that way.
- */
- SPECIAL_EXC_LOAD(r10,IRQHAPPENED)
- stb r10,PACAIRQHAPPENED(r13)
-
SPECIAL_EXC_LOAD(r10,DEAR)
mtspr SPRN_DEAR,r10
SPECIAL_EXC_LOAD(r10,ESR)
@@ -367,6 +344,12 @@ ret_from_mc_except:
andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \
bne masked_interrupt_book3e_##n
+/*
+ * Additional regs must be re-loaded from paca before EXCEPTION_COMMON* is
+ * called, because that does SAVE_NVGPRS which must see the original register
+ * values; otherwise the scratch values might be restored when exiting the
+ * interrupt.
+ */
#define PROLOG_ADDITION_2REGS_GEN(n) \
std r14,PACA_EXGEN+EX_R14(r13); \
std r15,PACA_EXGEN+EX_R15(r13)
@@ -417,14 +400,15 @@ exc_##n##_common: \
std r6,_LINK(r1); \
std r7,_CTR(r1); \
std r8,_XER(r1); \
- li r3,(n)+1; /* indicate partial regs in trap */ \
+ li r3,(n); /* regs.trap vector */ \
std r9,0(r1); /* store stack frame back link */ \
std r10,_CCR(r1); /* store orig CR in stackframe */ \
std r9,GPR1(r1); /* store stack frame back link */ \
std r11,SOFTE(r1); /* and save it to stackframe */ \
std r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \
std r3,_TRAP(r1); /* set trap number */ \
- std r0,RESULT(r1); /* clear regs->result */
+ std r0,RESULT(r1); /* clear regs->result */ \
+ SAVE_NVGPRS(r1);
#define EXCEPTION_COMMON(n) \
EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN)
@@ -435,28 +419,6 @@ exc_##n##_common: \
#define EXCEPTION_COMMON_DBG(n) \
EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG)
-/*
- * This is meant for exceptions that don't immediately hard-enable. We
- * set a bit in paca->irq_happened to ensure that a subsequent call to
- * arch_local_irq_restore() will properly hard-enable and avoid the
- * fast-path, and then reconcile irq state.
- */
-#define INTS_DISABLE RECONCILE_IRQ_STATE(r3,r4)
-
-/*
- * This is called by exceptions that don't use INTS_DISABLE (that did not
- * touch irq indicators in the PACA). This will restore MSR:EE to it's
- * previous value
- *
- * XXX In the long run, we may want to open-code it in order to separate the
- * load from the wrtee, thus limiting the latency caused by the dependency,
- * but at this point, I'll favor code clarity until we have a near-final
- * implementation
- */
-#define INTS_RESTORE_HARD \
- ld r11,_MSR(r1); \
- wrtee r11;
-
/* XXX FIXME: Restore r14/r15 when necessary */
#define BAD_STACK_TRAMPOLINE(n) \
exc_##n##_bad_stack: \
@@ -505,12 +467,11 @@ exc_##n##_bad_stack: \
START_EXCEPTION(label); \
NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
EXCEPTION_COMMON(trapnum) \
- INTS_DISABLE; \
ack(r8); \
CHECK_NAPPING(); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
bl hdlr; \
- b ret_from_except_lite;
+ b interrupt_return
/* This value is used to mark exception frames on the stack. */
.section ".toc","aw"
@@ -561,11 +522,10 @@ __end_interrupts:
CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_CRIT(0x100)
- bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD
- bl unknown_exception
+ bl unknown_nmi_exception
b ret_from_crit_except
/* Machine Check Interrupt */
@@ -573,7 +533,6 @@ __end_interrupts:
MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_MC(0x000)
- bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -586,8 +545,11 @@ __end_interrupts:
PROLOG_ADDITION_2REGS)
mfspr r14,SPRN_DEAR
mfspr r15,SPRN_ESR
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
EXCEPTION_COMMON(0x300)
- INTS_DISABLE
b storage_fault_common
/* Instruction Storage Interrupt */
@@ -596,8 +558,11 @@ __end_interrupts:
PROLOG_ADDITION_2REGS)
li r15,0
mr r14,r10
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
EXCEPTION_COMMON(0x400)
- INTS_DISABLE
b storage_fault_common
/* External Input Interrupt */
@@ -610,6 +575,10 @@ __end_interrupts:
PROLOG_ADDITION_2REGS)
mfspr r14,SPRN_DEAR
mfspr r15,SPRN_ESR
+ std r14,_DAR(r1)
+ std r15,_DSISR(r1)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
EXCEPTION_COMMON(0x600)
b alignment_more /* no room, go out of line */
@@ -618,14 +587,13 @@ __end_interrupts:
NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
PROLOG_ADDITION_1REG)
mfspr r14,SPRN_ESR
- EXCEPTION_COMMON(0x700)
- INTS_DISABLE
std r14,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
ld r14,PACA_EXGEN+EX_R14(r13)
- bl save_nvgprs
+ EXCEPTION_COMMON(0x700)
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl program_check_exception
- b ret_from_except
+ REST_NVGPRS(r1)
+ b interrupt_return
/* Floating Point Unavailable Interrupt */
START_EXCEPTION(fp_unavailable);
@@ -637,12 +605,10 @@ __end_interrupts:
andi. r0,r12,MSR_PR;
beq- 1f
bl load_up_fpu
- b fast_exception_return
-1: INTS_DISABLE
- bl save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
+ b fast_interrupt_return
+1: addi r3,r1,STACK_FRAME_OVERHEAD
bl kernel_fp_unavailable_exception
- b ret_from_except
+ b interrupt_return
/* Altivec Unavailable Interrupt */
START_EXCEPTION(altivec_unavailable);
@@ -656,15 +622,13 @@ BEGIN_FTR_SECTION
andi. r0,r12,MSR_PR;
beq- 1f
bl load_up_altivec
- b fast_exception_return
+ b fast_interrupt_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
- INTS_DISABLE
- bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_exception
- b ret_from_except
+ b interrupt_return
/* AltiVec Assist */
START_EXCEPTION(altivec_assist);
@@ -672,17 +636,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
BOOKE_INTERRUPT_ALTIVEC_ASSIST,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x220)
- INTS_DISABLE
- bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
bl altivec_assist_exception
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ REST_NVGPRS(r1)
#else
bl unknown_exception
#endif
- b ret_from_except
+ b interrupt_return
/* Decrementer Interrupt */
@@ -698,14 +661,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_CRIT(0x9f0)
- bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_BOOKE_WDT
bl WatchdogException
#else
- bl unknown_exception
+ bl unknown_nmi_exception
#endif
b ret_from_crit_except
@@ -722,11 +684,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0xf20)
- INTS_DISABLE
- bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
- b ret_from_except
+ b interrupt_return
/* Debug exception as a critical interrupt */
START_EXCEPTION(debug_crit);
@@ -787,14 +747,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
* normal exception
*/
mfspr r14,SPRN_DBSR
- EXCEPTION_COMMON_CRIT(0xd00)
std r14,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
ld r14,PACA_EXCRIT+EX_R14(r13)
ld r15,PACA_EXCRIT+EX_R15(r13)
- bl save_nvgprs
+ EXCEPTION_COMMON_CRIT(0xd00)
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl DebugException
- b ret_from_except
+ REST_NVGPRS(r1)
+ b interrupt_return
kernel_dbg_exc:
b . /* NYI */
@@ -858,25 +818,23 @@ kernel_dbg_exc:
* normal exception
*/
mfspr r14,SPRN_DBSR
- EXCEPTION_COMMON_DBG(0xd08)
- INTS_DISABLE
std r14,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
ld r14,PACA_EXDBG+EX_R14(r13)
ld r15,PACA_EXDBG+EX_R15(r13)
- bl save_nvgprs
+ EXCEPTION_COMMON_DBG(0xd08)
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl DebugException
- b ret_from_except
+ REST_NVGPRS(r1)
+ b interrupt_return
START_EXCEPTION(perfmon);
NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x260)
- INTS_DISABLE
CHECK_NAPPING()
addi r3,r1,STACK_FRAME_OVERHEAD
bl performance_monitor_exception
- b ret_from_except_lite
+ b interrupt_return
/* Doorbell interrupt */
MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
@@ -887,11 +845,10 @@ kernel_dbg_exc:
CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_CRIT(0x2a0)
- bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD
- bl unknown_exception
+ bl unknown_nmi_exception
b ret_from_crit_except
/*
@@ -903,21 +860,18 @@ kernel_dbg_exc:
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x2c0)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl save_nvgprs
- INTS_RESTORE_HARD
bl unknown_exception
- b ret_from_except
+ b interrupt_return
/* Guest Doorbell critical Interrupt */
START_EXCEPTION(guest_doorbell_crit);
CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_CRIT(0x2e0)
- bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD
- bl unknown_exception
+ bl unknown_nmi_exception
b ret_from_crit_except
/* Hypervisor call */
@@ -926,10 +880,8 @@ kernel_dbg_exc:
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x310)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl save_nvgprs
- INTS_RESTORE_HARD
bl unknown_exception
- b ret_from_except
+ b interrupt_return
/* Embedded Hypervisor privileged */
START_EXCEPTION(ehpriv);
@@ -937,10 +889,8 @@ kernel_dbg_exc:
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x320)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl save_nvgprs
- INTS_RESTORE_HARD
bl unknown_exception
- b ret_from_except
+ b interrupt_return
/* LRAT Error interrupt */
START_EXCEPTION(lrat_error);
@@ -948,10 +898,36 @@ kernel_dbg_exc:
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x340)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl save_nvgprs
- INTS_RESTORE_HARD
bl unknown_exception
- b ret_from_except
+ b interrupt_return
+
+.macro SEARCH_RESTART_TABLE
+#ifdef CONFIG_RELOCATABLE
+ ld r11,PACATOC(r13)
+ ld r14,__start___restart_table@got(r11)
+ ld r15,__stop___restart_table@got(r11)
+#else
+ LOAD_REG_IMMEDIATE_SYM(r14, r11, __start___restart_table)
+ LOAD_REG_IMMEDIATE_SYM(r15, r11, __stop___restart_table)
+#endif
+300:
+ cmpd r14,r15
+ beq 302f
+ ld r11,0(r14)
+ cmpld r10,r11
+ blt 301f
+ ld r11,8(r14)
+ cmpld r10,r11
+ bge 301f
+ ld r11,16(r14)
+ b 303f
+301:
+ addi r14,r14,24
+ b 300b
+302:
+ li r11,0
+303:
+.endm
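SEARCH_RESTART_TABLE walks a table of 24-byte entries to decide whether the interrupted NIP (r10) falls inside a restartable region, leaving the fixup address (or 0) in r11. The same logic in C, as a sketch (the struct layout is inferred from the 0/8/16 offsets above):

/* Sketch: each entry is {start, end, fixup}; return fixup or 0. */
struct restart_entry {
	unsigned long start, end, fixup;
};

static unsigned long search_restart_table(const struct restart_entry *p,
					  const struct restart_entry *stop,
					  unsigned long nip)
{
	for (; p < stop; p++)			/* 300:/301: loop */
		if (nip >= p->start && nip < p->end)
			return p->fixup;	/* ld r11,16(r14) */
	return 0;				/* 302: li r11,0  */
}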
/*
* An interrupt came in while soft-disabled; we mark paca->irq_happened
@@ -961,6 +937,9 @@ kernel_dbg_exc:
*/
.macro masked_interrupt_book3e paca_irq full_mask
+ std r14,PACA_EXGEN+EX_R14(r13)
+ std r15,PACA_EXGEN+EX_R15(r13)
+
lbz r10,PACAIRQHAPPENED(r13)
.if \full_mask == 1
ori r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
@@ -970,15 +949,23 @@ kernel_dbg_exc:
stb r10,PACAIRQHAPPENED(r13)
.if \full_mask == 1
- rldicl r10,r11,48,1 /* clear MSR_EE */
- rotldi r11,r10,16
+ xori r11,r11,MSR_EE /* clear MSR_EE */
mtspr SPRN_SRR1,r11
.endif
+ mfspr r10,SPRN_SRR0
+ SEARCH_RESTART_TABLE
+ cmpdi r11,0
+ beq 1f
+ mtspr SPRN_SRR0,r11 /* return to restart address */
+1:
+
lwz r11,PACA_EXGEN+EX_CR(r13)
mtcr r11
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
mfspr r13,SPRN_SPRG_GEN_SCRATCH
rfi
b .
@@ -1005,316 +992,19 @@ masked_interrupt_book3e_0x2c0:
* original values stashed away in the PACA
*/
storage_fault_common:
- std r14,_DAR(r1)
- std r15,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- ld r14,PACA_EXGEN+EX_R14(r13)
- ld r15,PACA_EXGEN+EX_R15(r13)
bl do_page_fault
- cmpdi r3,0
- bne- 1f
- b ret_from_except_lite
-1: bl save_nvgprs
- mr r4,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl __bad_page_fault
- b ret_from_except
+ b interrupt_return
/*
* Alignment exception doesn't fit entirely in the 0x100 bytes so it
* continues here.
*/
alignment_more:
- std r14,_DAR(r1)
- std r15,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- ld r14,PACA_EXGEN+EX_R14(r13)
- ld r15,PACA_EXGEN+EX_R15(r13)
- bl save_nvgprs
- INTS_RESTORE_HARD
bl alignment_exception
- b ret_from_except
-
- .align 7
-_GLOBAL(ret_from_except)
- ld r11,_TRAP(r1)
- andi. r0,r11,1
- bne ret_from_except_lite
REST_NVGPRS(r1)
-
-_GLOBAL(ret_from_except_lite)
- /*
- * Disable interrupts so that current_thread_info()->flags
- * can't change between when we test it and when we return
- * from the interrupt.
- */
- wrteei 0
-
- ld r9, PACA_THREAD_INFO(r13)
- ld r3,_MSR(r1)
- ld r10,PACACURRENT(r13)
- ld r4,TI_FLAGS(r9)
- andi. r3,r3,MSR_PR
- beq resume_kernel
- lwz r3,(THREAD+THREAD_DBCR0)(r10)
-
- /* Check current_thread_info()->flags */
- andi. r0,r4,_TIF_USER_WORK_MASK
- bne 1f
- /*
- * Check to see if the dbcr0 register is set up to debug.
- * Use the internal debug mode bit to do this.
- */
- andis. r0,r3,DBCR0_IDM@h
- beq restore
- mfmsr r0
- rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */
- mtmsr r0
- mtspr SPRN_DBCR0,r3
- li r10, -1
- mtspr SPRN_DBSR,r10
- b restore
-1: andi. r0,r4,_TIF_NEED_RESCHED
- beq 2f
- bl restore_interrupts
- SCHEDULE_USER
- b ret_from_except_lite
-2:
- bl save_nvgprs
- /*
- * Use a non volatile GPR to save and restore our thread_info flags
- * across the call to restore_interrupts.
- */
- mr r30,r4
- bl restore_interrupts
- mr r4,r30
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_notify_resume
- b ret_from_except
-
-resume_kernel:
- /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
- andis. r8,r4,_TIF_EMULATE_STACK_STORE@h
- beq+ 1f
-
- addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
-
- ld r3,GPR1(r1)
- subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
- mr r4,r1 /* src: current exception frame */
- mr r1,r3 /* Reroute the trampoline frame to r1 */
-
- /* Copy from the original to the trampoline. */
- li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
- li r6,0 /* start offset: 0 */
- mtctr r5
-2: ldx r0,r6,r4
- stdx r0,r6,r3
- addi r6,r6,8
- bdnz 2b
-
- /* Do real store operation to complete stdu */
- ld r5,GPR1(r1)
- std r8,0(r5)
-
- /* Clear _TIF_EMULATE_STACK_STORE flag */
- lis r11,_TIF_EMULATE_STACK_STORE@h
- addi r5,r9,TI_FLAGS
-0: ldarx r4,0,r5
- andc r4,r4,r11
- stdcx. r4,0,r5
- bne- 0b
-1:
-
-#ifdef CONFIG_PREEMPT
- /* Check if we need to preempt */
- andi. r0,r4,_TIF_NEED_RESCHED
- beq+ restore
- /* Check that preempt_count() == 0 and interrupts are enabled */
- lwz r8,TI_PREEMPT(r9)
- cmpwi cr0,r8,0
- bne restore
- ld r0,SOFTE(r1)
- andi. r0,r0,IRQS_DISABLED
- bne restore
-
- /*
- * Here we are preempting the current task. We want to make
- * sure we are soft-disabled first and reconcile irq state.
- */
- RECONCILE_IRQ_STATE(r3,r4)
- bl preempt_schedule_irq
-
- /*
- * arch_local_irq_restore() from preempt_schedule_irq above may
- * enable hard interrupt but we really should disable interrupts
- * when we return from the interrupt, and so that we don't get
- * interrupted after loading SRR0/1.
- */
- wrteei 0
-#endif /* CONFIG_PREEMPT */
-
-restore:
- /*
- * This is the main kernel exit path. First we check if we
- * are about to re-enable interrupts
- */
- ld r5,SOFTE(r1)
- lbz r6,PACAIRQSOFTMASK(r13)
- andi. r5,r5,IRQS_DISABLED
- bne .Lrestore_irq_off
-
- /* We are enabling, were we already enabled ? Yes, just return */
- andi. r6,r6,IRQS_DISABLED
- beq cr0,fast_exception_return
-
- /*
- * We are about to soft-enable interrupts (we are hard disabled
- * at this point). We check if there's anything that needs to
- * be replayed first.
- */
- lbz r0,PACAIRQHAPPENED(r13)
- cmpwi cr0,r0,0
- bne- .Lrestore_check_irq_replay
-
- /*
- * Get here when nothing happened while soft-disabled, just
- * soft-enable and move-on. We will hard-enable as a side
- * effect of rfi
- */
-.Lrestore_no_replay:
- TRACE_ENABLE_INTS
- li r0,IRQS_ENABLED
- stb r0,PACAIRQSOFTMASK(r13);
-
-/* This is the return from load_up_fpu fast path which could do with
- * less GPR restores in fact, but for now we have a single return path
- */
-fast_exception_return:
- wrteei 0
-1: mr r0,r13
- ld r10,_MSR(r1)
- REST_4GPRS(2, r1)
- andi. r6,r10,MSR_PR
- REST_2GPRS(6, r1)
- beq 1f
- ACCOUNT_CPU_USER_EXIT(r13, r10, r11)
- ld r0,GPR13(r1)
-
-1: stdcx. r0,0,r1 /* to clear the reservation */
-
- ld r8,_CCR(r1)
- ld r9,_LINK(r1)
- ld r10,_CTR(r1)
- ld r11,_XER(r1)
- mtcr r8
- mtlr r9
- mtctr r10
- mtxer r11
- REST_2GPRS(8, r1)
- ld r10,GPR10(r1)
- ld r11,GPR11(r1)
- ld r12,GPR12(r1)
- mtspr SPRN_SPRG_GEN_SCRATCH,r0
-
- std r10,PACA_EXGEN+EX_R10(r13);
- std r11,PACA_EXGEN+EX_R11(r13);
- ld r10,_NIP(r1)
- ld r11,_MSR(r1)
- ld r0,GPR0(r1)
- ld r1,GPR1(r1)
- mtspr SPRN_SRR0,r10
- mtspr SPRN_SRR1,r11
- ld r10,PACA_EXGEN+EX_R10(r13)
- ld r11,PACA_EXGEN+EX_R11(r13)
- mfspr r13,SPRN_SPRG_GEN_SCRATCH
- rfi
-
- /*
- * We are returning to a context with interrupts soft disabled.
- *
- * However, we may also about to hard enable, so we need to
- * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
- * or that bit can get out of sync and bad things will happen
- */
-.Lrestore_irq_off:
- ld r3,_MSR(r1)
- lbz r7,PACAIRQHAPPENED(r13)
- andi. r0,r3,MSR_EE
- beq 1f
- rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
- stb r7,PACAIRQHAPPENED(r13)
-1:
-#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
- /* The interrupt should not have soft enabled. */
- lbz r7,PACAIRQSOFTMASK(r13)
-1: tdeqi r7,IRQS_ENABLED
- EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
-#endif
- b fast_exception_return
-
- /*
- * Something did happen, check if a re-emit is needed
- * (this also clears paca->irq_happened)
- */
-.Lrestore_check_irq_replay:
- /* XXX: We could implement a fast path here where we check
- * for irq_happened being just 0x01, in which case we can
- * clear it and return. That means that we would potentially
- * miss a decrementer having wrapped all the way around.
- *
- * Still, this might be useful for things like hash_page
- */
- bl __check_irq_replay
- cmpwi cr0,r3,0
- beq .Lrestore_no_replay
-
- /*
- * We need to re-emit an interrupt. We do so by re-using our
- * existing exception frame. We first change the trap value,
- * but we need to ensure we preserve the low nibble of it
- */
- ld r4,_TRAP(r1)
- clrldi r4,r4,60
- or r4,r4,r3
- std r4,_TRAP(r1)
-
- /*
- * PACA_IRQ_HARD_DIS won't always be set here, so set it now
- * to reconcile the IRQ state. Tracing is already accounted for.
- */
- lbz r4,PACAIRQHAPPENED(r13)
- ori r4,r4,PACA_IRQ_HARD_DIS
- stb r4,PACAIRQHAPPENED(r13)
-
- /*
- * Then find the right handler and call it. Interrupts are
- * still soft-disabled and we keep them that way.
- */
- cmpwi cr0,r3,0x500
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl do_IRQ
- b ret_from_except
-1: cmpwi cr0,r3,0x900
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl timer_interrupt
- b ret_from_except
-#ifdef CONFIG_PPC_DOORBELL
-1:
- cmpwi cr0,r3,0x280
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl doorbell_exception
-#endif /* CONFIG_PPC_DOORBELL */
-1: b ret_from_except /* What else to do here ? */
-
-_ASM_NOKPROBE_SYMBOL(ret_from_except);
-_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
-_ASM_NOKPROBE_SYMBOL(resume_kernel);
-_ASM_NOKPROBE_SYMBOL(restore);
-_ASM_NOKPROBE_SYMBOL(fast_exception_return);
+ b interrupt_return
/*
* Trampolines used when spotting a bad kernel stack pointer in
@@ -1635,7 +1325,12 @@ a2_tlbinit_code_start:
a2_tlbinit_after_linear_map:
/* Now we branch the new virtual address mapped by this entry */
+#ifdef CONFIG_RELOCATABLE
+ ld r5,PACATOC(r13)
+ ld r3,1f@got(r5)
+#else
LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)
+#endif
mtctr r3
bctr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 60d3051a8bc8..4aec59a77d4c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -21,22 +21,6 @@
#include <asm/feature-fixups.h>
#include <asm/kup.h>
-/* PACA save area offsets (exgen, exmc, etc) */
-#define EX_R9 0
-#define EX_R10 8
-#define EX_R11 16
-#define EX_R12 24
-#define EX_R13 32
-#define EX_DAR 40
-#define EX_DSISR 48
-#define EX_CCR 52
-#define EX_CFAR 56
-#define EX_PPR 64
-#define EX_CTR 72
-.if EX_SIZE != 10
- .error "EX_SIZE is wrong"
-.endif
-
/*
* Following are fixed section helper macros.
*
@@ -133,7 +117,6 @@ name:
#define IBRANCH_TO_COMMON .L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON .L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK .L_IMASK_\name\() /* IRQ soft-mask bit */
-#define IKVM_SKIP .L_IKVM_SKIP_\name\() /* Generate KVM skip handler */
#define IKVM_REAL .L_IKVM_REAL_\name\() /* Real entry tests KVM */
#define __IKVM_REAL(name) .L_IKVM_REAL_ ## name
#define IKVM_VIRT .L_IKVM_VIRT_\name\() /* Virt entry tests KVM */
@@ -190,9 +173,6 @@ do_define_int n
.ifndef IMASK
IMASK=0
.endif
- .ifndef IKVM_SKIP
- IKVM_SKIP=0
- .endif
.ifndef IKVM_REAL
IKVM_REAL=0
.endif
@@ -207,8 +187,6 @@ do_define_int n
.endif
.endm
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
* All interrupts which set HSRR registers, as well as SRESET and MCE and
* syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
@@ -238,88 +216,28 @@ do_define_int n
/*
* If an interrupt is taken while a guest is running, it is immediately routed
- * to KVM to handle. If both HV and PR KVM arepossible, KVM interrupts go first
- * to kvmppc_interrupt_hv, which handles the PR guest case.
+ * to KVM to handle.
*/
-#define kvmppc_interrupt kvmppc_interrupt_hv
-#else
-#define kvmppc_interrupt kvmppc_interrupt_pr
-#endif
-.macro KVMTEST name
+.macro KVMTEST name handler
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
lbz r10,HSTATE_IN_GUEST(r13)
cmpwi r10,0
- bne \name\()_kvm
-.endm
-
-.macro GEN_KVM name
- .balign IFETCH_ALIGN_BYTES
-\name\()_kvm:
-
- .if IKVM_SKIP
- cmpwi r10,KVM_GUEST_MODE_SKIP
- beq 89f
- .else
-BEGIN_FTR_SECTION
- ld r10,IAREA+EX_CFAR(r13)
- std r10,HSTATE_CFAR(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
- .endif
-
- ld r10,IAREA+EX_CTR(r13)
- mtctr r10
-BEGIN_FTR_SECTION
- ld r10,IAREA+EX_PPR(r13)
- std r10,HSTATE_PPR(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- ld r11,IAREA+EX_R11(r13)
- ld r12,IAREA+EX_R12(r13)
- std r12,HSTATE_SCRATCH0(r13)
- sldi r12,r9,32
- ld r9,IAREA+EX_R9(r13)
- ld r10,IAREA+EX_R10(r13)
/* HSRR variants have the 0x2 bit added to their trap number */
.if IHSRR_IF_HVMODE
BEGIN_FTR_SECTION
- ori r12,r12,(IVEC + 0x2)
+ li r10,(IVEC + 0x2)
FTR_SECTION_ELSE
- ori r12,r12,(IVEC)
+ li r10,(IVEC)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
.elseif IHSRR
- ori r12,r12,(IVEC+ 0x2)
+ li r10,(IVEC + 0x2)
.else
- ori r12,r12,(IVEC)
+ li r10,(IVEC)
.endif
- b kvmppc_interrupt
-
- .if IKVM_SKIP
-89: mtocrf 0x80,r9
- ld r10,IAREA+EX_CTR(r13)
- mtctr r10
- ld r9,IAREA+EX_R9(r13)
- ld r10,IAREA+EX_R10(r13)
- ld r11,IAREA+EX_R11(r13)
- ld r12,IAREA+EX_R12(r13)
- .if IHSRR_IF_HVMODE
- BEGIN_FTR_SECTION
- b kvmppc_skip_Hinterrupt
- FTR_SECTION_ELSE
- b kvmppc_skip_interrupt
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif IHSRR
- b kvmppc_skip_Hinterrupt
- .else
- b kvmppc_skip_interrupt
- .endif
- .endif
-.endm
-
-#else
-.macro KVMTEST name
-.endm
-.macro GEN_KVM name
-.endm
+ bne \handler
#endif
+.endm
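
The reworked KVMTEST above replaces the old save-everything GEN_KVM path with a
single test-and-branch: the trap number travels in r10 and the target handler is
now a macro argument. A rough C model of the control flow follows; the helper
names are illustrative, not the kernel's.

	#include <stdbool.h>

	/* Assumed helpers standing in for the PACA load and the branch target. */
	extern bool paca_in_guest(void);              /* models lbz of HSTATE_IN_GUEST */
	extern void kvm_handler(unsigned int trap);   /* models "bne \handler" */

	static void kvmtest(unsigned int ivec, bool hsrr)
	{
		if (!paca_in_guest())
			return;          /* not in a guest: fall through to the host path */

		if (hsrr)
			ivec += 0x2;     /* HSRR variants add 0x2 to the trap number */

		kvm_handler(ivec);       /* r10 carries the trap into the handler */
	}
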
/*
* This is the BOOK3S interrupt entry code macro.
@@ -461,12 +379,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
.if IKVM_REAL
- KVMTEST \name
+ KVMTEST \name kvm_interrupt
.endif
ld r10,PACAKMSR(r13) /* get MSR value for kernel */
/* MSR[RI] is clear iff using SRR regs */
- .if IHSRR == EXC_HV_OR_STD
+ .if IHSRR_IF_HVMODE
BEGIN_FTR_SECTION
xori r10,r10,MSR_RI
END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
@@ -484,7 +402,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
DEFINE_FIXED_SYMBOL(\name\()_common_virt)
\name\()_common_virt:
.if IKVM_VIRT
- KVMTEST \name
+ KVMTEST \name kvm_interrupt
1:
.endif
.endif /* IVIRT */
@@ -498,7 +416,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_virt)
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
.if IKVM_REAL
- KVMTEST \name
+ KVMTEST \name kvm_interrupt
.endif
.endm
@@ -510,18 +428,31 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
/* If coming from user, skip soft-mask tests. */
andi. r10,r12,MSR_PR
- bne 2f
-
- /* Kernel code running below __end_interrupts is implicitly
- * soft-masked */
- LOAD_HANDLER(r10, __end_interrupts)
+ bne 3f
+
+ /*
+ * Kernel code running below __end_soft_masked may be
+ * implicitly soft-masked if it is within the regions
+ * in the soft mask table.
+ */
+ LOAD_HANDLER(r10, __end_soft_masked)
cmpld r11,r10
+ bge+ 1f
+
+ /* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
+ mtctr r12
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+ SEARCH_SOFT_MASK_TABLE
+ cmpdi r12,0
+ mfctr r12 /* Restore r12 to SRR1 */
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
+ beq 1f /* Not in soft-mask table */
li r10,IMASK
- blt- 1f
+ b 2f /* In soft-mask table, always mask */
/* Test the soft mask state against our interrupt's bit */
- lbz r10,PACAIRQSOFTMASK(r13)
-1: andi. r10,r10,IMASK
+1: lbz r10,PACAIRQSOFTMASK(r13)
+2: andi. r10,r10,IMASK
/* Associate vector numbers with bits in paca->irq_happened */
.if IVEC == 0x500 || IVEC == 0xea0
li r10,PACA_IRQ_EE
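
In C terms, the new entry-time masking decision above reads roughly as below.
The helpers stand in for the __end_soft_masked bound check, the
SEARCH_SOFT_MASK_TABLE walk and the PACAIRQSOFTMASK byte; all names are
illustrative only.

	#include <stdbool.h>

	extern unsigned long end_soft_masked_addr(void);      /* __end_soft_masked */
	extern bool in_soft_mask_table(unsigned long nip);    /* table walk result */
	extern unsigned char irq_soft_mask(void);             /* PACAIRQSOFTMASK */

	static bool must_mask(unsigned long nip, bool from_user, unsigned char imask)
	{
		unsigned char mask;

		if (from_user)
			return false;            /* user context is never implicitly masked */

		if (nip < end_soft_masked_addr() && in_soft_mask_table(nip))
			mask = imask;            /* implicitly soft-masked region: always mask */
		else
			mask = irq_soft_mask();  /* otherwise consult the soft-mask byte */

		return (mask & imask) != 0;
	}
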
@@ -552,7 +483,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
.if ISTACK
andi. r10,r12,MSR_PR /* See if coming from user */
-2: mr r10,r1 /* Save r1 */
+3: mr r10,r1 /* Save r1 */
subi r1,r1,INT_FRAME_SIZE /* alloc frame on kernel stack */
beq- 100f
ld r1,PACAKSAVE(r13) /* kernel stack to use */
@@ -567,6 +498,20 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
std r0,GPR0(r1) /* save r0 in stackframe */
std r10,GPR1(r1) /* save r1 in stackframe */
+ /* Mark our [H]SRRs valid for return */
+ li r10,1
+ .if IHSRR_IF_HVMODE
+ BEGIN_FTR_SECTION
+ stb r10,PACAHSRR_VALID(r13)
+ FTR_SECTION_ELSE
+ stb r10,PACASRR_VALID(r13)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+ .elseif IHSRR
+ stb r10,PACAHSRR_VALID(r13)
+ .else
+ stb r10,PACASRR_VALID(r13)
+ .endif
+
.if ISET_RI
li r10,MSR_RI
mtmsrd r10,1 /* Set MSR_RI */
@@ -659,6 +604,66 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
__GEN_COMMON_BODY \name
.endm
+.macro SEARCH_RESTART_TABLE
+#ifdef CONFIG_RELOCATABLE
+ mr r12,r2
+ ld r2,PACATOC(r13)
+ LOAD_REG_ADDR(r9, __start___restart_table)
+ LOAD_REG_ADDR(r10, __stop___restart_table)
+ mr r2,r12
+#else
+ LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
+ LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
+#endif
+300:
+ cmpd r9,r10
+ beq 302f
+ ld r12,0(r9)
+ cmpld r11,r12
+ blt 301f
+ ld r12,8(r9)
+ cmpld r11,r12
+ bge 301f
+ ld r12,16(r9)
+ b 303f
+301:
+ addi r9,r9,24
+ b 300b
+302:
+ li r12,0
+303:
+.endm
+
+.macro SEARCH_SOFT_MASK_TABLE
+#ifdef CONFIG_RELOCATABLE
+ mr r12,r2
+ ld r2,PACATOC(r13)
+ LOAD_REG_ADDR(r9, __start___soft_mask_table)
+ LOAD_REG_ADDR(r10, __stop___soft_mask_table)
+ mr r2,r12
+#else
+ LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
+ LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
+#endif
+300:
+ cmpd r9,r10
+ beq 302f
+ ld r12,0(r9)
+ cmpld r11,r12
+ blt 301f
+ ld r12,8(r9)
+ cmpld r11,r12
+ bge 301f
+ li r12,1
+ b 303f
+301:
+ addi r9,r9,16
+ b 300b
+302:
+ li r12,0
+303:
+.endm
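
Both macros above are the same bounded linear walk over a linker-generated
table, differing only in entry stride and result: the restart table yields a
fixup address in r12, the soft-mask table a found/not-found flag. A C sketch of
the restart-table variant, with struct layouts inferred from the 24- and
16-byte strides:

	struct restart_entry   { unsigned long start, end, fixup; }; /* 24-byte stride */
	struct soft_mask_entry { unsigned long start, end; };        /* 16-byte stride */

	extern struct restart_entry __start___restart_table[];
	extern struct restart_entry __stop___restart_table[];

	static unsigned long search_restart_table(unsigned long nip)
	{
		struct restart_entry *e;

		for (e = __start___restart_table; e < __stop___restart_table; e++)
			if (nip >= e->start && nip < e->end)
				return e->fixup;  /* address to restart execution at */
		return 0;                         /* matches "li r12,0": no entry found */
	}
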
+
/*
* Restore all registers including H/SRR0/1 saved in a stack frame of a
* standard exception.
@@ -666,10 +671,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
.macro EXCEPTION_RESTORE_REGS hsrr=0
/* Move original SRR0 and SRR1 into the respective regs */
ld r9,_MSR(r1)
+ li r10,0
.if \hsrr
mtspr SPRN_HSRR1,r9
+ stb r10,PACAHSRR_VALID(r13)
.else
mtspr SPRN_SRR1,r9
+ stb r10,PACASRR_VALID(r13)
.endif
ld r9,_NIP(r1)
.if \hsrr
@@ -693,25 +701,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
.endm
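
The PACA[H]SRR_VALID stores above implement a small caching protocol: a set
byte means the [H]SRRs still hold this frame's NIP/MSR, so the return path may
skip reloading them; any code that clobbers or repurposes the SRRs (as
EXCEPTION_RESTORE_REGS does here) must clear the byte first. A hedged C sketch,
assuming the paca gains srr_valid/hsrr_valid bytes as these stores suggest:

	#include <asm/paca.h>    /* local_paca */
	#include <asm/ptrace.h>  /* struct pt_regs */
	#include <asm/reg.h>     /* mtspr(), SPRN_SRR0/1 */

	/* The return path can skip the slow mtspr pair whenever the valid
	 * byte says the SRRs still hold this frame's NIP/MSR. */
	static void reload_srrs_if_clobbered(struct pt_regs *regs)
	{
		if (!local_paca->srr_valid) {
			mtspr(SPRN_SRR0, regs->nip);
			mtspr(SPRN_SRR1, regs->msr);
			local_paca->srr_valid = 1;
		}
	}
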
/*
- * When the idle code in power4_idle puts the CPU into NAP mode,
- * it has to do so in a loop, and relies on the external interrupt
- * and decrementer interrupt entry code to get it out of the loop.
- * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
- * to signal that it is in the loop and needs help to get out.
- */
-#ifdef CONFIG_PPC_970_NAP
-#define FINISH_NAP \
-BEGIN_FTR_SECTION \
- ld r11, PACA_THREAD_INFO(r13); \
- ld r9,TI_LOCAL_FLAGS(r11); \
- andi. r10,r9,_TLF_NAPPING; \
- bnel power4_fixup_nap; \
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
-#else
-#define FINISH_NAP
-#endif
-
-/*
* There are a few constraints to be concerned with.
* - Real mode exceptions code/data must be located at their physical location.
* - Virtual mode exceptions must be mapped at their 0xc000... location.
@@ -805,17 +794,17 @@ __start_interrupts:
* scv instructions enter the kernel without changing EE, RI, ME, or HV.
* In particular, this means we can take a maskable interrupt at any point
* in the scv handler, which is unlike any other interrupt. This is solved
- * by treating the instruction addresses below __end_interrupts as being
- * soft-masked.
+ * by treating the instruction addresses in the handler as soft-masked,
+ * which is done by adding a SOFT_MASK_TABLE entry covering them.
*
* AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
* ensure scv is never executed with relocation off, which means AIL-0
* should never happen.
*
- * Before leaving the below __end_interrupts text, at least of the following
- * must be true:
+ * Before leaving the soft-masked text below (inside __end_soft_masked), at
+ * least one of the following must be true:
* - MSR[PR]=1 (i.e., return to userspace)
- * - MSR_EE|MSR_RI is set (no reentrant exceptions)
+ * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
* - Standard kernel environment is set up (stack, paca, etc)
*
* Call convention:
@@ -823,6 +812,7 @@ __start_interrupts:
* syscall register convention is in Documentation/powerpc/syscall64-abi.rst
*/
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
+1:
/* SCV 0 */
mr r9,r13
GET_PACA(r13)
@@ -852,8 +842,11 @@ EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
b system_call_vectored_sigill
#endif
.endr
+2:
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)
+SOFT_MASK_TABLE(1b, 2b) // Treat scv vectors as soft-masked, see comment above.
+
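
A conceptual C equivalent of that SOFT_MASK_TABLE(1b, 2b) invocation, assuming
the macro emits one start/end pair into a dedicated __soft_mask_table linker
section (the symbol names below are illustrative):

	struct soft_mask_entry { const void *start, *end; };

	extern char scv_vectors_start[], scv_vectors_end[];  /* the "1:"/"2:" labels */

	static const struct soft_mask_entry scv_soft_mask_entry
		__attribute__((used, section("__soft_mask_table"))) = {
		.start = scv_vectors_start,
		.end   = scv_vectors_end,
	};
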
#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
__LOAD_HANDLER(r10, system_call_vectored_common)
@@ -1019,8 +1012,6 @@ EXC_COMMON_BEGIN(system_reset_common)
EXCEPTION_RESTORE_REGS
RFI_TO_USER_OR_KERNEL
- GEN_KVM system_reset
-
/**
* Interrupt 0x200 - Machine Check Interrupt (MCE).
@@ -1089,7 +1080,6 @@ INT_DEFINE_BEGIN(machine_check)
ISET_RI=0
IDAR=1
IDSISR=1
- IKVM_SKIP=1
IKVM_REAL=1
INT_DEFINE_END(machine_check)
@@ -1185,7 +1175,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
/*
* Check if we are coming from guest. If yes, then run the normal
* exception handler which will take the
- * machine_check_kvm->kvmppc_interrupt branch to deliver the MC event
+ * machine_check_kvm->kvm_interrupt branch to deliver the MC event
* to guest.
*/
lbz r11,HSTATE_IN_GUEST(r13)
@@ -1248,15 +1238,12 @@ EXC_COMMON_BEGIN(machine_check_common)
*/
GEN_COMMON machine_check
- FINISH_NAP
/* Enable MSR_RI when finished with PACA_EXMC */
li r10,MSR_RI
mtmsrd r10,1
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception
- b interrupt_return
-
- GEN_KVM machine_check
+ b interrupt_return_srr
#ifdef CONFIG_PPC_P7_NAP
@@ -1362,7 +1349,6 @@ INT_DEFINE_BEGIN(data_access)
IVEC=0x300
IDAR=1
IDSISR=1
- IKVM_SKIP=1
IKVM_REAL=1
INT_DEFINE_END(data_access)
@@ -1383,7 +1369,7 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
- b interrupt_return
+ b interrupt_return_srr
1: bl do_break
/*
@@ -1391,9 +1377,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
* If so, we need to restore them with their updated values.
*/
REST_NVGPRS(r1)
- b interrupt_return
-
- GEN_KVM data_access
+ b interrupt_return_srr
/**
@@ -1416,7 +1400,6 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
INT_DEFINE_BEGIN(data_access_slb)
IVEC=0x380
IDAR=1
- IKVM_SKIP=1
IKVM_REAL=1
INT_DEFINE_END(data_access_slb)
@@ -1434,7 +1417,7 @@ BEGIN_MMU_FTR_SECTION
bl do_slb_fault
cmpdi r3,0
bne- 1f
- b fast_interrupt_return
+ b fast_interrupt_return_srr
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
@@ -1443,9 +1426,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_slb_fault
- b interrupt_return
-
- GEN_KVM data_access_slb
+ b interrupt_return_srr
/**
@@ -1481,9 +1462,7 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
- b interrupt_return
-
- GEN_KVM instruction_access
+ b interrupt_return_srr
/**
@@ -1518,7 +1497,7 @@ BEGIN_MMU_FTR_SECTION
bl do_slb_fault
cmpdi r3,0
bne- 1f
- b fast_interrupt_return
+ b fast_interrupt_return_srr
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
@@ -1527,9 +1506,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
std r3,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_bad_slb_fault
- b interrupt_return
-
- GEN_KVM instruction_access_slb
+ b interrupt_return_srr
/**
@@ -1571,12 +1548,13 @@ EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
GEN_COMMON hardware_interrupt
- FINISH_NAP
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
- b interrupt_return
-
- GEN_KVM hardware_interrupt
+ BEGIN_FTR_SECTION
+ b interrupt_return_hsrr
+ FTR_SECTION_ELSE
+ b interrupt_return_srr
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
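
The feature section above is patched at boot into one of the two branches;
expressed in C, the selection reads roughly as follows
(interrupt_return_{hsrr,srr} are asm entry points, shown as functions only for
the sketch):

	#include <asm/cputable.h>  /* cpu_has_feature(), CPU_FTR_* */

	extern void interrupt_return_hsrr(void);
	extern void interrupt_return_srr(void);

	static void external_interrupt_return(void)
	{
		if (cpu_has_feature(CPU_FTR_HVMODE) && cpu_has_feature(CPU_FTR_ARCH_206))
			interrupt_return_hsrr();  /* HV mode: 0x500 arrives via HSRRs */
		else
			interrupt_return_srr();
	}
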
/**
@@ -1603,9 +1581,7 @@ EXC_COMMON_BEGIN(alignment_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl alignment_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
-
- GEN_KVM alignment
+ b interrupt_return_srr
/**
@@ -1712,9 +1688,7 @@ EXC_COMMON_BEGIN(program_check_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl program_check_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
-
- GEN_KVM program_check
+ b interrupt_return_srr
/*
@@ -1757,16 +1731,14 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
bl load_up_fpu
- b fast_interrupt_return
+ b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl fp_unavailable_tm
- b interrupt_return
+ b interrupt_return_srr
#endif
- GEN_KVM fp_unavailable
-
/**
* Interrupt 0x900 - Decrementer Interrupt.
@@ -1801,12 +1773,9 @@ EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
EXC_VIRT_END(decrementer, 0x4900, 0x80)
EXC_COMMON_BEGIN(decrementer_common)
GEN_COMMON decrementer
- FINISH_NAP
addi r3,r1,STACK_FRAME_OVERHEAD
bl timer_interrupt
- b interrupt_return
-
- GEN_KVM decrementer
+ b interrupt_return_srr
/**
@@ -1843,6 +1812,8 @@ EXC_COMMON_BEGIN(hdecrementer_common)
*
* Be careful to avoid touching the kernel stack.
*/
+ li r10,0
+ stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_CTR(r13)
mtctr r10
mtcrf 0x80,r9
@@ -1853,8 +1824,6 @@ EXC_COMMON_BEGIN(hdecrementer_common)
ld r13,PACA_EXGEN+EX_R13(r13)
HRFI_TO_KERNEL
- GEN_KVM hdecrementer
-
/**
* Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
@@ -1886,16 +1855,13 @@ EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
EXC_COMMON_BEGIN(doorbell_super_common)
GEN_COMMON doorbell_super
- FINISH_NAP
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
bl doorbell_exception
#else
bl unknown_async_exception
#endif
- b interrupt_return
-
- GEN_KVM doorbell_super
+ b interrupt_return_srr
EXC_REAL_NONE(0xb00, 0x100)
@@ -1946,7 +1912,7 @@ INT_DEFINE_END(system_call)
GET_PACA(r13)
std r10,PACA_EXGEN+EX_R10(r13)
INTERRUPT_TO_KERNEL
- KVMTEST system_call /* uses r10, branch to system_call_kvm */
+ KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */
mfctr r9
#else
mr r9,r13
@@ -1972,8 +1938,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
mtctr r10
bctr
.else
- li r10,MSR_RI
- mtmsrd r10,1 /* Set RI (EE=0) */
#ifdef CONFIG_RELOCATABLE
__LOAD_HANDLER(r10, system_call_common)
mtctr r10
@@ -2002,14 +1966,16 @@ EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
EXC_VIRT_END(system_call, 0x4c00, 0x100)
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-TRAMP_REAL_BEGIN(system_call_kvm)
- /*
- * This is a hcall, so register convention is as above, with these
- * differences:
- * r13 = PACA
- * ctr = orig r13
- * orig r10 saved in PACA
- */
+TRAMP_REAL_BEGIN(kvm_hcall)
+ std r9,PACA_EXGEN+EX_R9(r13)
+ std r11,PACA_EXGEN+EX_R11(r13)
+ std r12,PACA_EXGEN+EX_R12(r13)
+ mfcr r9
+ mfctr r10
+ std r10,PACA_EXGEN+EX_R13(r13)
+ li r10,0
+ std r10,PACA_EXGEN+EX_CFAR(r13)
+ std r10,PACA_EXGEN+EX_CTR(r13)
/*
* Save the PPR (on systems that support it) before changing to
* HMT_MEDIUM. That allows the KVM code to save that value into the
@@ -2017,31 +1983,24 @@ TRAMP_REAL_BEGIN(system_call_kvm)
*/
BEGIN_FTR_SECTION
mfspr r10,SPRN_PPR
- std r10,HSTATE_PPR(r13)
+ std r10,PACA_EXGEN+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
HMT_MEDIUM
- mfctr r10
- SET_SCRATCH0(r10)
- mfcr r10
- std r12,HSTATE_SCRATCH0(r13)
- sldi r12,r10,32
- ori r12,r12,0xc00
+
#ifdef CONFIG_RELOCATABLE
/*
- * Requires __LOAD_FAR_HANDLER beause kvmppc_interrupt lives
+ * Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
* outside the head section.
*/
- __LOAD_FAR_HANDLER(r10, kvmppc_interrupt)
+ __LOAD_FAR_HANDLER(r10, kvmppc_hcall)
mtctr r10
- ld r10,PACA_EXGEN+EX_R10(r13)
bctr
#else
- ld r10,PACA_EXGEN+EX_R10(r13)
- b kvmppc_interrupt
+ b kvmppc_hcall
#endif
#endif
-
/**
* Interrupt 0xd00 - Trace Interrupt.
* This is a synchronous interrupt in response to instruction step or
@@ -2064,9 +2023,7 @@ EXC_COMMON_BEGIN(single_step_common)
GEN_COMMON single_step
addi r3,r1,STACK_FRAME_OVERHEAD
bl single_step_exception
- b interrupt_return
-
- GEN_KVM single_step
+ b interrupt_return_srr
/**
@@ -2086,7 +2043,6 @@ INT_DEFINE_BEGIN(h_data_storage)
IHSRR=1
IDAR=1
IDSISR=1
- IKVM_SKIP=1
IKVM_REAL=1
IKVM_VIRT=1
INT_DEFINE_END(h_data_storage)
@@ -2105,9 +2061,7 @@ BEGIN_MMU_FTR_SECTION
MMU_FTR_SECTION_ELSE
bl unknown_exception
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
- b interrupt_return
-
- GEN_KVM h_data_storage
+ b interrupt_return_hsrr
/**
@@ -2132,9 +2086,7 @@ EXC_COMMON_BEGIN(h_instr_storage_common)
GEN_COMMON h_instr_storage
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
- b interrupt_return
-
- GEN_KVM h_instr_storage
+ b interrupt_return_hsrr
/**
@@ -2158,9 +2110,7 @@ EXC_COMMON_BEGIN(emulation_assist_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl emulation_assist_interrupt
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
-
- GEN_KVM emulation_assist
+ b interrupt_return_hsrr
/**
@@ -2233,16 +2183,11 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)
EXCEPTION_RESTORE_REGS hsrr=1
GEN_INT_ENTRY hmi_exception, virt=0
- GEN_KVM hmi_exception_early
-
EXC_COMMON_BEGIN(hmi_exception_common)
GEN_COMMON hmi_exception
- FINISH_NAP
addi r3,r1,STACK_FRAME_OVERHEAD
bl handle_hmi_exception
- b interrupt_return
-
- GEN_KVM hmi_exception
+ b interrupt_return_hsrr
/**
@@ -2266,16 +2211,13 @@ EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
EXC_COMMON_BEGIN(h_doorbell_common)
GEN_COMMON h_doorbell
- FINISH_NAP
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
bl doorbell_exception
#else
bl unknown_async_exception
#endif
- b interrupt_return
-
- GEN_KVM h_doorbell
+ b interrupt_return_hsrr
/**
@@ -2299,12 +2241,9 @@ EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
EXC_COMMON_BEGIN(h_virt_irq_common)
GEN_COMMON h_virt_irq
- FINISH_NAP
addi r3,r1,STACK_FRAME_OVERHEAD
bl do_IRQ
- b interrupt_return
-
- GEN_KVM h_virt_irq
+ b interrupt_return_hsrr
EXC_REAL_NONE(0xec0, 0x20)
@@ -2345,12 +2284,9 @@ EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
EXC_COMMON_BEGIN(performance_monitor_common)
GEN_COMMON performance_monitor
- FINISH_NAP
addi r3,r1,STACK_FRAME_OVERHEAD
bl performance_monitor_exception
- b interrupt_return
-
- GEN_KVM performance_monitor
+ b interrupt_return_srr
/**
@@ -2387,21 +2323,19 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
bl load_up_altivec
- b fast_interrupt_return
+ b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_tm
- b interrupt_return
+ b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_exception
- b interrupt_return
-
- GEN_KVM altivec_unavailable
+ b interrupt_return_srr
/**
@@ -2442,16 +2376,14 @@ BEGIN_FTR_SECTION
2: /* User process was in a transaction */
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_tm
- b interrupt_return
+ b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_exception
- b interrupt_return
-
- GEN_KVM vsx_unavailable
+ b interrupt_return_srr
/**
@@ -2479,9 +2411,7 @@ EXC_COMMON_BEGIN(facility_unavailable_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl facility_unavailable_exception
REST_NVGPRS(r1) /* instruction emulation may change GPRs */
- b interrupt_return
-
- GEN_KVM facility_unavailable
+ b interrupt_return_srr
/**
@@ -2509,9 +2439,7 @@ EXC_COMMON_BEGIN(h_facility_unavailable_common)
addi r3,r1,STACK_FRAME_OVERHEAD
bl facility_unavailable_exception
REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
- b interrupt_return
-
- GEN_KVM h_facility_unavailable
+ b interrupt_return_hsrr
EXC_REAL_NONE(0xfa0, 0x20)
@@ -2530,8 +2458,6 @@ EXC_VIRT_NONE(0x5100, 0x100)
INT_DEFINE_BEGIN(cbe_system_error)
IVEC=0x1200
IHSRR=1
- IKVM_SKIP=1
- IKVM_REAL=1
INT_DEFINE_END(cbe_system_error)
EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
@@ -2542,20 +2468,23 @@ EXC_COMMON_BEGIN(cbe_system_error_common)
GEN_COMMON cbe_system_error
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_system_error_exception
- b interrupt_return
-
- GEN_KVM cbe_system_error
+ b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
#endif
-
+/**
+ * Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
+ * This interrupt was removed from the ISA before 2.01, which is the earliest
+ * 64-bit BookS ISA supported; however, the G5 / 970 implements it as a
+ * non-architected feature available through the support processor
+ * interface.
+ */
INT_DEFINE_BEGIN(instruction_breakpoint)
IVEC=0x1300
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
- IKVM_SKIP=1
IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_breakpoint)
@@ -2570,9 +2499,7 @@ EXC_COMMON_BEGIN(instruction_breakpoint_common)
GEN_COMMON instruction_breakpoint
addi r3,r1,STACK_FRAME_OVERHEAD
bl instruction_breakpoint_exception
- b interrupt_return
-
- GEN_KVM instruction_breakpoint
+ b interrupt_return_srr
EXC_REAL_NONE(0x1400, 0x100)
@@ -2680,6 +2607,8 @@ BEGIN_FTR_SECTION
ld r10,PACA_EXGEN+EX_CFAR(r13)
mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ li r10,0
+ stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
@@ -2692,17 +2621,13 @@ EXC_COMMON_BEGIN(denorm_exception_common)
GEN_COMMON denorm_exception
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
- b interrupt_return
-
- GEN_KVM denorm_exception
+ b interrupt_return_hsrr
#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_maintenance)
IVEC=0x1600
IHSRR=1
- IKVM_SKIP=1
- IKVM_REAL=1
INT_DEFINE_END(cbe_maintenance)
EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100)
@@ -2713,9 +2638,7 @@ EXC_COMMON_BEGIN(cbe_maintenance_common)
GEN_COMMON cbe_maintenance
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_maintenance_exception
- b interrupt_return
-
- GEN_KVM cbe_maintenance
+ b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
@@ -2745,17 +2668,13 @@ EXC_COMMON_BEGIN(altivec_assist_common)
#else
bl unknown_exception
#endif
- b interrupt_return
-
- GEN_KVM altivec_assist
+ b interrupt_return_srr
#ifdef CONFIG_CBE_RAS
INT_DEFINE_BEGIN(cbe_thermal)
IVEC=0x1800
IHSRR=1
- IKVM_SKIP=1
- IKVM_REAL=1
INT_DEFINE_END(cbe_thermal)
EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100)
@@ -2766,9 +2685,7 @@ EXC_COMMON_BEGIN(cbe_thermal_common)
GEN_COMMON cbe_thermal
addi r3,r1,STACK_FRAME_OVERHEAD
bl cbe_thermal_exception
- b interrupt_return
-
- GEN_KVM cbe_thermal
+ b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
@@ -2793,7 +2710,6 @@ INT_DEFINE_END(soft_nmi)
* and run it entirely with interrupts hard disabled.
*/
EXC_COMMON_BEGIN(soft_nmi_common)
- mfspr r11,SPRN_SRR0
mr r10,r1
ld r1,PACAEMERGSP(r13)
subi r1,r1,INT_FRAME_SIZE
@@ -2807,6 +2723,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
mtmsrd r9,1
kuap_kernel_restore r9, r10
+
EXCEPTION_RESTORE_REGS hsrr=0
RFI_TO_KERNEL
@@ -2828,19 +2745,24 @@ masked_Hinterrupt:
.else
masked_interrupt:
.endif
- lbz r11,PACAIRQHAPPENED(r13)
- or r11,r11,r10
- stb r11,PACAIRQHAPPENED(r13)
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+ lbz r9,PACAIRQHAPPENED(r13)
+ or r9,r9,r10
+ stb r9,PACAIRQHAPPENED(r13)
+
+ .if ! \hsrr
cmpwi r10,PACA_IRQ_DEC
bne 1f
- lis r10,0x7fff
- ori r10,r10,0xffff
- mtspr SPRN_DEC,r10
+ LOAD_REG_IMMEDIATE(r9, 0x7fffffff)
+ mtspr SPRN_DEC,r9
#ifdef CONFIG_PPC_WATCHDOG
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
b soft_nmi_common
#else
b 2f
#endif
+ .endif
+
1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
beq 2f
xori r12,r12,MSR_EE /* clear MSR_EE */
@@ -2849,11 +2771,29 @@ masked_interrupt:
.else
mtspr SPRN_SRR1,r12
.endif
- ori r11,r11,PACA_IRQ_HARD_DIS
- stb r11,PACAIRQHAPPENED(r13)
+ ori r9,r9,PACA_IRQ_HARD_DIS
+ stb r9,PACAIRQHAPPENED(r13)
2: /* done */
- ld r10,PACA_EXGEN+EX_CTR(r13)
- mtctr r10
+ li r9,0
+ .if \hsrr
+ stb r9,PACAHSRR_VALID(r13)
+ .else
+ stb r9,PACASRR_VALID(r13)
+ .endif
+
+ SEARCH_RESTART_TABLE
+ cmpdi r12,0
+ beq 3f
+ .if \hsrr
+ mtspr SPRN_HSRR0,r12
+ .else
+ mtspr SPRN_SRR0,r12
+ .endif
+3:
+
+ ld r9,PACA_EXGEN+EX_CTR(r13)
+ mtctr r9
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
mtcrf 0x80,r9
std r1,PACAR1(r13)
ld r9,PACA_EXGEN+EX_R9(r13)
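
The SEARCH_RESTART_TABLE use above lets a masked interrupt redirect its return
address before the rfi. Roughly, in C (reusing the lookup sketched earlier;
illustrative only):

	#include <linux/types.h>
	#include <asm/reg.h>   /* mfspr()/mtspr(), SPRN_SRR0, SPRN_HSRR0 */

	extern unsigned long search_restart_table(unsigned long nip);

	static void masked_interrupt_fixup(bool hsrr)
	{
		unsigned long nip = hsrr ? mfspr(SPRN_HSRR0) : mfspr(SPRN_SRR0);
		unsigned long fixup = search_restart_table(nip);

		if (fixup) {                       /* resume at the restart label */
			if (hsrr)
				mtspr(SPRN_HSRR0, fixup);
			else
				mtspr(SPRN_SRR0, fixup);
		}
	}
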
@@ -3022,6 +2962,15 @@ TRAMP_REAL_BEGIN(rfscv_flush_fallback)
USE_TEXT_SECTION()
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+kvm_interrupt:
+ /*
+ * The conditional branch in KVMTEST can't reach all the way,
+ * make a stub.
+ */
+ b kvmppc_interrupt
+#endif
+
_GLOBAL(do_uaccess_flush)
UACCESS_FLUSH_FIXUP_SECTION
nop
@@ -3037,32 +2986,6 @@ EXPORT_SYMBOL(do_uaccess_flush)
MASKED_INTERRUPT
MASKED_INTERRUPT hsrr=1
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-kvmppc_skip_interrupt:
- /*
- * Here all GPRs are unchanged from when the interrupt happened
- * except for r13, which is saved in SPRG_SCRATCH0.
- */
- mfspr r13, SPRN_SRR0
- addi r13, r13, 4
- mtspr SPRN_SRR0, r13
- GET_SCRATCH0(r13)
- RFI_TO_KERNEL
- b .
-
-kvmppc_skip_Hinterrupt:
- /*
- * Here all GPRs are unchanged from when the interrupt happened
- * except for r13, which is saved in SPRG_SCRATCH0.
- */
- mfspr r13, SPRN_HSRR0
- addi r13, r13, 4
- mtspr SPRN_HSRR0, r13
- GET_SCRATCH0(r13)
- HRFI_TO_KERNEL
- b .
-#endif
-
/*
* Relocation-on interrupts: A subset of the interrupts can be delivered
* with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
@@ -3081,7 +3004,7 @@ kvmppc_skip_Hinterrupt:
USE_FIXED_SECTION(virt_trampolines)
/*
- * All code below __end_interrupts is treated as soft-masked. If
+ * All code below __end_soft_masked is treated as soft-masked. If
* any code runs here with MSR[EE]=1, it must then cope with pending
* soft interrupt being raised (i.e., by ensuring it is replayed).
*
@@ -3096,24 +3019,6 @@ USE_FIXED_SECTION(virt_trampolines)
__end_interrupts:
DEFINE_FIXED_SYMBOL(__end_interrupts)
-#ifdef CONFIG_PPC_970_NAP
- /*
- * Called by exception entry code if _TLF_NAPPING was set, this clears
- * the NAPPING flag, and redirects the exception exit to
- * power4_fixup_nap_return.
- */
- .globl power4_fixup_nap
-EXC_COMMON_BEGIN(power4_fixup_nap)
- andc r9,r9,r10
- std r9,TI_LOCAL_FLAGS(r11)
- LOAD_REG_ADDR(r10, power4_idle_nap_return)
- std r10,_NIP(r1)
- blr
-
-power4_idle_nap_return:
- blr
-#endif
-
CLOSE_FIXED_SECTION(real_vectors);
CLOSE_FIXED_SECTION(real_trampolines);
CLOSE_FIXED_SECTION(virt_vectors);
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 8482739d42f3..b990075285f5 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -31,6 +31,7 @@
#include <asm/fadump.h>
#include <asm/fadump-internal.h>
#include <asm/setup.h>
+#include <asm/interrupt.h>
/*
* The CPU who acquired the lock to trigger the fadump crash should
@@ -44,22 +45,21 @@ static struct fw_dump fw_dump;
static void __init fadump_reserve_crash_area(u64 base);
-struct kobject *fadump_kobj;
-
#ifndef CONFIG_PRESERVE_FA_DUMP
+static struct kobject *fadump_kobj;
+
static atomic_t cpus_in_fadump;
static DEFINE_MUTEX(fadump_mutex);
-struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };
+static struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };
#define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */
#define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \
sizeof(struct fadump_memory_range))
static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
-struct fadump_mrange_info reserved_mrange_info = { "reserved", rngs,
- RESERVED_RNGS_SZ, 0,
- RESERVED_RNGS_CNT, true };
+static struct fadump_mrange_info
+reserved_mrange_info = { "reserved", rngs, RESERVED_RNGS_SZ, 0, RESERVED_RNGS_CNT, true };
static void __init early_init_dt_scan_reserved_ranges(unsigned long node);
@@ -79,7 +79,7 @@ static struct cma *fadump_cma;
* But for some reason even if it fails we still have the memory reservation
* with us and we can still continue doing fadump.
*/
-int __init fadump_cma_init(void)
+static int __init fadump_cma_init(void)
{
unsigned long long base, size;
int rc;
@@ -292,7 +292,7 @@ static void fadump_show_config(void)
* that is required for a kernel to boot successfully.
*
*/
-static inline u64 fadump_calculate_reserve_size(void)
+static __init u64 fadump_calculate_reserve_size(void)
{
u64 base, size, bootmem_min;
int ret;
@@ -728,7 +728,7 @@ void crash_fadump(struct pt_regs *regs, const char *str)
* If we came in via system reset, wait a while for the secondary
* CPUs to enter.
*/
- if (TRAP(&(fdh->regs)) == 0x100) {
+ if (TRAP(&(fdh->regs)) == INTERRUPT_SYSTEM_RESET) {
msecs = CRASH_TIMEOUT;
while ((atomic_read(&cpus_in_fadump) < ncpus) && (--msecs > 0))
mdelay(1);
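
A minimal sketch of the symbolic check the hunk above switches to, replacing
the literal 0x100:

	#include <linux/types.h>
	#include <asm/interrupt.h>  /* INTERRUPT_SYSTEM_RESET */
	#include <asm/ptrace.h>     /* TRAP() */

	static bool entered_via_system_reset(struct pt_regs *regs)
	{
		return TRAP(regs) == INTERRUPT_SYSTEM_RESET;  /* was the magic 0x100 */
	}
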
diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c
index c9e2819b095a..c7022c41cc31 100644
--- a/arch/powerpc/kernel/firmware.c
+++ b/arch/powerpc/kernel/firmware.c
@@ -23,18 +23,20 @@ EXPORT_SYMBOL_GPL(powerpc_firmware_features);
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST)
DEFINE_STATIC_KEY_FALSE(kvm_guest);
-bool check_kvm_guest(void)
+int __init check_kvm_guest(void)
{
struct device_node *hyper_node;
hyper_node = of_find_node_by_path("/hypervisor");
if (!hyper_node)
- return false;
+ return 0;
if (!of_device_is_compatible(hyper_node, "linux,kvm"))
- return false;
+ return 0;
static_branch_enable(&kvm_guest);
- return true;
+
+ return 0;
}
+core_initcall(check_kvm_guest); // before kvm_guest_init()
#endif
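
The firmware.c change turns check_kvm_guest() from an on-demand predicate into
a core_initcall that flips a static key once at boot. The general pattern, with
a hypothetical key and probe helper:

	#include <linux/init.h>
	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(my_feature);            /* hypothetical key */

	extern bool firmware_reports_my_feature(void);  /* assumed probe helper */

	static int __init detect_my_feature(void)
	{
		if (firmware_reports_my_feature())
			static_branch_enable(&my_feature);
		return 0;   /* initcalls return 0 even when the feature is absent */
	}
	core_initcall(detect_my_feature);
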
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 3ff9a8fafa46..6010adcee16e 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -92,9 +92,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
/* enable use of FP after return */
#ifdef CONFIG_PPC32
mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
-#ifdef CONFIG_VMAP_STACK
tovirt(r5, r5)
-#endif
lwz r4,THREAD_FPEXC_MODE(r5)
ori r9,r9,MSR_FP /* enable FP for current */
or r9,r9,r4
@@ -105,6 +103,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
ori r12,r12,MSR_FP
or r12,r12,r4
std r12,_MSR(r1)
+#ifdef CONFIG_PPC_BOOK3S_64
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+#endif
#endif
li r4,1
stb r4,THREAD_LOAD_FP(r5)
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index 5d4706c14572..6b1ec9e3541b 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -10,36 +10,39 @@
* We assume sprg3 has the physical address of the current
* task's thread_struct.
*/
-.macro EXCEPTION_PROLOG handle_dar_dsisr=0
+.macro EXCEPTION_PROLOG trapno name handle_dar_dsisr=0
EXCEPTION_PROLOG_0 handle_dar_dsisr=\handle_dar_dsisr
EXCEPTION_PROLOG_1
- EXCEPTION_PROLOG_2 handle_dar_dsisr=\handle_dar_dsisr
+ EXCEPTION_PROLOG_2 \trapno \name handle_dar_dsisr=\handle_dar_dsisr
.endm
.macro EXCEPTION_PROLOG_0 handle_dar_dsisr=0
mtspr SPRN_SPRG_SCRATCH0,r10
mtspr SPRN_SPRG_SCRATCH1,r11
-#ifdef CONFIG_VMAP_STACK
mfspr r10, SPRN_SPRG_THREAD
.if \handle_dar_dsisr
+#ifdef CONFIG_40x
+ mfspr r11, SPRN_DEAR
+#else
mfspr r11, SPRN_DAR
+#endif
stw r11, DAR(r10)
+#ifdef CONFIG_40x
+ mfspr r11, SPRN_ESR
+#else
mfspr r11, SPRN_DSISR
+#endif
stw r11, DSISR(r10)
.endif
mfspr r11, SPRN_SRR0
stw r11, SRR0(r10)
-#endif
mfspr r11, SPRN_SRR1 /* check whether user or kernel */
-#ifdef CONFIG_VMAP_STACK
stw r11, SRR1(r10)
-#endif
mfcr r10
andi. r11, r11, MSR_PR
.endm
-.macro EXCEPTION_PROLOG_1 for_rtas=0
-#ifdef CONFIG_VMAP_STACK
+.macro EXCEPTION_PROLOG_1
mtspr SPRN_SPRG_SCRATCH2,r1
subi r1, r1, INT_FRAME_SIZE /* use r1 if kernel */
beq 1f
@@ -47,32 +50,33 @@
lwz r1,TASK_STACK-THREAD(r1)
addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
1:
+#ifdef CONFIG_VMAP_STACK
mtcrf 0x3f, r1
- bt 32 - THREAD_ALIGN_SHIFT, stack_overflow
-#else
- subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */
- beq 1f
- mfspr r11,SPRN_SPRG_THREAD
- lwz r11,TASK_STACK-THREAD(r11)
- addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE
-1: tophys(r11, r11)
+ bt 32 - THREAD_ALIGN_SHIFT, vmap_stack_overflow
#endif
.endm
-.macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
-#ifdef CONFIG_VMAP_STACK
- li r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
- mtmsr r11
- isync
+.macro EXCEPTION_PROLOG_2 trapno name handle_dar_dsisr=0
+#ifdef CONFIG_PPC_8xx
+ .if \handle_dar_dsisr
+ li r11, RPN_PATTERN
+ mtspr SPRN_DAR, r11 /* Tag DAR, to be used in DTLB Error */
+ .endif
+#endif
+ LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~MSR_RI) /* re-enable MMU */
+ mtspr SPRN_SRR1, r11
+ lis r11, 1f@h
+ ori r11, r11, 1f@l
+ mtspr SPRN_SRR0, r11
mfspr r11, SPRN_SPRG_SCRATCH2
+ rfi
+
+ .text
+\name\()_virt:
+1:
stw r11,GPR1(r1)
stw r11,0(r1)
mr r11, r1
-#else
- stw r1,GPR1(r11)
- stw r1,0(r11)
- tovirt(r1, r11) /* set new kernel sp */
-#endif
stw r10,_CCR(r11) /* save registers */
stw r12,GPR12(r11)
stw r9,GPR9(r11)
@@ -82,7 +86,6 @@
stw r12,GPR11(r11)
mflr r10
stw r10,_LINK(r11)
-#ifdef CONFIG_VMAP_STACK
mfspr r12, SPRN_SPRG_THREAD
tovirt(r12, r12)
.if \handle_dar_dsisr
@@ -93,117 +96,72 @@
.endif
lwz r9, SRR1(r12)
lwz r12, SRR0(r12)
-#else
- mfspr r12,SPRN_SRR0
- mfspr r9,SPRN_SRR1
-#endif
#ifdef CONFIG_40x
rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
+#elif defined(CONFIG_PPC_8xx)
+ mtspr SPRN_EID, r2 /* Set MSR_RI */
#else
-#ifdef CONFIG_VMAP_STACK
- li r10, MSR_KERNEL & ~MSR_IR /* can take exceptions */
-#else
- li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* can take exceptions */
-#endif
+ li r10, MSR_KERNEL /* can take exceptions */
mtmsr r10 /* (except for mach check in rtas) */
#endif
- stw r0,GPR0(r11)
+ COMMON_EXCEPTION_PROLOG_END \trapno
+_ASM_NOKPROBE_SYMBOL(\name\()_virt)
+.endm
+
+.macro COMMON_EXCEPTION_PROLOG_END trapno
+ stw r0,GPR0(r1)
lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
addi r10,r10,STACK_FRAME_REGS_MARKER@l
- stw r10,8(r11)
- SAVE_4GPRS(3, r11)
- SAVE_2GPRS(7, r11)
+ stw r10,8(r1)
+ li r10, \trapno
+ stw r10,_TRAP(r1)
+ SAVE_4GPRS(3, r1)
+ SAVE_2GPRS(7, r1)
+ SAVE_NVGPRS(r1)
+ stw r2,GPR2(r1)
+ stw r12,_NIP(r1)
+ stw r9,_MSR(r1)
+ mfctr r10
+ mfspr r2,SPRN_SPRG_THREAD
+ stw r10,_CTR(r1)
+ tovirt(r2, r2)
+ mfspr r10,SPRN_XER
+ addi r2, r2, -THREAD
+ stw r10,_XER(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+.endm
+
+.macro prepare_transfer_to_handler
+#ifdef CONFIG_PPC_BOOK3S_32
+ andi. r12,r9,MSR_PR
+ bne 777f
+ bl prepare_transfer_to_handler
+777:
+#endif
.endm
.macro SYSCALL_ENTRY trapno
mfspr r9, SPRN_SRR1
- mfspr r10, SPRN_SRR0
+ mfspr r12, SPRN_SRR0
LOAD_REG_IMMEDIATE(r11, MSR_KERNEL) /* can take exceptions */
- lis r12, 1f@h
- ori r12, r12, 1f@l
+ lis r10, 1f@h
+ ori r10, r10, 1f@l
mtspr SPRN_SRR1, r11
- mtspr SPRN_SRR0, r12
- mfspr r12,SPRN_SPRG_THREAD
+ mtspr SPRN_SRR0, r10
+ mfspr r10,SPRN_SPRG_THREAD
mr r11, r1
- lwz r1,TASK_STACK-THREAD(r12)
- tovirt(r12, r12)
+ lwz r1,TASK_STACK-THREAD(r10)
+ tovirt(r10, r10)
addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
rfi
1:
- stw r11,GPR1(r1)
- stw r11,0(r1)
- mr r11, r1
- stw r10,_NIP(r11)
- mflr r10
- stw r10, _LINK(r11)
- mfcr r10
- rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */
- stw r10,_CCR(r11) /* save registers */
-#ifdef CONFIG_40x
- rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
-#endif
- lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
- stw r2,GPR2(r11)
- addi r10,r10,STACK_FRAME_REGS_MARKER@l
- stw r9,_MSR(r11)
- li r2, \trapno
- stw r10,8(r11)
- stw r2,_TRAP(r11)
- SAVE_GPR(0, r11)
- SAVE_4GPRS(3, r11)
- SAVE_2GPRS(7, r11)
- addi r2,r12,-THREAD
+ stw r12,_NIP(r1)
+ mfcr r12
+ rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */
+ stw r12,_CCR(r1)
b transfer_to_syscall /* jump to handler */
.endm
-.macro save_dar_dsisr_on_stack reg1, reg2, sp
-#ifndef CONFIG_VMAP_STACK
- mfspr \reg1, SPRN_DAR
- mfspr \reg2, SPRN_DSISR
- stw \reg1, _DAR(\sp)
- stw \reg2, _DSISR(\sp)
-#endif
-.endm
-
-.macro get_and_save_dar_dsisr_on_stack reg1, reg2, sp
-#ifdef CONFIG_VMAP_STACK
- lwz \reg1, _DAR(\sp)
- lwz \reg2, _DSISR(\sp)
-#else
- save_dar_dsisr_on_stack \reg1, \reg2, \sp
-#endif
-.endm
-
-.macro tovirt_vmstack dst, src
-#ifdef CONFIG_VMAP_STACK
- tovirt(\dst, \src)
-#else
- .ifnc \dst, \src
- mr \dst, \src
- .endif
-#endif
-.endm
-
-.macro tovirt_novmstack dst, src
-#ifndef CONFIG_VMAP_STACK
- tovirt(\dst, \src)
-#else
- .ifnc \dst, \src
- mr \dst, \src
- .endif
-#endif
-.endm
-
-.macro tophys_novmstack dst, src
-#ifndef CONFIG_VMAP_STACK
- tophys(\dst, \src)
-#else
- .ifnc \dst, \src
- mr \dst, \src
- .endif
-#endif
-.endm
-
/*
* Note: code which follows this uses cr0.eq (set if from kernel),
* r11, r12 (SRR0), and r9 (SRR1).
@@ -217,41 +175,29 @@
*/
#ifdef CONFIG_PPC_BOOK3S
#define START_EXCEPTION(n, label) \
+ __HEAD; \
. = n; \
DO_KVM n; \
label:
#else
#define START_EXCEPTION(n, label) \
+ __HEAD; \
. = n; \
label:
#endif
-#define EXCEPTION(n, label, hdlr, xfer) \
+#define EXCEPTION(n, label, hdlr) \
START_EXCEPTION(n, label) \
- EXCEPTION_PROLOG; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- xfer(n, hdlr)
-
-#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret) \
- li r10,trap; \
- stw r10,_TRAP(r11); \
- LOAD_REG_IMMEDIATE(r10, msr); \
- bl tfer; \
- .long hdlr; \
- .long ret
-
-#define EXC_XFER_STD(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \
- ret_from_except_full)
-
-#define EXC_XFER_LITE(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
- ret_from_except)
+ EXCEPTION_PROLOG n label; \
+ prepare_transfer_to_handler; \
+ bl hdlr; \
+ b interrupt_return
.macro vmap_stack_overflow_exception
-#ifdef CONFIG_VMAP_STACK
+ __HEAD
+vmap_stack_overflow:
#ifdef CONFIG_SMP
mfspr r1, SPRN_SPRG_THREAD
lwz r1, TASK_CPU - THREAD(r1)
@@ -261,16 +207,11 @@ label:
lis r1, emergency_ctx@ha
#endif
lwz r1, emergency_ctx@l(r1)
- cmpwi cr1, r1, 0
- bne cr1, 1f
- lis r1, init_thread_union@ha
- addi r1, r1, init_thread_union@l
-1: addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
- EXCEPTION_PROLOG_2
- SAVE_NVGPRS(r11)
- addi r3, r1, STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0, stack_overflow_exception)
-#endif
+ addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
+ EXCEPTION_PROLOG_2 0 vmap_stack_overflow
+ prepare_transfer_to_handler
+ bl stack_overflow_exception
+ b interrupt_return
.endm
#endif /* __HEAD_32_H__ */
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 24724a7dad49..7d72ee5ab387 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -89,7 +89,11 @@ _ENTRY(crit_srr0)
.space 4
_ENTRY(crit_srr1)
.space 4
-_ENTRY(saved_ksp_limit)
+_ENTRY(crit_r1)
+ .space 4
+_ENTRY(crit_dear)
+ .space 4
+_ENTRY(crit_esr)
.space 4
/*
@@ -100,42 +104,62 @@ _ENTRY(saved_ksp_limit)
* Instead we use a couple of words of memory at low physical addresses.
* This is OK since we don't support SMP on these processors.
*/
-#define CRITICAL_EXCEPTION_PROLOG \
- stw r10,crit_r10@l(0); /* save two registers to work with */\
- stw r11,crit_r11@l(0); \
- mfcr r10; /* save CR in r10 for now */\
- mfspr r11,SPRN_SRR3; /* check whether user or kernel */\
- andi. r11,r11,MSR_PR; \
- lis r11,critirq_ctx@ha; \
- tophys(r11,r11); \
- lwz r11,critirq_ctx@l(r11); \
- beq 1f; \
- /* COMING FROM USER MODE */ \
- mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
- lwz r11,TASK_STACK-THREAD(r11); /* this thread's kernel stack */\
-1: addi r11,r11,THREAD_SIZE-INT_FRAME_SIZE; /* Alloc an excpt frm */\
- tophys(r11,r11); \
- stw r10,_CCR(r11); /* save various registers */\
- stw r12,GPR12(r11); \
- stw r9,GPR9(r11); \
- mflr r10; \
- stw r10,_LINK(r11); \
- mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\
- stw r12,_DEAR(r11); /* since they may have had stuff */\
- mfspr r9,SPRN_ESR; /* in them at the point where the */\
- stw r9,_ESR(r11); /* exception was taken */\
- mfspr r12,SPRN_SRR2; \
- stw r1,GPR1(r11); \
- mfspr r9,SPRN_SRR3; \
- stw r1,0(r11); \
- tovirt(r1,r11); \
- rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
- stw r0,GPR0(r11); \
- lis r10, STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */\
- addi r10, r10, STACK_FRAME_REGS_MARKER@l; \
- stw r10, 8(r11); \
- SAVE_4GPRS(3, r11); \
- SAVE_2GPRS(7, r11)
+.macro CRITICAL_EXCEPTION_PROLOG trapno name
+ stw r10,crit_r10@l(0) /* save two registers to work with */
+ stw r11,crit_r11@l(0)
+ mfspr r10,SPRN_SRR0
+ mfspr r11,SPRN_SRR1
+ stw r10,crit_srr0@l(0)
+ stw r11,crit_srr1@l(0)
+ mfspr r10,SPRN_DEAR
+ mfspr r11,SPRN_ESR
+ stw r10,crit_dear@l(0)
+ stw r11,crit_esr@l(0)
+ mfcr r10 /* save CR in r10 for now */
+ mfspr r11,SPRN_SRR3 /* check whether user or kernel */
+ andi. r11,r11,MSR_PR
+ lis r11,(critirq_ctx-PAGE_OFFSET)@ha
+ lwz r11,(critirq_ctx-PAGE_OFFSET)@l(r11)
+ beq 1f
+ /* COMING FROM USER MODE */
+ mfspr r11,SPRN_SPRG_THREAD /* if from user, start at top of */
+ lwz r11,TASK_STACK-THREAD(r11) /* this thread's kernel stack */
+1: stw r1,crit_r1@l(0)
+ addi r1,r11,THREAD_SIZE-INT_FRAME_SIZE /* Alloc an excpt frm */
+ LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)) /* re-enable MMU */
+ mtspr SPRN_SRR1, r11
+ lis r11, 1f@h
+ ori r11, r11, 1f@l
+ mtspr SPRN_SRR0, r11
+ rfi
+
+ .text
+1:
+\name\()_virt:
+ lwz r11,crit_r1@l(0)
+ stw r11,GPR1(r1)
+ stw r11,0(r1)
+ mr r11,r1
+ stw r10,_CCR(r11) /* save various registers */
+ stw r12,GPR12(r11)
+ stw r9,GPR9(r11)
+ mflr r10
+ stw r10,_LINK(r11)
+ lis r9,PAGE_OFFSET@ha
+ lwz r10,crit_r10@l(r9)
+ lwz r12,crit_r11@l(r9)
+ stw r10,GPR10(r11)
+ stw r12,GPR11(r11)
+ lwz r12,crit_dear@l(r9)
+ lwz r9,crit_esr@l(r9)
+	stw	r12,_DEAR(r11)		/* save the DEAR and ESR values */
+	stw	r9,_ESR(r11)		/* snapshotted at exception entry */
+ mfspr r12,SPRN_SRR2
+ mfspr r9,SPRN_SRR3
+ rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
+ COMMON_EXCEPTION_PROLOG_END \trapno + 2
+_ASM_NOKPROBE_SYMBOL(\name\()_virt)
+.endm
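
For orientation, the fixed low-memory slots the prologue above spills into map
out roughly as below (assumption: one 32-bit word per _ENTRY slot). A single
static save area is only safe because the 40x parts are not SMP:

	#include <linux/types.h>

	struct crit_save_area {
		u32 r10, r11;    /* scratch GPRs, saved first */
		u32 srr0, srr1;  /* interrupted NIP and MSR */
		u32 r1;          /* pre-interrupt stack pointer */
		u32 dear, esr;   /* fault address and syndrome registers */
	};
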
/*
* State at this point:
@@ -155,10 +179,10 @@ _ENTRY(saved_ksp_limit)
*/
#define CRITICAL_EXCEPTION(n, label, hdlr) \
START_EXCEPTION(n, label); \
- CRITICAL_EXCEPTION_PROLOG; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
- crit_transfer_to_handler, ret_from_crit_exc)
+ CRITICAL_EXCEPTION_PROLOG n label; \
+ prepare_transfer_to_handler; \
+ bl hdlr; \
+ b ret_from_crit_exc
/*
* 0x0100 - Critical Interrupt Exception
@@ -178,69 +202,67 @@ _ENTRY(saved_ksp_limit)
* if they can't resolve the lightweight TLB fault.
*/
START_EXCEPTION(0x0300, DataStorage)
- EXCEPTION_PROLOG
- mfspr r5, SPRN_ESR /* Grab the ESR, save it */
- stw r5, _ESR(r11)
- mfspr r4, SPRN_DEAR /* Grab the DEAR, save it */
- stw r4, _DEAR(r11)
- EXC_XFER_LITE(0x300, handle_page_fault)
+ EXCEPTION_PROLOG 0x300 DataStorage handle_dar_dsisr=1
+ prepare_transfer_to_handler
+ bl do_page_fault
+ b interrupt_return
/*
* 0x0400 - Instruction Storage Exception
* This is caused by a fetch from non-execute or guarded pages.
*/
START_EXCEPTION(0x0400, InstructionAccess)
- EXCEPTION_PROLOG
+ EXCEPTION_PROLOG 0x400 InstructionAccess
li r5,0
stw r5, _ESR(r11) /* Zero ESR */
stw r12, _DEAR(r11) /* SRR0 as DEAR */
- EXC_XFER_LITE(0x400, handle_page_fault)
+ prepare_transfer_to_handler
+ bl do_page_fault
+ b interrupt_return
/* 0x0500 - External Interrupt Exception */
- EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(0x0500, HardwareInterrupt, do_IRQ)
/* 0x0600 - Alignment Exception */
START_EXCEPTION(0x0600, Alignment)
- EXCEPTION_PROLOG
- mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
- stw r4,_DEAR(r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0x600, alignment_exception)
+ EXCEPTION_PROLOG 0x600 Alignment handle_dar_dsisr=1
+ prepare_transfer_to_handler
+ bl alignment_exception
+ REST_NVGPRS(r1)
+ b interrupt_return
/* 0x0700 - Program Exception */
START_EXCEPTION(0x0700, ProgramCheck)
- EXCEPTION_PROLOG
- mfspr r4,SPRN_ESR /* Grab the ESR and save it */
- stw r4,_ESR(r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0x700, program_check_exception)
+ EXCEPTION_PROLOG 0x700 ProgramCheck handle_dar_dsisr=1
+ prepare_transfer_to_handler
+ bl program_check_exception
+ REST_NVGPRS(r1)
+ b interrupt_return
- EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x0800, Trap_08, unknown_exception)
+ EXCEPTION(0x0900, Trap_09, unknown_exception)
+ EXCEPTION(0x0A00, Trap_0A, unknown_exception)
+ EXCEPTION(0x0B00, Trap_0B, unknown_exception)
/* 0x0C00 - System Call Exception */
START_EXCEPTION(0x0C00, SystemCall)
SYSCALL_ENTRY 0xc00
/* Trap_0D is commented out to get more space for system call exception */
-/* EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD) */
- EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_STD)
+/* EXCEPTION(0x0D00, Trap_0D, unknown_exception) */
+ EXCEPTION(0x0E00, Trap_0E, unknown_exception)
+ EXCEPTION(0x0F00, Trap_0F, unknown_exception)
/* 0x1000 - Programmable Interval Timer (PIT) Exception */
- . = 0x1000
+ START_EXCEPTION(0x1000, DecrementerTrap)
b Decrementer
-/* 0x1010 - Fixed Interval Timer (FIT) Exception
-*/
- . = 0x1010
+/* 0x1010 - Fixed Interval Timer (FIT) Exception */
+ START_EXCEPTION(0x1010, FITExceptionTrap)
b FITException
-/* 0x1020 - Watchdog Timer (WDT) Exception
-*/
- . = 0x1020
+/* 0x1020 - Watchdog Timer (WDT) Exception */
+ START_EXCEPTION(0x1020, WDTExceptionTrap)
b WDTException
/* 0x1100 - Data TLB Miss Exception
@@ -249,13 +271,13 @@ _ENTRY(saved_ksp_limit)
* load TLB entries from the page table if they exist.
*/
START_EXCEPTION(0x1100, DTLBMiss)
- mtspr SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */
- mtspr SPRN_SPRG_SCRATCH1, r11
+ mtspr SPRN_SPRG_SCRATCH5, r10 /* Save some working registers */
+ mtspr SPRN_SPRG_SCRATCH6, r11
mtspr SPRN_SPRG_SCRATCH3, r12
mtspr SPRN_SPRG_SCRATCH4, r9
mfcr r12
mfspr r9, SPRN_PID
- mtspr SPRN_SPRG_SCRATCH5, r9
+ rlwimi r12, r9, 0, 0xff
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -316,13 +338,12 @@ _ENTRY(saved_ksp_limit)
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
- mfspr r9, SPRN_SPRG_SCRATCH5
- mtspr SPRN_PID, r9
- mtcr r12
+ mtspr SPRN_PID, r12
+ mtcrf 0x80, r12
mfspr r9, SPRN_SPRG_SCRATCH4
mfspr r12, SPRN_SPRG_SCRATCH3
- mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r10, SPRN_SPRG_SCRATCH0
+ mfspr r11, SPRN_SPRG_SCRATCH6
+ mfspr r10, SPRN_SPRG_SCRATCH5
b DataStorage
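
The rlwimi in the miss handlers packs the PID into the low byte of the register
that already carries the saved CR, freeing one SPRG pair; since cr0 sits in the
top nibble, mtcrf 0x80 restores it untouched by the PID bits. A model of the
packing (assuming the 40x PID fits in 8 bits):

	static inline unsigned int pack_cr_and_pid(unsigned int cr, unsigned int pid)
	{
		return (cr & ~0xffu) | (pid & 0xffu);
	}
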
/* 0x1200 - Instruction TLB Miss Exception
@@ -330,13 +351,13 @@ _ENTRY(saved_ksp_limit)
* registers and bailout to a different point.
*/
START_EXCEPTION(0x1200, ITLBMiss)
- mtspr SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */
- mtspr SPRN_SPRG_SCRATCH1, r11
+ mtspr SPRN_SPRG_SCRATCH5, r10 /* Save some working registers */
+ mtspr SPRN_SPRG_SCRATCH6, r11
mtspr SPRN_SPRG_SCRATCH3, r12
mtspr SPRN_SPRG_SCRATCH4, r9
mfcr r12
mfspr r9, SPRN_PID
- mtspr SPRN_SPRG_SCRATCH5, r9
+ rlwimi r12, r9, 0, 0xff
mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -397,28 +418,27 @@ _ENTRY(saved_ksp_limit)
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
- mfspr r9, SPRN_SPRG_SCRATCH5
- mtspr SPRN_PID, r9
- mtcr r12
+ mtspr SPRN_PID, r12
+ mtcrf 0x80, r12
mfspr r9, SPRN_SPRG_SCRATCH4
mfspr r12, SPRN_SPRG_SCRATCH3
- mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r10, SPRN_SPRG_SCRATCH0
+ mfspr r11, SPRN_SPRG_SCRATCH6
+ mfspr r10, SPRN_SPRG_SCRATCH5
b InstructionAccess
- EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1300, Trap_13, unknown_exception)
+ EXCEPTION(0x1400, Trap_14, unknown_exception)
+ EXCEPTION(0x1500, Trap_15, unknown_exception)
+ EXCEPTION(0x1600, Trap_16, unknown_exception)
+ EXCEPTION(0x1700, Trap_17, unknown_exception)
+ EXCEPTION(0x1800, Trap_18, unknown_exception)
+ EXCEPTION(0x1900, Trap_19, unknown_exception)
+ EXCEPTION(0x1A00, Trap_1A, unknown_exception)
+ EXCEPTION(0x1B00, Trap_1B, unknown_exception)
+ EXCEPTION(0x1C00, Trap_1C, unknown_exception)
+ EXCEPTION(0x1D00, Trap_1D, unknown_exception)
+ EXCEPTION(0x1E00, Trap_1E, unknown_exception)
+ EXCEPTION(0x1F00, Trap_1F, unknown_exception)
/* Check for a single step debug exception while in an exception
* handler before state has been saved. This is to catch the case
@@ -435,7 +455,7 @@ _ENTRY(saved_ksp_limit)
*/
/* 0x2000 - Debug Exception */
START_EXCEPTION(0x2000, DebugTrap)
- CRITICAL_EXCEPTION_PROLOG
+ CRITICAL_EXCEPTION_PROLOG 0x2000 DebugTrap
/*
* If this is a single step or branch-taken exception in an
@@ -477,32 +497,35 @@ _ENTRY(saved_ksp_limit)
/* continue normal handling for a critical exception... */
2: mfspr r4,SPRN_DBSR
stw r4,_ESR(r11) /* DebugException takes DBSR in _ESR */
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_TEMPLATE(DebugException, 0x2002, \
- (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
- crit_transfer_to_handler, ret_from_crit_exc)
+ prepare_transfer_to_handler
+ bl DebugException
+ b ret_from_crit_exc
/* Programmable Interval Timer (PIT) Exception. (from 0x1000) */
+ __HEAD
Decrementer:
- EXCEPTION_PROLOG
+ EXCEPTION_PROLOG 0x1000 Decrementer
lis r0,TSR_PIS@h
mtspr SPRN_TSR,r0 /* Clear the PIT exception */
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_LITE(0x1000, timer_interrupt)
+ prepare_transfer_to_handler
+ bl timer_interrupt
+ b interrupt_return
/* Fixed Interval Timer (FIT) Exception. (from 0x1010) */
+ __HEAD
FITException:
- EXCEPTION_PROLOG
- addi r3,r1,STACK_FRAME_OVERHEAD;
- EXC_XFER_STD(0x1010, unknown_exception)
+ EXCEPTION_PROLOG 0x1010 FITException
+ prepare_transfer_to_handler
+ bl unknown_exception
+ b interrupt_return
/* Watchdog Timer (WDT) Exception. (from 0x1020) */
+ __HEAD
WDTException:
- CRITICAL_EXCEPTION_PROLOG;
- addi r3,r1,STACK_FRAME_OVERHEAD;
- EXC_XFER_TEMPLATE(WatchdogException, 0x1020+2,
- (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)),
- crit_transfer_to_handler, ret_from_crit_exc)
+ CRITICAL_EXCEPTION_PROLOG 0x1020 WDTException
+ prepare_transfer_to_handler
+ bl WatchdogException
+ b ret_from_crit_exc
/* Other PowerPC processors, namely those derived from the 6xx-series
* have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
@@ -510,6 +533,7 @@ WDTException:
* reserved.
*/
+ __HEAD
/* Damn, I came up one instruction too many to fit into the
* exception space :-). Both the instruction and data TLB
* miss get to this point to load the TLB.
@@ -543,13 +567,12 @@ finish_tlb_load:
/* Done...restore registers and get out of here.
*/
- mfspr r9, SPRN_SPRG_SCRATCH5
- mtspr SPRN_PID, r9
- mtcr r12
+ mtspr SPRN_PID, r12
+ mtcrf 0x80, r12
mfspr r9, SPRN_SPRG_SCRATCH4
mfspr r12, SPRN_SPRG_SCRATCH3
- mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r10, SPRN_SPRG_SCRATCH0
+ mfspr r11, SPRN_SPRG_SCRATCH6
+ mfspr r10, SPRN_SPRG_SCRATCH5
rfi /* Should sync shadow TLBs */
b . /* prevent prefetch past rfi */
@@ -678,39 +701,3 @@ _GLOBAL(abort)
mfspr r13,SPRN_DBCR0
oris r13,r13,DBCR0_RST_SYSTEM@h
mtspr SPRN_DBCR0,r13
-
-_GLOBAL(set_context)
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@ha
- stw r4, abatron_pteptrs@l + 0x4(r5)
-#endif
- sync
- mtspr SPRN_PID,r3
- isync /* Need an isync to flush shadow */
- /* TLBs after changing PID */
- blr
-
-/* We put a few things here that have to be page-aligned. This stuff
- * goes at the beginning of the data segment, which is page-aligned.
- */
- .data
- .align 12
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space 4096
-EXPORT_SYMBOL(empty_zero_page)
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/* Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
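Note on the head_40x.S hunks above: the separate SPRG_SCRATCH5 slot for PID goes away by packing PID into the word that already holds the saved CR. On entry, rlwimi r12, r9, 0, 0xff merges the 8-bit PID into the low byte of r12; on exit, mtspr SPRN_PID, r12 and mtcrf 0x80, r12 restore both, since mtcrf with mask 0x80 writes only CR field 0 and the PID register ignores bits it does not implement. A minimal C sketch of the packing, assuming (as on 40x) that only the low 8 bits of PID are implemented:

    /* Illustrative sketch, not kernel code. */
    #include <stdint.h>

    static inline uint32_t pack_cr_pid(uint32_t cr, uint32_t pid)
    {
            return (cr & ~0xffu) | (pid & 0xffu);   /* rlwimi r12, r9, 0, 0xff */
    }

    static inline uint32_t restore_pid(uint32_t packed)
    {
            return packed & 0xffu;  /* mtspr SPRN_PID, r12: high bits ignored */
    }

    static inline uint32_t restore_cr0(uint32_t packed, uint32_t cr)
    {
            /* mtcrf 0x80, r12 touches only CR field 0 (top 4 bits) */
            return (cr & 0x0fffffffu) | (packed & 0xf0000000u);
    }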
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 813fa305c33b..ddc978a2d381 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -263,8 +263,7 @@ interrupt_base:
INSTRUCTION_STORAGE_EXCEPTION
/* External Input Interrupt */
- EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, \
- do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, do_IRQ)
/* Alignment Interrupt */
ALIGNMENT_EXCEPTION
@@ -277,7 +276,7 @@ interrupt_base:
FP_UNAVAILABLE_EXCEPTION
#else
EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
- FloatingPointUnavailable, unknown_exception, EXC_XFER_STD)
+ FloatingPointUnavailable, unknown_exception)
#endif
/* System Call Interrupt */
START_EXCEPTION(SystemCall)
@@ -285,15 +284,14 @@ interrupt_base:
/* Auxiliary Processor Unavailable Interrupt */
EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
- AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_STD)
+ AuxillaryProcessorUnavailable, unknown_exception)
/* Decrementer Interrupt */
DECREMENTER_EXCEPTION
/* Fixed Internal Timer Interrupt */
/* TODO: Add FIT support */
- EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \
- unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, unknown_exception)
/* Watchdog Timer Interrupt */
/* TODO: Add watchdog support */
@@ -534,6 +532,10 @@ finish_tlb_load_44x:
andi. r10,r12,_PAGE_USER /* User page ? */
beq 1f /* nope, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
+#ifdef CONFIG_PPC_KUEP
+0: rlwinm r11,r11,0,~PPC44x_TLB_SX /* Clear SX if User page */
+ patch_site 0b, patch__tlb_44x_kuep
+#endif
1: tlbwe r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */
/* Done...restore registers and get out of here.
@@ -745,6 +747,10 @@ finish_tlb_load_47x:
andi. r10,r12,_PAGE_USER /* User page ? */
beq 1f /* nope, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
+#ifdef CONFIG_PPC_KUEP
+0: rlwinm r11,r11,0,~PPC47x_TLB2_SX /* Clear SX if User page */
+ patch_site 0b, patch__tlb_47x_kuep
+#endif
1: tlbwe r11,r13,2
/* Done...restore registers and get out of here.
@@ -782,20 +788,6 @@ _GLOBAL(__fixup_440A_mcheck)
sync
blr
-_GLOBAL(set_context)
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@h
- ori r5, r5, abatron_pteptrs@l
- stw r4, 0x4(r5)
-#endif
- mtspr SPRN_PID,r3
- isync /* Force context change */
- blr
-
/*
* Init CPU state. This is called at boot time or for secondary CPUs
* to setup initial TLB entries, setup IVORs, etc...
@@ -1241,34 +1233,8 @@ head_start_common:
isync
blr
-/*
- * We put a few things here that have to be page-aligned. This stuff
- * goes at the beginning of the data segment, which is page-aligned.
- */
- .data
- .align PAGE_SHIFT
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space PAGE_SIZE
-EXPORT_SYMBOL(empty_zero_page)
-
-/*
- * To support >32-bit physical addresses, we use an 8KB pgdir.
- */
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/*
- * Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
-
#ifdef CONFIG_SMP
+ .data
.align 12
temp_boot_stack:
.space 1024
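The CONFIG_PPC_KUEP hunks in head_44x.S hinge on a patch site: the rlwinm at local label 0: strips the supervisor-execute (SX) bit from TLB entries for user pages, and patch_site 0b, patch__tlb_44x_kuep records that instruction's address so setup code can overwrite it when KUEP is disabled. A hedged sketch of such a disable path, modelled on powerpc's code-patching helpers; treat the helper name tlb_44x_disable_kuep and the exact patching calls as assumptions:

    #include <linux/types.h>
    #include <asm/code-patching.h>

    extern s32 patch__tlb_44x_kuep; /* offset emitted by the patch_site directive */

    static void __init tlb_44x_disable_kuep(void)   /* hypothetical helper */
    {
            /* Nop out the SX-clearing rlwinm so kernel execution of user
             * mappings remains permitted when KUEP is off. */
            patch_instruction_site(&patch__tlb_44x_kuep, ppc_inst(PPC_RAW_NOP()));
    }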
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index ece7f97bafff..79930b0bc781 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -194,8 +194,9 @@ CLOSE_FIXED_SECTION(first_256B)
/* This value is used to mark exception frames on the stack. */
.section ".toc","aw"
+/* This value is used to mark exception frames on the stack. */
exception_marker:
- .tc ID_72656773_68657265[TC],0x7265677368657265
+ .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
.previous
/*
@@ -211,6 +212,8 @@ OPEN_TEXT_SECTION(0x100)
USE_TEXT_SECTION()
+#include "interrupt_64.S"
+
#ifdef CONFIG_PPC_BOOK3E
/*
* The booting_thread_hwid holds the thread id we want to boot in cpu
@@ -997,23 +1000,3 @@ start_here_common:
0: trap
EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
.previous
-
-/*
- * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the bss, which is page-aligned.
- */
- .section ".bss"
-/*
- * pgd dir should be aligned to PGD_TABLE_SIZE which is 64K.
- * We will need to find a better way to fix this
- */
- .align 16
-
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
- .globl empty_zero_page
-empty_zero_page:
- .space PAGE_SIZE
-EXPORT_SYMBOL(empty_zero_page)
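The exception_marker change in head_64.S swaps a magic TOC constant for the named STACK_FRAME_REGS_MARKER; the old 0x7265677368657265 is simply the ASCII tag "regshere", so the on-stack marker value itself does not change. A standalone snippet confirming the decoding:

    /* Standalone demonstration, not kernel code. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long marker = 0x7265677368657265ULL; /* old magic */

            for (int i = 7; i >= 0; i--)
                    putchar((int)((marker >> (8 * i)) & 0xff));
            putchar('\n');  /* prints "regshere" */
            return 0;
    }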
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 46dff3f9c31f..9bdb95f5694f 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -29,6 +29,13 @@
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/code-patching-asm.h>
+#include <asm/interrupt.h>
+
+/*
+ * Value for the bits that have fixed value in RPN entries.
+ * Also used for tagging DAR for DTLBerror.
+ */
+#define RPN_PATTERN 0x00f0
#include "head_32.h"
@@ -42,12 +49,6 @@
#endif
.endm
-/*
- * Value for the bits that have fixed value in RPN entries.
- * Also used for tagging DAR for DTLBerror.
- */
-#define RPN_PATTERN 0x00f0
-
#define PAGE_SHIFT_512K 19
#define PAGE_SHIFT_8M 23
@@ -118,56 +119,54 @@ instruction_counter:
#endif
/* System reset */
- EXCEPTION(0x100, Reset, system_reset_exception, EXC_XFER_STD)
+ EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, system_reset_exception)
/* Machine check */
- . = 0x200
-MachineCheck:
- EXCEPTION_PROLOG handle_dar_dsisr=1
- save_dar_dsisr_on_stack r4, r5, r11
- li r6, RPN_PATTERN
- mtspr SPRN_DAR, r6 /* Tag DAR, to be used in DTLB Error */
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0x200, machine_check_exception)
+ START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
+ EXCEPTION_PROLOG INTERRUPT_MACHINE_CHECK MachineCheck handle_dar_dsisr=1
+ prepare_transfer_to_handler
+ bl machine_check_exception
+ b interrupt_return
/* External interrupt */
- EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)
/* Alignment exception */
- . = 0x600
-Alignment:
- EXCEPTION_PROLOG handle_dar_dsisr=1
- save_dar_dsisr_on_stack r4, r5, r11
- li r6, RPN_PATTERN
- mtspr SPRN_DAR, r6 /* Tag DAR, to be used in DTLB Error */
- addi r3,r1,STACK_FRAME_OVERHEAD
- b .Lalignment_exception_ool
+ START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
+ EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
+ prepare_transfer_to_handler
+ bl alignment_exception
+ REST_NVGPRS(r1)
+ b interrupt_return
/* Program check exception */
- EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
+ START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
+ EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
+ prepare_transfer_to_handler
+ bl program_check_exception
+ REST_NVGPRS(r1)
+ b interrupt_return
/* Decrementer */
- EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
-
- /* With VMAP_STACK there's not enough room for this at 0x600 */
- . = 0xa00
-.Lalignment_exception_ool:
- EXC_XFER_STD(0x600, alignment_exception)
+ EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)
/* System call */
- . = 0xc00
-SystemCall:
- SYSCALL_ENTRY 0xc00
+ START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
+ SYSCALL_ENTRY INTERRUPT_SYSCALL
/* Single step - not used on 601 */
- EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
+ EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
/* On the MPC8xx, this is a software emulation interrupt. It occurs
* for all unimplemented and illegal instructions.
*/
- EXCEPTION(0x1000, SoftEmu, emulation_assist_interrupt, EXC_XFER_STD)
+ START_EXCEPTION(INTERRUPT_SOFT_EMU_8xx, SoftEmu)
+ EXCEPTION_PROLOG INTERRUPT_SOFT_EMU_8xx SoftEmu
+ prepare_transfer_to_handler
+ bl emulation_assist_interrupt
+ REST_NVGPRS(r1)
+ b interrupt_return
- . = 0x1100
/*
* For the MPC8xx, this is a software tablewalk to load the instruction
* TLB. The task switch loads the M_TWB register with the pointer to the first
@@ -189,7 +188,7 @@ SystemCall:
#define INVALIDATE_ADJACENT_PAGES_CPU15(addr, tmp)
#endif
-InstructionTLBMiss:
+ START_EXCEPTION(INTERRUPT_INST_TLB_MISS_8xx, InstructionTLBMiss)
mtspr SPRN_SPRG_SCRATCH2, r10
mtspr SPRN_M_TW, r11
@@ -245,8 +244,7 @@ InstructionTLBMiss:
rfi
#endif
- . = 0x1200
-DataStoreTLBMiss:
+ START_EXCEPTION(INTERRUPT_DATA_TLB_MISS_8xx, DataStoreTLBMiss)
mtspr SPRN_SPRG_SCRATCH2, r10
mtspr SPRN_M_TW, r11
mfcr r11
@@ -309,83 +307,74 @@ DataStoreTLBMiss:
* to many reasons, such as executing guarded memory or illegal instruction
* addresses. There is nothing to do but handle a big time error fault.
*/
- . = 0x1300
-InstructionTLBError:
- EXCEPTION_PROLOG
+ START_EXCEPTION(INTERRUPT_INST_TLB_ERROR_8xx, InstructionTLBError)
+ /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
+ EXCEPTION_PROLOG INTERRUPT_INST_STORAGE InstructionTLBError
andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
andis. r10,r9,SRR1_ISI_NOPT@h
beq+ .Litlbie
tlbie r12
- /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
.Litlbie:
stw r12, _DAR(r11)
stw r5, _DSISR(r11)
- EXC_XFER_LITE(0x400, handle_page_fault)
+ prepare_transfer_to_handler
+ bl do_page_fault
+ b interrupt_return
/* This is the data TLB error on the MPC8xx. This could be due to
* many reasons, including a dirty update to a pte. We bail out to
* a higher level function that can handle it.
*/
- . = 0x1400
-DataTLBError:
+ START_EXCEPTION(INTERRUPT_DATA_TLB_ERROR_8xx, DataTLBError)
EXCEPTION_PROLOG_0 handle_dar_dsisr=1
mfspr r11, SPRN_DAR
cmpwi cr1, r11, RPN_PATTERN
beq- cr1, FixupDAR /* must be a buggy dcbX, icbi insn. */
DARFixed:/* Return from dcbx instruction bug workaround */
-#ifdef CONFIG_VMAP_STACK
- li r11, RPN_PATTERN
- mtspr SPRN_DAR, r11 /* Tag DAR, to be used in DTLB Error */
-#endif
EXCEPTION_PROLOG_1
- EXCEPTION_PROLOG_2 handle_dar_dsisr=1
- get_and_save_dar_dsisr_on_stack r4, r5, r11
+ /* 0x300 is DataAccess exception, needed by bad_page_fault() */
+ EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataTLBError handle_dar_dsisr=1
+ lwz r4, _DAR(r11)
+ lwz r5, _DSISR(r11)
andis. r10,r5,DSISR_NOHPTE@h
beq+ .Ldtlbie
tlbie r4
.Ldtlbie:
-#ifndef CONFIG_VMAP_STACK
- li r10,RPN_PATTERN
- mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */
-#endif
- /* 0x300 is DataAccess exception, needed by bad_page_fault() */
- EXC_XFER_LITE(0x300, handle_page_fault)
+ prepare_transfer_to_handler
+ bl do_page_fault
+ b interrupt_return
-stack_overflow:
+#ifdef CONFIG_VMAP_STACK
vmap_stack_overflow_exception
+#endif
/* On the MPC8xx, these next four traps are used for development
* support of breakpoints and such. Someday I will get around to
* using them.
*/
-do_databreakpoint:
- EXCEPTION_PROLOG_1
- EXCEPTION_PROLOG_2 handle_dar_dsisr=1
- addi r3,r1,STACK_FRAME_OVERHEAD
- mfspr r4,SPRN_BAR
- stw r4,_DAR(r11)
-#ifndef CONFIG_VMAP_STACK
- mfspr r5,SPRN_DSISR
- stw r5,_DSISR(r11)
-#endif
- EXC_XFER_STD(0x1c00, do_break)
-
- . = 0x1c00
-DataBreakpoint:
+ START_EXCEPTION(INTERRUPT_DATA_BREAKPOINT_8xx, DataBreakpoint)
EXCEPTION_PROLOG_0 handle_dar_dsisr=1
mfspr r11, SPRN_SRR0
cmplwi cr1, r11, (.Ldtlbie - PAGE_OFFSET)@l
cmplwi cr7, r11, (.Litlbie - PAGE_OFFSET)@l
cror 4*cr1+eq, 4*cr1+eq, 4*cr7+eq
- bne cr1, do_databreakpoint
+ bne cr1, 1f
mtcr r10
mfspr r10, SPRN_SPRG_SCRATCH0
mfspr r11, SPRN_SPRG_SCRATCH1
rfi
+1: EXCEPTION_PROLOG_1
+ EXCEPTION_PROLOG_2 INTERRUPT_DATA_BREAKPOINT_8xx DataBreakpoint handle_dar_dsisr=1
+ mfspr r4,SPRN_BAR
+ stw r4,_DAR(r11)
+ prepare_transfer_to_handler
+ bl do_break
+ REST_NVGPRS(r1)
+ b interrupt_return
+
#ifdef CONFIG_PERF_EVENTS
- . = 0x1d00
-InstructionBreakpoint:
+ START_EXCEPTION(INTERRUPT_INST_BREAKPOINT_8xx, InstructionBreakpoint)
mtspr SPRN_SPRG_SCRATCH0, r10
lwz r10, (instruction_counter - PAGE_OFFSET)@l(0)
addi r10, r10, -1
@@ -396,11 +385,12 @@ InstructionBreakpoint:
mfspr r10, SPRN_SPRG_SCRATCH0
rfi
#else
- EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(INTERRUPT_INST_BREAKPOINT_8xx, Trap_1d, unknown_exception)
#endif
- EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1e00, Trap_1e, unknown_exception)
+ EXCEPTION(0x1f00, Trap_1f, unknown_exception)
+ __HEAD
. = 0x2000
/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
@@ -510,14 +500,10 @@ FixupDAR:/* Entry point for dcbx workaround. */
152:
mfdar r11
mtctr r11 /* restore ctr reg from DAR */
-#ifdef CONFIG_VMAP_STACK
mfspr r11, SPRN_SPRG_THREAD
stw r10, DAR(r11)
mfspr r10, SPRN_DSISR
stw r10, DSISR(r11)
-#else
- mtdar r10 /* save fault EA to DAR */
-#endif
mfspr r10,SPRN_M_TW
b DARFixed /* Go back to normal TLB handling */
@@ -800,28 +786,3 @@ _GLOBAL(mmu_pin_tlb)
mtspr SPRN_SRR1, r10
mtspr SPRN_SRR0, r11
rfi
-
-/*
- * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the data segment,
- * which is page-aligned.
- */
- .data
- .globl sdata
-sdata:
- .globl empty_zero_page
- .align PAGE_SHIFT
-empty_zero_page:
- .space PAGE_SIZE
-EXPORT_SYMBOL(empty_zero_page)
-
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/* Room for two PTE table pointers, usually the kernel and current user
- * pointer to their respective root page table (pgdir).
- */
- .globl abatron_pteptrs
-abatron_pteptrs:
- .space 8
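Moving RPN_PATTERN above the #include keeps the 8xx DAR-tagging trick in one place: the handlers stash 0x00f0 in DAR after servicing a data TLB error, and DataTLBError compares DAR against that tag to spot dcbX/icbi instructions, which do not update DAR, before branching to FixupDAR to recompute the effective address. A one-line C model of the check (illustrative only; the real logic is the asm above):

    #define RPN_PATTERN     0x00f0  /* fixed low RPN bits, reused as a tag */

    /* DAR still holding the tag means the faulting insn never wrote it. */
    static int need_dar_fixup(unsigned long dar)
    {
            return dar == RPN_PATTERN;
    }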
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 727fdab557c9..764edd860ed4 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -31,6 +31,7 @@
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
+#include <asm/interrupt.h>
#include "head_32.h"
@@ -239,7 +240,7 @@ __secondary_hold_acknowledge:
/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
putting it back to what it was (unknown_async_exception) when done. */
- EXCEPTION(0x100, Reset, unknown_async_exception, EXC_XFER_STD)
+ EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, unknown_async_exception)
/* Machine check */
/*
@@ -255,40 +256,28 @@ __secondary_hold_acknowledge:
* pointer when we take an exception from supervisor mode.)
* -- paulus.
*/
- . = 0x200
- DO_KVM 0x200
-MachineCheck:
+ START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
EXCEPTION_PROLOG_0
#ifdef CONFIG_PPC_CHRP
-#ifdef CONFIG_VMAP_STACK
mtspr SPRN_SPRG_SCRATCH2,r1
mfspr r1, SPRN_SPRG_THREAD
lwz r1, RTAS_SP(r1)
cmpwi cr1, r1, 0
bne cr1, 7f
mfspr r1, SPRN_SPRG_SCRATCH2
-#else
- mfspr r11, SPRN_SPRG_THREAD
- lwz r11, RTAS_SP(r11)
- cmpwi cr1, r11, 0
- bne cr1, 7f
-#endif
#endif /* CONFIG_PPC_CHRP */
- EXCEPTION_PROLOG_1 for_rtas=1
-7: EXCEPTION_PROLOG_2
- addi r3,r1,STACK_FRAME_OVERHEAD
+ EXCEPTION_PROLOG_1
+7: EXCEPTION_PROLOG_2 0x200 MachineCheck
#ifdef CONFIG_PPC_CHRP
- beq cr1, machine_check_tramp
+ beq cr1, 1f
twi 31, 0, 0
-#else
- b machine_check_tramp
#endif
+1: prepare_transfer_to_handler
+ bl machine_check_exception
+ b interrupt_return
/* Data access exception. */
- . = 0x300
- DO_KVM 0x300
-DataAccess:
-#ifdef CONFIG_VMAP_STACK
+ START_EXCEPTION(INTERRUPT_DATA_STORAGE, DataAccess)
#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
mtspr SPRN_SPRG_SCRATCH2,r10
@@ -309,30 +298,20 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif
1: EXCEPTION_PROLOG_0 handle_dar_dsisr=1
EXCEPTION_PROLOG_1
- b handle_page_fault_tramp_1
-#else /* CONFIG_VMAP_STACK */
- EXCEPTION_PROLOG handle_dar_dsisr=1
- get_and_save_dar_dsisr_on_stack r4, r5, r11
-#ifdef CONFIG_PPC_BOOK3S_604
-BEGIN_MMU_FTR_SECTION
- andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
- bne handle_page_fault_tramp_2 /* if not, try to put a PTE */
- rlwinm r3, r5, 32 - 15, 21, 21 /* DSISR_STORE -> _PAGE_RW */
- bl hash_page
- b handle_page_fault_tramp_1
-MMU_FTR_SECTION_ELSE
-#endif
- b handle_page_fault_tramp_2
-#ifdef CONFIG_PPC_BOOK3S_604
-ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
-#endif
-#endif /* CONFIG_VMAP_STACK */
+ EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
+ prepare_transfer_to_handler
+ lwz r5, _DSISR(r11)
+ andis. r0, r5, DSISR_DABRMATCH@h
+ bne- 1f
+ bl do_page_fault
+ b interrupt_return
+1: bl do_break
+ REST_NVGPRS(r1)
+ b interrupt_return
+
/* Instruction access exception. */
- . = 0x400
- DO_KVM 0x400
-InstructionAccess:
-#ifdef CONFIG_VMAP_STACK
+ START_EXCEPTION(INTERRUPT_INST_STORAGE, InstructionAccess)
mtspr SPRN_SPRG_SCRATCH0,r10
mtspr SPRN_SPRG_SCRATCH1,r11
mfspr r10, SPRN_SPRG_THREAD
@@ -352,43 +331,35 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
andi. r11, r11, MSR_PR
EXCEPTION_PROLOG_1
- EXCEPTION_PROLOG_2
-#else /* CONFIG_VMAP_STACK */
- EXCEPTION_PROLOG
- andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */
- beq 1f /* if so, try to put a PTE */
- li r3,0 /* into the hash table */
- mr r4,r12 /* SRR0 is fault address */
-#ifdef CONFIG_PPC_BOOK3S_604
-BEGIN_MMU_FTR_SECTION
- bl hash_page
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
-#endif
-#endif /* CONFIG_VMAP_STACK */
+ EXCEPTION_PROLOG_2 INTERRUPT_INST_STORAGE InstructionAccess
andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
stw r5, _DSISR(r11)
stw r12, _DAR(r11)
- EXC_XFER_LITE(0x400, handle_page_fault)
+ prepare_transfer_to_handler
+ bl do_page_fault
+ b interrupt_return
/* External interrupt */
- EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)
/* Alignment exception */
- . = 0x600
- DO_KVM 0x600
-Alignment:
- EXCEPTION_PROLOG handle_dar_dsisr=1
- save_dar_dsisr_on_stack r4, r5, r11
- addi r3,r1,STACK_FRAME_OVERHEAD
- b alignment_exception_tramp
+ START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
+ EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
+ prepare_transfer_to_handler
+ bl alignment_exception
+ REST_NVGPRS(r1)
+ b interrupt_return
/* Program check exception */
- EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
+ START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
+ EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
+ prepare_transfer_to_handler
+ bl program_check_exception
+ REST_NVGPRS(r1)
+ b interrupt_return
/* Floating-point unavailable */
- . = 0x800
- DO_KVM 0x800
-FPUnavailable:
+ START_EXCEPTION(0x800, FPUnavailable)
#ifdef CONFIG_PPC_FPU
BEGIN_FTR_SECTION
/*
@@ -397,30 +368,29 @@ BEGIN_FTR_SECTION
*/
b ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
- EXCEPTION_PROLOG
+ EXCEPTION_PROLOG INTERRUPT_FP_UNAVAIL FPUnavailable
beq 1f
bl load_up_fpu /* if from user, just load it up */
b fast_exception_return
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)
+1: prepare_transfer_to_handler
+ bl kernel_fp_unavailable_exception
+ b interrupt_return
#else
b ProgramCheck
#endif
/* Decrementer */
- EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
+ EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)
- EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0xa00, Trap_0a, unknown_exception)
+ EXCEPTION(0xb00, Trap_0b, unknown_exception)
/* System call */
- . = 0xc00
- DO_KVM 0xc00
-SystemCall:
- SYSCALL_ENTRY 0xc00
+ START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
+ SYSCALL_ENTRY INTERRUPT_SYSCALL
- EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
- EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
+ EXCEPTION(0xe00, Trap_0e, unknown_exception)
/*
* The Altivec unavailable trap is at 0x0f20. Foo.
@@ -430,19 +400,18 @@ SystemCall:
* non-altivec kernel running on a machine with altivec just
* by executing an altivec instruction.
*/
- . = 0xf00
- DO_KVM 0xf00
+ START_EXCEPTION(INTERRUPT_PERFMON, PerformanceMonitorTrap)
b PerformanceMonitor
- . = 0xf20
- DO_KVM 0xf20
+ START_EXCEPTION(INTERRUPT_ALTIVEC_UNAVAIL, AltiVecUnavailableTrap)
b AltiVecUnavailable
+ __HEAD
/*
* Handle TLB miss for instruction on 603/603e.
* Note: we get an alternate set of r0 - r3 to use automatically.
*/
- . = 0x1000
+ . = INTERRUPT_INST_TLB_MISS_603
InstructionTLBMiss:
/*
* r0: scratch
@@ -457,11 +426,12 @@ InstructionTLBMiss:
cmplw 0,r1,r3
#endif
mfspr r2, SPRN_SDR1
- li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+ li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
rlwinm r2, r2, 28, 0xfffff000
#ifdef CONFIG_MODULES
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
+ li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
#endif
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
@@ -507,7 +477,7 @@ InstructionAddressInvalid:
/*
* Handle TLB miss for DATA Load operation on 603/603e
*/
- . = 0x1100
+ . = INTERRUPT_DATA_LOAD_TLB_MISS_603
DataLoadTLBMiss:
/*
* r0: scratch
@@ -520,10 +490,11 @@ DataLoadTLBMiss:
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
mfspr r2, SPRN_SDR1
- li r1, _PAGE_PRESENT | _PAGE_ACCESSED
+ li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
rlwinm r2, r2, 28, 0xfffff000
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
+ li r1, _PAGE_PRESENT | _PAGE_ACCESSED
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
lwz r2,0(r2) /* get pmd entry */
@@ -547,8 +518,6 @@ BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
mtspr SPRN_RPA,r1
- mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
- mtcrf 0x80,r2
BEGIN_MMU_FTR_SECTION
li r0,1
mfspr r1,SPRN_SPRG_603_LRU
@@ -560,9 +529,15 @@ BEGIN_MMU_FTR_SECTION
mfspr r2,SPRN_SRR1
rlwimi r2,r0,31-14,14,14
mtspr SPRN_SRR1,r2
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
+ mtcrf 0x80,r2
tlbld r3
rfi
+MMU_FTR_SECTION_ELSE
+ mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
+ mtcrf 0x80,r2
+ tlbld r3
+ rfi
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
DataAddressInvalid:
mfspr r3,SPRN_SRR1
rlwinm r1,r3,9,6,6 /* Get load/store bit */
@@ -584,7 +559,7 @@ DataAddressInvalid:
/*
* Handle TLB miss for DATA Store on 603/603e
*/
- . = 0x1200
+ . = INTERRUPT_DATA_STORE_TLB_MISS_603
DataStoreTLBMiss:
/*
* r0: scratch
@@ -597,10 +572,11 @@ DataStoreTLBMiss:
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
mfspr r2, SPRN_SDR1
- li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
+ li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
rlwinm r2, r2, 28, 0xfffff000
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
+ li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
lwz r2,0(r2) /* get pmd entry */
@@ -635,9 +611,15 @@ BEGIN_MMU_FTR_SECTION
mfspr r2,SPRN_SRR1
rlwimi r2,r0,31-14,14,14
mtspr SPRN_SRR1,r2
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
+ mtcrf 0x80,r2
+ tlbld r3
+ rfi
+MMU_FTR_SECTION_ELSE
+ mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
+ mtcrf 0x80,r2
tlbld r3
rfi
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception unknown_exception
@@ -647,57 +629,39 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
#define TAUException unknown_async_exception
#endif
- EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
- EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
- EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
- EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
- EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
- EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception)
+ EXCEPTION(0x1400, SMI, SMIException)
+ EXCEPTION(0x1500, Trap_15, unknown_exception)
+ EXCEPTION(0x1600, Trap_16, altivec_assist_exception)
+ EXCEPTION(0x1700, Trap_17, TAUException)
+ EXCEPTION(0x1800, Trap_18, unknown_exception)
+ EXCEPTION(0x1900, Trap_19, unknown_exception)
+ EXCEPTION(0x1a00, Trap_1a, unknown_exception)
+ EXCEPTION(0x1b00, Trap_1b, unknown_exception)
+ EXCEPTION(0x1c00, Trap_1c, unknown_exception)
+ EXCEPTION(0x1d00, Trap_1d, unknown_exception)
+ EXCEPTION(0x1e00, Trap_1e, unknown_exception)
+ EXCEPTION(0x1f00, Trap_1f, unknown_exception)
+ EXCEPTION(0x2000, RunMode, RunModeException)
+ EXCEPTION(0x2100, Trap_21, unknown_exception)
+ EXCEPTION(0x2200, Trap_22, unknown_exception)
+ EXCEPTION(0x2300, Trap_23, unknown_exception)
+ EXCEPTION(0x2400, Trap_24, unknown_exception)
+ EXCEPTION(0x2500, Trap_25, unknown_exception)
+ EXCEPTION(0x2600, Trap_26, unknown_exception)
+ EXCEPTION(0x2700, Trap_27, unknown_exception)
+ EXCEPTION(0x2800, Trap_28, unknown_exception)
+ EXCEPTION(0x2900, Trap_29, unknown_exception)
+ EXCEPTION(0x2a00, Trap_2a, unknown_exception)
+ EXCEPTION(0x2b00, Trap_2b, unknown_exception)
+ EXCEPTION(0x2c00, Trap_2c, unknown_exception)
+ EXCEPTION(0x2d00, Trap_2d, unknown_exception)
+ EXCEPTION(0x2e00, Trap_2e, unknown_exception)
+ EXCEPTION(0x2f00, Trap_2f, unknown_exception)
+ __HEAD
. = 0x3000
-machine_check_tramp:
- EXC_XFER_STD(0x200, machine_check_exception)
-
-alignment_exception_tramp:
- EXC_XFER_STD(0x600, alignment_exception)
-
-handle_page_fault_tramp_1:
-#ifdef CONFIG_VMAP_STACK
- EXCEPTION_PROLOG_2 handle_dar_dsisr=1
-#endif
- lwz r5, _DSISR(r11)
- /* fall through */
-handle_page_fault_tramp_2:
- andis. r0, r5, DSISR_DABRMATCH@h
- bne- 1f
- EXC_XFER_LITE(0x300, handle_page_fault)
-1: EXC_XFER_STD(0x300, do_break)
-
-#ifdef CONFIG_VMAP_STACK
#ifdef CONFIG_PPC_BOOK3S_604
.macro save_regs_thread thread
stw r0, THR0(\thread)
@@ -772,34 +736,36 @@ fast_hash_page_return:
rfi
#endif /* CONFIG_PPC_BOOK3S_604 */
-stack_overflow:
+#ifdef CONFIG_VMAP_STACK
vmap_stack_overflow_exception
#endif
+ __HEAD
AltiVecUnavailable:
- EXCEPTION_PROLOG
+ EXCEPTION_PROLOG 0xf20 AltiVecUnavailable
#ifdef CONFIG_ALTIVEC
beq 1f
bl load_up_altivec /* if from user, just load it up */
b fast_exception_return
#endif /* CONFIG_ALTIVEC */
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_LITE(0xf20, altivec_unavailable_exception)
+1: prepare_transfer_to_handler
+ bl altivec_unavailable_exception
+ b interrupt_return
+ __HEAD
PerformanceMonitor:
- EXCEPTION_PROLOG
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0xf00, performance_monitor_exception)
+ EXCEPTION_PROLOG 0xf00 PerformanceMonitor
+ prepare_transfer_to_handler
+ bl performance_monitor_exception
+ b interrupt_return
+ __HEAD
/*
* This code is jumped to from the startup code to copy
* the kernel image to physical address PHYSICAL_START.
*/
relocate_kernel:
- addis r9,r26,klimit@ha /* fetch klimit */
- lwz r25,klimit@l(r9)
- addis r25,r25,-KERNELBASE@h
lis r3,PHYSICAL_START@h /* Destination base address */
li r6,0 /* Destination offset */
li r5,0x4000 /* # bytes of memory to copy */
@@ -807,7 +773,8 @@ relocate_kernel:
addi r0,r3,4f@l /* jump to the address of 4f */
mtctr r0 /* in copy and do the rest. */
bctr /* jump to the copy */
-4: mr r5,r25
+4: lis r5,_end-KERNELBASE@h
+ ori r5,r5,_end-KERNELBASE@l
bl copy_and_flush /* copy the rest */
b turn_on_mmu
@@ -965,12 +932,6 @@ _GLOBAL(load_segment_registers)
li r0, NUM_USER_SEGMENTS /* load up user segment register values */
mtctr r0 /* for context 0 */
li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
-#ifdef CONFIG_PPC_KUEP
- oris r3, r3, SR_NX@h /* Set Nx */
-#endif
-#ifdef CONFIG_PPC_KUAP
- oris r3, r3, SR_KS@h /* Set Ks */
-#endif
li r4, 0
3: mtsrin r3, r4
addi r3, r3, 0x111 /* increment VSID */
@@ -1065,58 +1026,6 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
rfi
/*
- * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
- *
- * Set up the segment registers for a new context.
- */
-_ENTRY(switch_mmu_context)
- lwz r3,MMCONTEXTID(r4)
- cmpwi cr0,r3,0
- blt- 4f
- mulli r3,r3,897 /* multiply context by skew factor */
- rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
-#ifdef CONFIG_PPC_KUEP
- oris r3, r3, SR_NX@h /* Set Nx */
-#endif
-#ifdef CONFIG_PPC_KUAP
- oris r3, r3, SR_KS@h /* Set Ks */
-#endif
- li r0,NUM_USER_SEGMENTS
- mtctr r0
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is passed as second argument.
- */
- lwz r4, MM_PGD(r4)
- lis r5, abatron_pteptrs@ha
- stw r4, abatron_pteptrs@l + 0x4(r5)
-#endif
-BEGIN_MMU_FTR_SECTION
-#ifndef CONFIG_BDI_SWITCH
- lwz r4, MM_PGD(r4)
-#endif
- tophys(r4, r4)
- rlwinm r4, r4, 4, 0xffff01ff
- mtspr SPRN_SDR1, r4
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
- li r4,0
- isync
-3:
- mtsrin r3,r4
- addi r3,r3,0x111 /* next VSID */
- rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
- addis r4,r4,0x1000 /* address of next segment */
- bdnz 3b
- sync
- isync
- blr
-4: trap
- EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
- blr
-EXPORT_SYMBOL(switch_mmu_context)
-
-/*
* An undocumented "feature" of 604e requires that the v bit
* be cleared before changing BAT values.
*
@@ -1297,61 +1206,4 @@ setup_usbgecko_bat:
blr
#endif
-#ifdef CONFIG_8260
-/* Jump into the system reset for the rom.
- * We first disable the MMU, and then jump to the ROM reset address.
- *
- * r3 is the board info structure, r4 is the location for starting.
- * I use this for building a small kernel that can load other kernels,
- * rather than trying to write or rely on a rom monitor that can tftp load.
- */
- .globl m8260_gorom
-m8260_gorom:
- mfmsr r0
- rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
- sync
- mtmsr r0
- sync
- mfspr r11, SPRN_HID0
- lis r10, 0
- ori r10,r10,HID0_ICE|HID0_DCE
- andc r11, r11, r10
- mtspr SPRN_HID0, r11
- isync
- li r5, MSR_ME|MSR_RI
- lis r6,2f@h
- addis r6,r6,-KERNELBASE@h
- ori r6,r6,2f@l
- mtspr SPRN_SRR0,r6
- mtspr SPRN_SRR1,r5
- isync
- sync
- rfi
-2:
- mtlr r4
- blr
-#endif
-
-
-/*
- * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the data segment,
- * which is page-aligned.
- */
.data
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space 4096
-EXPORT_SYMBOL(empty_zero_page)
-
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/* Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
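The 603 software TLB-miss hunks above implement the user/kernel permission split by construction: the required-bit mask now includes _PAGE_USER by default and is reloaded without it only on the kernel-address branch, so a user-mode access to a kernel-only PTE fails the miss handler's test. A C model of that test, with bit values borrowed from the 32-bit hash PTE layout purely for illustration:

    /* Illustrative sketch of the miss handler's permission check. */
    #define _PAGE_PRESENT   0x001
    #define _PAGE_USER      0x004
    #define _PAGE_ACCESSED  0x100

    static int pte_ok_for_miss(unsigned int pte, int kernel_address)
    {
            unsigned int required = _PAGE_PRESENT | _PAGE_ACCESSED;

            if (!kernel_address)
                    required |= _PAGE_USER; /* user access needs a user page */
            return (pte & required) == required;
    }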
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 47857795f50a..87b806e8eded 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -44,7 +44,7 @@ END_BTB_FLUSH_SECTION
#endif
-#define NORMAL_EXCEPTION_PROLOG(intno) \
+#define NORMAL_EXCEPTION_PROLOG(trapno, intno) \
mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
mfspr r10, SPRN_SPRG_THREAD; \
stw r11, THREAD_NORMSAVE(0)(r10); \
@@ -53,6 +53,8 @@ END_BTB_FLUSH_SECTION
mfspr r11, SPRN_SRR1; \
DO_KVM BOOKE_INTERRUPT_##intno SPRN_SRR1; \
andi. r11, r11, MSR_PR; /* check whether user or kernel */\
+ LOAD_REG_IMMEDIATE(r11, MSR_KERNEL); \
+ mtmsr r11; \
mr r11, r1; \
beq 1f; \
BOOKE_CLEAR_BTB(r11) \
@@ -76,12 +78,39 @@ END_BTB_FLUSH_SECTION
stw r1, 0(r11); \
mr r1, r11; \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
- stw r0,GPR0(r11); \
- lis r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \
- addi r10, r10, STACK_FRAME_REGS_MARKER@l; \
- stw r10, 8(r11); \
- SAVE_4GPRS(3, r11); \
- SAVE_2GPRS(7, r11)
+ COMMON_EXCEPTION_PROLOG_END trapno
+
+.macro COMMON_EXCEPTION_PROLOG_END trapno
+ stw r0,GPR0(r1)
+ lis r10, STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
+ addi r10, r10, STACK_FRAME_REGS_MARKER@l
+ stw r10, 8(r1)
+ li r10, \trapno
+ stw r10,_TRAP(r1)
+ SAVE_4GPRS(3, r1)
+ SAVE_2GPRS(7, r1)
+ SAVE_NVGPRS(r1)
+ stw r2,GPR2(r1)
+ stw r12,_NIP(r1)
+ stw r9,_MSR(r1)
+ mfctr r10
+ mfspr r2,SPRN_SPRG_THREAD
+ stw r10,_CTR(r1)
+ tovirt(r2, r2)
+ mfspr r10,SPRN_XER
+ addi r2, r2, -THREAD
+ stw r10,_XER(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+.endm
+
+.macro prepare_transfer_to_handler
+#ifdef CONFIG_E500
+ andi. r12,r9,MSR_PR
+ bne 777f
+ bl prepare_transfer_to_handler
+777:
+#endif
+.endm
.macro SYSCALL_ENTRY trapno intno srr1
mfspr r10, SPRN_SPRG_THREAD
@@ -99,37 +128,20 @@ BEGIN_FTR_SECTION
mr r12, r13
lwz r13, THREAD_NORMSAVE(2)(r10)
FTR_SECTION_ELSE
-#endif
mfcr r12
-#ifdef CONFIG_KVM_BOOKE_HV
ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
+#else
+ mfcr r12
#endif
mfspr r9, SPRN_SRR1
BOOKE_CLEAR_BTB(r11)
- lwz r11, TASK_STACK - THREAD(r10)
+ mr r11, r1
+ lwz r1, TASK_STACK - THREAD(r10)
rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */
- ALLOC_STACK_FRAME(r11, THREAD_SIZE - INT_FRAME_SIZE)
- stw r12, _CCR(r11) /* save various registers */
- mflr r12
- stw r12,_LINK(r11)
+ ALLOC_STACK_FRAME(r1, THREAD_SIZE - INT_FRAME_SIZE)
+ stw r12, _CCR(r1)
mfspr r12,SPRN_SRR0
- stw r1, GPR1(r11)
- stw r1, 0(r11)
- mr r1, r11
- stw r12,_NIP(r11)
- rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
- lis r12, STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
- stw r2,GPR2(r11)
- addi r12, r12, STACK_FRAME_REGS_MARKER@l
- stw r9,_MSR(r11)
- li r2, \trapno
- stw r12, 8(r11)
- stw r2,_TRAP(r11)
- SAVE_GPR(0, r11)
- SAVE_4GPRS(3, r11)
- SAVE_2GPRS(7, r11)
-
- addi r2,r10,-THREAD
+ stw r12,_NIP(r1)
b transfer_to_syscall /* jump to handler */
.endm
@@ -180,7 +192,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
* registers as the normal prolog above. Instead we use a portion of the
* critical/machine check exception stack at low physical addresses.
*/
-#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, intno, exc_level_srr0, exc_level_srr1) \
+#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, trapno, intno, exc_level_srr0, exc_level_srr1) \
mtspr SPRN_SPRG_WSCRATCH_##exc_level,r8; \
BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \
stw r9,GPR9(r8); /* save various registers */\
@@ -192,6 +204,8 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \
BOOKE_CLEAR_BTB(r10) \
andi. r11,r11,MSR_PR; \
+ LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)); \
+ mtmsr r11; \
mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
lwz r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\
@@ -221,16 +235,44 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
stw r1,0(r11); \
mr r1,r11; \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
- stw r0,GPR0(r11); \
- SAVE_4GPRS(3, r11); \
- SAVE_2GPRS(7, r11)
-
-#define CRITICAL_EXCEPTION_PROLOG(intno) \
- EXC_LEVEL_EXCEPTION_PROLOG(CRIT, intno, SPRN_CSRR0, SPRN_CSRR1)
-#define DEBUG_EXCEPTION_PROLOG \
- EXC_LEVEL_EXCEPTION_PROLOG(DBG, DEBUG, SPRN_DSRR0, SPRN_DSRR1)
-#define MCHECK_EXCEPTION_PROLOG \
- EXC_LEVEL_EXCEPTION_PROLOG(MC, MACHINE_CHECK, \
+ COMMON_EXCEPTION_PROLOG_END trapno
+
+#define SAVE_xSRR(xSRR) \
+ mfspr r0,SPRN_##xSRR##0; \
+ stw r0,_##xSRR##0(r1); \
+ mfspr r0,SPRN_##xSRR##1; \
+ stw r0,_##xSRR##1(r1)
+
+
+.macro SAVE_MMU_REGS
+#ifdef CONFIG_PPC_BOOK3E_MMU
+ mfspr r0,SPRN_MAS0
+ stw r0,MAS0(r1)
+ mfspr r0,SPRN_MAS1
+ stw r0,MAS1(r1)
+ mfspr r0,SPRN_MAS2
+ stw r0,MAS2(r1)
+ mfspr r0,SPRN_MAS3
+ stw r0,MAS3(r1)
+ mfspr r0,SPRN_MAS6
+ stw r0,MAS6(r1)
+#ifdef CONFIG_PHYS_64BIT
+ mfspr r0,SPRN_MAS7
+ stw r0,MAS7(r1)
+#endif /* CONFIG_PHYS_64BIT */
+#endif /* CONFIG_PPC_BOOK3E_MMU */
+#ifdef CONFIG_44x
+ mfspr r0,SPRN_MMUCR
+ stw r0,MMUCR(r1)
+#endif
+.endm
+
+#define CRITICAL_EXCEPTION_PROLOG(trapno, intno) \
+ EXC_LEVEL_EXCEPTION_PROLOG(CRIT, trapno+2, intno, SPRN_CSRR0, SPRN_CSRR1)
+#define DEBUG_EXCEPTION_PROLOG(trapno) \
+ EXC_LEVEL_EXCEPTION_PROLOG(DBG, trapno+8, DEBUG, SPRN_DSRR0, SPRN_DSRR1)
+#define MCHECK_EXCEPTION_PROLOG(trapno) \
+ EXC_LEVEL_EXCEPTION_PROLOG(MC, trapno+4, MACHINE_CHECK, \
SPRN_MCSRR0, SPRN_MCSRR1)
/*
@@ -257,44 +299,34 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
.align 5; \
label:
-#define EXCEPTION(n, intno, label, hdlr, xfer) \
+#define EXCEPTION(n, intno, label, hdlr) \
START_EXCEPTION(label); \
- NORMAL_EXCEPTION_PROLOG(intno); \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- xfer(n, hdlr)
+ NORMAL_EXCEPTION_PROLOG(n, intno); \
+ prepare_transfer_to_handler; \
+ bl hdlr; \
+ b interrupt_return
#define CRITICAL_EXCEPTION(n, intno, label, hdlr) \
START_EXCEPTION(label); \
- CRITICAL_EXCEPTION_PROLOG(intno); \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
- crit_transfer_to_handler, ret_from_crit_exc)
+ CRITICAL_EXCEPTION_PROLOG(n, intno); \
+ SAVE_MMU_REGS; \
+ SAVE_xSRR(SRR); \
+ prepare_transfer_to_handler; \
+ bl hdlr; \
+ b ret_from_crit_exc
#define MCHECK_EXCEPTION(n, label, hdlr) \
START_EXCEPTION(label); \
- MCHECK_EXCEPTION_PROLOG; \
+ MCHECK_EXCEPTION_PROLOG(n); \
mfspr r5,SPRN_ESR; \
stw r5,_ESR(r11); \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_TEMPLATE(hdlr, n+4, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
- mcheck_transfer_to_handler, ret_from_mcheck_exc)
-
-#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret) \
- li r10,trap; \
- stw r10,_TRAP(r11); \
- lis r10,msr@h; \
- ori r10,r10,msr@l; \
- bl tfer; \
- .long hdlr; \
- .long ret
-
-#define EXC_XFER_STD(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \
- ret_from_except_full)
-
-#define EXC_XFER_LITE(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
- ret_from_except)
+ SAVE_xSRR(DSRR); \
+ SAVE_xSRR(CSRR); \
+ SAVE_MMU_REGS; \
+ SAVE_xSRR(SRR); \
+ prepare_transfer_to_handler; \
+ bl hdlr; \
+ b ret_from_mcheck_exc
/* Check for a single step debug exception while in an exception
* handler before state has been saved. This is to catch the case
@@ -311,7 +343,7 @@ label:
*/
#define DEBUG_DEBUG_EXCEPTION \
START_EXCEPTION(DebugDebug); \
- DEBUG_EXCEPTION_PROLOG; \
+ DEBUG_EXCEPTION_PROLOG(2000); \
\
/* \
* If there is a single step or branch-taken exception in an \
@@ -360,12 +392,16 @@ label:
/* continue normal handling for a debug exception... */ \
2: mfspr r4,SPRN_DBSR; \
stw r4,_ESR(r11); /* DebugException takes DBSR in _ESR */\
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_TEMPLATE(DebugException, 0x2008, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), debug_transfer_to_handler, ret_from_debug_exc)
+ SAVE_xSRR(CSRR); \
+ SAVE_MMU_REGS; \
+ SAVE_xSRR(SRR); \
+ prepare_transfer_to_handler; \
+ bl DebugException; \
+ b ret_from_debug_exc
#define DEBUG_CRIT_EXCEPTION \
START_EXCEPTION(DebugCrit); \
- CRITICAL_EXCEPTION_PROLOG(DEBUG); \
+ CRITICAL_EXCEPTION_PROLOG(2000,DEBUG); \
\
/* \
* If there is a single step or branch-taken exception in an \
@@ -414,58 +450,71 @@ label:
/* continue normal handling for a critical exception... */ \
2: mfspr r4,SPRN_DBSR; \
stw r4,_ESR(r11); /* DebugException takes DBSR in _ESR */\
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), crit_transfer_to_handler, ret_from_crit_exc)
+ SAVE_MMU_REGS; \
+ SAVE_xSRR(SRR); \
+ prepare_transfer_to_handler; \
+ bl DebugException; \
+ b ret_from_crit_exc
#define DATA_STORAGE_EXCEPTION \
START_EXCEPTION(DataStorage) \
- NORMAL_EXCEPTION_PROLOG(DATA_STORAGE); \
+ NORMAL_EXCEPTION_PROLOG(0x300, DATA_STORAGE); \
mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
stw r5,_ESR(r11); \
mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \
stw r4, _DEAR(r11); \
- EXC_XFER_LITE(0x0300, handle_page_fault)
+ prepare_transfer_to_handler; \
+ bl do_page_fault; \
+ b interrupt_return
#define INSTRUCTION_STORAGE_EXCEPTION \
START_EXCEPTION(InstructionStorage) \
- NORMAL_EXCEPTION_PROLOG(INST_STORAGE); \
+ NORMAL_EXCEPTION_PROLOG(0x400, INST_STORAGE); \
mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
stw r5,_ESR(r11); \
stw r12, _DEAR(r11); /* Pass SRR0 as arg2 */ \
- EXC_XFER_LITE(0x0400, handle_page_fault)
+ prepare_transfer_to_handler; \
+ bl do_page_fault; \
+ b interrupt_return
#define ALIGNMENT_EXCEPTION \
START_EXCEPTION(Alignment) \
- NORMAL_EXCEPTION_PROLOG(ALIGNMENT); \
+ NORMAL_EXCEPTION_PROLOG(0x600, ALIGNMENT); \
mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \
stw r4,_DEAR(r11); \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_STD(0x0600, alignment_exception)
+ prepare_transfer_to_handler; \
+ bl alignment_exception; \
+ REST_NVGPRS(r1); \
+ b interrupt_return
#define PROGRAM_EXCEPTION \
START_EXCEPTION(Program) \
- NORMAL_EXCEPTION_PROLOG(PROGRAM); \
+ NORMAL_EXCEPTION_PROLOG(0x700, PROGRAM); \
mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \
stw r4,_ESR(r11); \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_STD(0x0700, program_check_exception)
+ prepare_transfer_to_handler; \
+ bl program_check_exception; \
+ REST_NVGPRS(r1); \
+ b interrupt_return
#define DECREMENTER_EXCEPTION \
START_EXCEPTION(Decrementer) \
- NORMAL_EXCEPTION_PROLOG(DECREMENTER); \
+ NORMAL_EXCEPTION_PROLOG(0x900, DECREMENTER); \
lis r0,TSR_DIS@h; /* Setup the DEC interrupt mask */ \
mtspr SPRN_TSR,r0; /* Clear the DEC interrupt */ \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_LITE(0x0900, timer_interrupt)
+ prepare_transfer_to_handler; \
+ bl timer_interrupt; \
+ b interrupt_return
#define FP_UNAVAILABLE_EXCEPTION \
START_EXCEPTION(FloatingPointUnavailable) \
- NORMAL_EXCEPTION_PROLOG(FP_UNAVAIL); \
+ NORMAL_EXCEPTION_PROLOG(0x800, FP_UNAVAIL); \
beq 1f; \
bl load_up_fpu; /* if from user, just load it up */ \
b fast_exception_return; \
-1: addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_STD(0x800, kernel_fp_unavailable_exception)
+1: prepare_transfer_to_handler; \
+ bl kernel_fp_unavailable_exception; \
+ b interrupt_return
#else /* __ASSEMBLY__ */
struct exception_regs {
@@ -481,7 +530,6 @@ struct exception_regs {
unsigned long csrr1;
unsigned long dsrr0;
unsigned long dsrr1;
- unsigned long saved_ksp_limit;
};
/* ensure this structure is always sized to a multiple of the stack alignment */
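The head_booke.h rework retires the EXC_XFER_* trampoline tables: COMMON_EXCEPTION_PROLOG_END now stores the trap number into _TRAP(r1) and saves the non-volatile GPRs, so every vector macro reduces to a direct bl to the C handler followed by a branch to the matching return path. A pseudo-C rendering of the generated flow (interrupt_return and ret_from_crit_exc are asm entry points; the function form here is purely illustrative):

    struct pt_regs { unsigned long trap; /* plus GPRs, MSR, CTR, XER, ... */ };

    extern void interrupt_return(void); /* really an asm label */

    static void booke_exception(struct pt_regs *regs, unsigned long trapno,
                                void (*hdlr)(struct pt_regs *))
    {
            regs->trap = trapno;    /* li r10,\trapno ; stw r10,_TRAP(r1) */
            hdlr(regs);             /* bl hdlr */
            interrupt_return();     /* b interrupt_return, never returns */
    }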
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 3f4a40cccef5..0f9642f36b49 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -113,7 +113,7 @@ _ENTRY(_start);
1:
/*
- * We have the runtime (virutal) address of our base.
+ * We have the runtime (virtual) address of our base.
* We calculate our shift of offset from a 64M page.
* We could map the 64M page we belong to at PAGE_OFFSET and
* get going from there.
@@ -363,23 +363,26 @@ interrupt_base:
/* Data Storage Interrupt */
START_EXCEPTION(DataStorage)
- NORMAL_EXCEPTION_PROLOG(DATA_STORAGE)
+ NORMAL_EXCEPTION_PROLOG(0x300, DATA_STORAGE)
mfspr r5,SPRN_ESR /* Grab the ESR, save it */
stw r5,_ESR(r11)
mfspr r4,SPRN_DEAR /* Grab the DEAR, save it */
stw r4, _DEAR(r11)
andis. r10,r5,(ESR_ILK|ESR_DLK)@h
bne 1f
- EXC_XFER_LITE(0x0300, handle_page_fault)
+ prepare_transfer_to_handler
+ bl do_page_fault
+ b interrupt_return
1:
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_LITE(0x0300, CacheLockingException)
+ prepare_transfer_to_handler
+ bl CacheLockingException
+ b interrupt_return
/* Instruction Storage Interrupt */
INSTRUCTION_STORAGE_EXCEPTION
/* External Input Interrupt */
- EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ)
/* Alignment Interrupt */
ALIGNMENT_EXCEPTION
@@ -391,8 +394,7 @@ interrupt_base:
#ifdef CONFIG_PPC_FPU
FP_UNAVAILABLE_EXCEPTION
#else
- EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
- unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception)
#endif
/* System Call Interrupt */
@@ -400,16 +402,14 @@ interrupt_base:
SYSCALL_ENTRY 0xc00 BOOKE_INTERRUPT_SYSCALL SPRN_SRR1
/* Auxiliary Processor Unavailable Interrupt */
- EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \
- unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, unknown_exception)
/* Decrementer Interrupt */
DECREMENTER_EXCEPTION
/* Fixed Internal Timer Interrupt */
/* TODO: Add FIT support */
- EXCEPTION(0x3100, FIT, FixedIntervalTimer, \
- unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x3100, FIT, FixedIntervalTimer, unknown_exception)
/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
@@ -497,7 +497,7 @@ END_BTB_FLUSH_SECTION
#endif
#endif
- bne 2f /* Bail if permission/valid mismach */
+ bne 2f /* Bail if permission/valid mismatch */
/* Jump to common tlb load */
b finish_tlb_load
@@ -592,7 +592,7 @@ END_BTB_FLUSH_SECTION
#endif
#endif
- bne 2f /* Bail if permission mismach */
+ bne 2f /* Bail if permission mismatch */
/* Jump to common TLB load point */
b finish_tlb_load
@@ -614,38 +614,44 @@ END_BTB_FLUSH_SECTION
#ifdef CONFIG_SPE
/* SPE Unavailable */
START_EXCEPTION(SPEUnavailable)
- NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
+ NORMAL_EXCEPTION_PROLOG(0x2010, SPE_UNAVAIL)
beq 1f
bl load_up_spe
b fast_exception_return
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_LITE(0x2010, KernelSPE)
+1: prepare_transfer_to_handler
+ bl KernelSPE
+ b interrupt_return
#elif defined(CONFIG_SPE_POSSIBLE)
- EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
- unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, unknown_exception)
#endif /* CONFIG_SPE_POSSIBLE */
/* SPE Floating Point Data */
#ifdef CONFIG_SPE
- EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData,
- SPEFloatingPointException, EXC_XFER_STD)
+ START_EXCEPTION(SPEFloatingPointData)
+ NORMAL_EXCEPTION_PROLOG(0x2030, SPE_FP_DATA)
+ prepare_transfer_to_handler
+ bl SPEFloatingPointException
+ REST_NVGPRS(r1)
+ b interrupt_return
/* SPE Floating Point Round */
- EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
- SPEFloatingPointRoundException, EXC_XFER_STD)
+ START_EXCEPTION(SPEFloatingPointRound)
+ NORMAL_EXCEPTION_PROLOG(0x2050, SPE_FP_ROUND)
+ prepare_transfer_to_handler
+ bl SPEFloatingPointRoundException
+ REST_NVGPRS(r1)
+ b interrupt_return
#elif defined(CONFIG_SPE_POSSIBLE)
- EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData,
- unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
- unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, unknown_exception)
+ EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, unknown_exception)
#endif /* CONFIG_SPE_POSSIBLE */
/* Performance Monitor */
EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
- performance_monitor_exception, EXC_XFER_STD)
+ performance_monitor_exception)
- EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception, EXC_XFER_STD)
+ EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception)
CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \
CriticalDoorbell, unknown_exception)
@@ -660,10 +666,10 @@ END_BTB_FLUSH_SECTION
unknown_exception)
/* Hypercall */
- EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception)
/* Embedded Hypervisor Privilege */
- EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception)
interrupt_end:
@@ -854,7 +860,7 @@ KernelSPE:
lwz r5,_NIP(r1)
bl printk
#endif
- b ret_from_except
+ b interrupt_return
#ifdef CONFIG_PRINTK
87: .string "SPE used in kernel (task=%p, pc=%x) \n"
#endif
@@ -979,20 +985,6 @@ _GLOBAL(abort)
mtspr SPRN_DBCR0,r13
isync
-_GLOBAL(set_context)
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@h
- ori r5, r5, abatron_pteptrs@l
- stw r4, 0x4(r5)
-#endif
- mtspr SPRN_PID,r3
- isync /* Force context change */
- blr
-
#ifdef CONFIG_SMP
/* When we get here, r24 needs to hold the CPU # */
.globl __secondary_start
@@ -1220,26 +1212,3 @@ _GLOBAL(restore_to_as0)
*/
3: mr r3,r5
bl _start
-
-/*
- * We put a few things here that have to be page-aligned. This stuff
- * goes at the beginning of the data segment, which is page-aligned.
- */
- .data
- .align 12
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space 4096
-EXPORT_SYMBOL(empty_zero_page)
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/*
- * Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
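In the head_fsl_booke.S DataStorage hunk, the ESR lock bits pick the handler before any transfer happens: cache-locking faults go to CacheLockingException, everything else to do_page_fault. A C model of the dispatch; bit values and handler signatures are assumptions for illustration:

    struct pt_regs;
    extern void CacheLockingException(struct pt_regs *regs);
    extern void do_page_fault(struct pt_regs *regs);

    #define ESR_ILK 0x00100000      /* instruction cache locking (illustrative) */
    #define ESR_DLK 0x00200000      /* data cache locking (illustrative) */

    static void data_storage_dispatch(struct pt_regs *regs, unsigned long esr)
    {
            if (esr & (ESR_ILK | ESR_DLK))
                    CacheLockingException(regs);    /* lock-fault path */
            else
                    do_page_fault(regs);            /* normal page fault */
    }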
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 8fc7a14e4d71..21a638aff72f 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -486,7 +486,7 @@ void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
return;
reset:
- regs->msr &= ~MSR_SE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_SE);
for (i = 0; i < nr_wp_slots(); i++) {
info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
__set_breakpoint(i, info);
@@ -537,7 +537,7 @@ static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
current->thread.last_hit_ubp[i] = bp[i];
info[i] = NULL;
}
- regs->msr |= MSR_SE;
+ regs_set_return_msr(regs, regs->msr | MSR_SE);
return false;
}
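The hw_breakpoint.c hunks route all MSR updates through regs_set_return_msr() instead of writing regs->msr directly, giving the interrupt-exit code a single choke point for return-state changes. A minimal sketch of such an accessor, assuming nothing beyond the plain store (the mainline helper may also maintain exit bookkeeping):

    struct pt_regs { unsigned long msr; /* ... */ };

    static inline void regs_set_return_msr(struct pt_regs *regs,
                                           unsigned long msr)
    {
            regs->msr = msr;
    }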
diff --git a/arch/powerpc/kernel/hw_breakpoint_constraints.c b/arch/powerpc/kernel/hw_breakpoint_constraints.c
index 867ee4aa026a..675d1f66ab72 100644
--- a/arch/powerpc/kernel/hw_breakpoint_constraints.c
+++ b/arch/powerpc/kernel/hw_breakpoint_constraints.c
@@ -141,7 +141,7 @@ void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
{
struct instruction_op op;
- if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip))
+ if (__get_user_instr(*instr, (void __user *)regs->nip))
return;
analyse_instr(&op, regs, *instr);
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 69df840f7253..13cad9297d82 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -145,9 +145,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/*
* Return from NAP/DOZE mode, restore some CPU specific registers,
- * we are called with DR/IR still off and r2 containing physical
- * address of current. R11 points to the exception frame (physical
- * address). We have to preserve r10.
+ * R11 points to the exception frame. We have to preserve r10.
*/
_GLOBAL(power_save_ppc32_restore)
lwz r9,_LINK(r11) /* interrupted in ppc6xx_idle: */
@@ -166,11 +164,7 @@ BEGIN_FTR_SECTION
mfspr r9,SPRN_HID0
andis. r9,r9,HID0_NAP@h
beq 1f
-#ifdef CONFIG_VMAP_STACK
addis r9, r11, nap_save_msscr0@ha
-#else
- addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
-#endif
lwz r9,nap_save_msscr0@l(r9)
mtspr SPRN_MSSCR0, r9
sync
@@ -178,15 +172,11 @@ BEGIN_FTR_SECTION
1:
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
-#ifdef CONFIG_VMAP_STACK
addis r9, r11, nap_save_hid1@ha
-#else
- addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
-#endif
lwz r9,nap_save_hid1@l(r9)
mtspr SPRN_HID1, r9
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
- b transfer_to_handler_cont
+ blr
_ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore)
.data
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index f9e6d83e6720..abb719b21cae 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -209,4 +209,8 @@ _GLOBAL(power4_idle_nap)
mtmsrd r7
isync
b 1b
+
+ .globl power4_idle_nap_return
+power4_idle_nap_return:
+ blr
#endif
diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S
index 72c85b6f3898..9e1bc4502c50 100644
--- a/arch/powerpc/kernel/idle_e500.S
+++ b/arch/powerpc/kernel/idle_e500.S
@@ -74,20 +74,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
/*
* Return from NAP/DOZE mode, restore some CPU specific registers,
- * r2 containing physical address of current.
- * r11 points to the exception frame (physical address).
+ * r2 containing address of current.
+ * r11 points to the exception frame.
* We have to preserve r10.
*/
_GLOBAL(power_save_ppc32_restore)
lwz r9,_LINK(r11) /* interrupted in e500_idle */
stw r9,_NIP(r11) /* make it do a blr */
-
-#ifdef CONFIG_SMP
- lwz r11,TASK_CPU(r2) /* get cpu number * 4 */
- slwi r11,r11,2
-#else
- li r11,0
-#endif
-
- b transfer_to_handler_cont
+ blr
_ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore)
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index 398cd86b6ada..21bbd615ca41 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -3,6 +3,7 @@
#include <linux/context_tracking.h>
#include <linux/err.h>
#include <linux/compat.h>
+#include <linux/sched/debug.h> /* for show_regs */
#include <asm/asm-prototypes.h>
#include <asm/kup.h>
@@ -20,8 +21,59 @@
#include <asm/time.h>
#include <asm/unistd.h>
+#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
+unsigned long global_dbcr0[NR_CPUS];
+#endif
+
typedef long (*syscall_fn)(long, long, long, long, long, long);
+#ifdef CONFIG_PPC_BOOK3S_64
+DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
+static inline bool exit_must_hard_disable(void)
+{
+ return static_branch_unlikely(&interrupt_exit_not_reentrant);
+}
+#else
+static inline bool exit_must_hard_disable(void)
+{
+ return true;
+}
+#endif
+
+/*
+ * Local irqs must be disabled. Returns false if the caller must re-enable
+ * them, check for new work, and try again.
+ *
+ * This should be called with local irqs disabled, whether returning from a
+ * process-context (synchronous) interrupt that had them enabled or from an
+ * asynchronous one that did not.
+ *
+ * If restartable is true then EE/RI can be left on because interrupts are
+ * handled with a restart sequence.
+ */
+static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
+{
+ /* This must be done with RI=1 because tracing may touch vmaps */
+ trace_hardirqs_on();
+
+ if (exit_must_hard_disable() || !restartable)
+ __hard_EE_RI_disable();
+
+#ifdef CONFIG_PPC64
+ /* This pattern matches prep_irq_for_idle */
+ if (unlikely(lazy_irq_pending_nocheck())) {
+ if (exit_must_hard_disable() || !restartable) {
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ __hard_RI_enable();
+ }
+ trace_hardirqs_off();
+
+ return false;
+ }
+#endif
+ return true;
+}
+
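[Note: the boolean return is consumed in a retry loop; the user-exit path added later in this patch does exactly this (condensed here from interrupt_exit_user_prepare_main()):

    user_enter_irqoff();
    if (!prep_irq_for_enabled_exit(true)) {
            /* An interrupt became pending: back out, re-enable, retry. */
            user_exit_irqoff();
            local_irq_enable();
            local_irq_disable();
            goto again;  /* 'again' labels the top of the exit-work loop */
    }
]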
/* Has to run notrace because it is entered not completely "reconciled" */
notrace long system_call_exception(long r3, long r4, long r5,
long r6, long r7, long r8,
@@ -29,20 +81,21 @@ notrace long system_call_exception(long r3, long r4, long r5,
{
syscall_fn f;
+ kuep_lock();
+
regs->orig_gpr3 = r3;
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+ trace_hardirqs_off(); /* finish reconciling */
+
CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
user_exit_irqoff();
- trace_hardirqs_off(); /* finish reconciling */
-
if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
BUG_ON(!(regs->msr & MSR_RI));
BUG_ON(!(regs->msr & MSR_PR));
- BUG_ON(!FULL_REGS(regs));
BUG_ON(arch_irq_disabled_regs(regs));
#ifdef CONFIG_PPC_PKEY
@@ -69,9 +122,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
isync();
} else
#endif
-#ifdef CONFIG_PPC64
- kuap_check_amr();
-#endif
+ kuap_assert_locked();
booke_restore_dbcr0();
@@ -141,71 +192,6 @@ notrace long system_call_exception(long r3, long r4, long r5,
return f(r3, r4, r5, r6, r7, r8);
}
-/*
- * local irqs must be disabled. Returns false if the caller must re-enable
- * them, check for new work, and try again.
- *
- * This should be called with local irqs disabled, but if they were previously
- * enabled when the interrupt handler returns (indicating a process-context /
- * synchronous interrupt) then irqs_enabled should be true.
- */
-static notrace inline bool __prep_irq_for_enabled_exit(bool clear_ri)
-{
- /* This must be done with RI=1 because tracing may touch vmaps */
- trace_hardirqs_on();
-
- /* This pattern matches prep_irq_for_idle */
- if (clear_ri)
- __hard_EE_RI_disable();
- else
- __hard_irq_disable();
-#ifdef CONFIG_PPC64
- if (unlikely(lazy_irq_pending_nocheck())) {
- /* Took an interrupt, may have more exit work to do. */
- if (clear_ri)
- __hard_RI_enable();
- trace_hardirqs_off();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-
- return false;
- }
- local_paca->irq_happened = 0;
- irq_soft_mask_set(IRQS_ENABLED);
-#endif
- return true;
-}
-
-static notrace inline bool prep_irq_for_enabled_exit(bool clear_ri, bool irqs_enabled)
-{
- if (__prep_irq_for_enabled_exit(clear_ri))
- return true;
-
- /*
- * Must replay pending soft-masked interrupts now. Don't just
- * local_irq_enabe(); local_irq_disable(); because if we are
- * returning from an asynchronous interrupt here, another one
- * might hit after irqs are enabled, and it would exit via this
- * same path allowing another to fire, and so on unbounded.
- *
- * If interrupts were enabled when this interrupt exited,
- * indicating a process context (synchronous) interrupt,
- * local_irq_enable/disable can be used, which will enable
- * interrupts rather than keeping them masked (unclear how
- * much benefit this is over just replaying for all cases,
- * because we immediately disable again, so all we're really
- * doing is allowing hard interrupts to execute directly for
- * a very small time, rather than being masked and replayed).
- */
- if (irqs_enabled) {
- local_irq_enable();
- local_irq_disable();
- } else {
- replay_soft_interrupts();
- }
-
- return false;
-}
-
static notrace void booke_load_dbcr0(void)
{
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -228,59 +214,92 @@ static notrace void booke_load_dbcr0(void)
#endif
}
-/*
- * This should be called after a syscall returns, with r3 the return value
- * from the syscall. If this function returns non-zero, the system call
- * exit assembly should additionally load all GPR registers and CTR and XER
- * from the interrupt frame.
- *
- * The function graph tracer can not trace the return side of this function,
- * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
- */
-notrace unsigned long syscall_exit_prepare(unsigned long r3,
- struct pt_regs *regs,
- long scv)
+static void check_return_regs_valid(struct pt_regs *regs)
{
- unsigned long ti_flags;
- unsigned long ret = 0;
- bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
+#ifdef CONFIG_PPC_BOOK3S_64
+ unsigned long trap, srr0, srr1;
+ static bool warned;
+ u8 *validp;
+ char *h;
- CT_WARN_ON(ct_state() == CONTEXT_USER);
-
-#ifdef CONFIG_PPC64
- kuap_check_amr();
-#endif
+ if (trap_is_scv(regs))
+ return;
- regs->result = r3;
+ trap = regs->trap;
+ // An EE interrupt in HV mode is delivered via the HSRRs, like 0xea0
+ if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
+ trap = 0xea0;
+
+ switch (trap) {
+ case 0x980:
+ case INTERRUPT_H_DATA_STORAGE:
+ case 0xe20:
+ case 0xe40:
+ case INTERRUPT_HMI:
+ case 0xe80:
+ case 0xea0:
+ case INTERRUPT_H_FAC_UNAVAIL:
+ case 0x1200:
+ case 0x1500:
+ case 0x1600:
+ case 0x1800:
+ validp = &local_paca->hsrr_valid;
+ if (!*validp)
+ return;
+
+ srr0 = mfspr(SPRN_HSRR0);
+ srr1 = mfspr(SPRN_HSRR1);
+ h = "H";
+
+ break;
+ default:
+ validp = &local_paca->srr_valid;
+ if (!*validp)
+ return;
+
+ srr0 = mfspr(SPRN_SRR0);
+ srr1 = mfspr(SPRN_SRR1);
+ h = "";
+ break;
+ }
- /* Check whether the syscall is issued inside a restartable sequence */
- rseq_syscall(regs);
+ if (srr0 == regs->nip && srr1 == regs->msr)
+ return;
- ti_flags = current_thread_info()->flags;
+ /*
+ * An NMI / soft-NMI interrupt may have come in after we found
+ * srr_valid and before the SRRs are loaded. The interrupt
+ * clobbers the SRRs and clears srr_valid. Then we load the
+ * SRRs here, test them above, and find they don't match.
+ *
+ * Test validity again after that, to catch such false positives.
+ *
+ * This test in general will have some window for false negatives
+ * and may not catch and fix all such cases if an NMI comes in
+ * later and clobbers SRRs without clearing srr_valid, but hopefully
+ * such things will get caught most of the time, statistically
+ * enough to be able to get a warning out.
+ */
+ barrier();
- if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
- if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
- r3 = -r3;
- regs->ccr |= 0x10000000; /* Set SO bit in CR */
- }
- }
+ if (!*validp)
+ return;
- if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
- if (ti_flags & _TIF_RESTOREALL)
- ret = _TIF_RESTOREALL;
- else
- regs->gpr[3] = r3;
- clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
- } else {
- regs->gpr[3] = r3;
+ if (!warned) {
+ warned = true;
+ printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
+ printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
+ show_regs(regs);
}
- if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
- do_syscall_trace_leave(regs);
- ret |= _TIF_RESTOREALL;
- }
+ *validp = 0; /* fixup */
+#endif
+}
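[Note: the recheck-after-barrier is the load-bearing part — an NMI can invalidate the flag between the first check and the SPR reads, so a mismatch alone does not prove corruption. The shape of the idiom, stripped of the trap-number dispatch:

    if (!*validp)
            return;                  /* nothing cached, nothing to verify */
    srr0 = mfspr(SPRN_SRR0);         /* read the guarded data */
    if (srr0 == regs->nip)
            return;                  /* cache and regs agree */
    barrier();                       /* order the flag re-read after the SPR read */
    if (!*validp)
            return;                  /* an NMI invalidated it: false positive */
    /* genuine mismatch: warn once and clear the flag as a fixup */
]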
- local_irq_disable();
+static notrace unsigned long
+interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
+{
+ unsigned long ti_flags;
again:
ti_flags = READ_ONCE(current_thread_info()->flags);
@@ -302,7 +321,7 @@ again:
ti_flags = READ_ONCE(current_thread_info()->flags);
}
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
unlikely((ti_flags & _TIF_RESTORE_TM))) {
restore_tm_state(regs);
@@ -326,10 +345,10 @@ again:
}
}
- user_enter_irqoff();
+ check_return_regs_valid(regs);
- /* scv need not set RI=0 because SRRs are not used */
- if (unlikely(!__prep_irq_for_enabled_exit(is_not_scv))) {
+ user_enter_irqoff();
+ if (!prep_irq_for_enabled_exit(true)) {
user_exit_irqoff();
local_irq_enable();
local_irq_disable();
@@ -344,129 +363,151 @@ again:
account_cpu_user_exit();
-#ifdef CONFIG_PPC_BOOK3S_64 /* BOOK3E and ppc32 not using this */
- /*
- * We do this at the end so that we do context switch with KERNEL AMR
- */
+ /* Restore user access locks last */
kuap_user_restore(regs);
-#endif
+ kuep_unlock();
+
return ret;
}
-#ifndef CONFIG_PPC_BOOK3E_64 /* BOOK3E not yet using this */
-notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
+/*
+ * This should be called after a syscall returns, with r3 the return value
+ * from the syscall. If this function returns non-zero, the system call
+ * exit assembly should additionally load all GPR registers and CTR and XER
+ * from the interrupt frame.
+ *
+ * The function graph tracer can not trace the return side of this function,
+ * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
+ */
+notrace unsigned long syscall_exit_prepare(unsigned long r3,
+ struct pt_regs *regs,
+ long scv)
{
unsigned long ti_flags;
- unsigned long flags;
unsigned long ret = 0;
+ bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
- if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
- BUG_ON(!(regs->msr & MSR_RI));
- BUG_ON(!(regs->msr & MSR_PR));
- BUG_ON(!FULL_REGS(regs));
- BUG_ON(arch_irq_disabled_regs(regs));
CT_WARN_ON(ct_state() == CONTEXT_USER);
- /*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * AMR can only have been unlocked if we interrupted the kernel.
- */
-#ifdef CONFIG_PPC64
- kuap_check_amr();
-#endif
+ kuap_assert_locked();
- local_irq_save(flags);
-
-again:
- ti_flags = READ_ONCE(current_thread_info()->flags);
- while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
- local_irq_enable(); /* returning to user: may enable */
- if (ti_flags & _TIF_NEED_RESCHED) {
- schedule();
- } else {
- if (ti_flags & _TIF_SIGPENDING)
- ret |= _TIF_RESTOREALL;
- do_notify_resume(regs, ti_flags);
- }
- local_irq_disable();
- ti_flags = READ_ONCE(current_thread_info()->flags);
- }
+ regs->result = r3;
- if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
- if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
- unlikely((ti_flags & _TIF_RESTORE_TM))) {
- restore_tm_state(regs);
- } else {
- unsigned long mathflags = MSR_FP;
+ /* Check whether the syscall is issued inside a restartable sequence */
+ rseq_syscall(regs);
- if (cpu_has_feature(CPU_FTR_VSX))
- mathflags |= MSR_VEC | MSR_VSX;
- else if (cpu_has_feature(CPU_FTR_ALTIVEC))
- mathflags |= MSR_VEC;
+ ti_flags = current_thread_info()->flags;
- /* See above restore_math comment */
- if ((regs->msr & mathflags) != mathflags)
- restore_math(regs);
+ if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
+ if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
+ r3 = -r3;
+ regs->ccr |= 0x10000000; /* Set SO bit in CR */
}
}
- user_enter_irqoff();
+ if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
+ if (ti_flags & _TIF_RESTOREALL)
+ ret = _TIF_RESTOREALL;
+ else
+ regs->gpr[3] = r3;
+ clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
+ } else {
+ regs->gpr[3] = r3;
+ }
- if (unlikely(!__prep_irq_for_enabled_exit(true))) {
- user_exit_irqoff();
- local_irq_enable();
- local_irq_disable();
- goto again;
+ if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
+ do_syscall_trace_leave(regs);
+ ret |= _TIF_RESTOREALL;
}
- booke_load_dbcr0();
+ local_irq_disable();
+ ret = interrupt_exit_user_prepare_main(ret, regs);
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- local_paca->tm_scratch = regs->msr;
+#ifdef CONFIG_PPC64
+ regs->exit_result = ret;
#endif
- account_cpu_user_exit();
+ return ret;
+}
+
+#ifdef CONFIG_PPC64
+notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs)
+{
+ /*
+ * This is called when detecting a soft-pending interrupt as well as
+ * an alternate-return interrupt. So we can't just have the alternate
+ * return path clear SRR1[MSR] and set PACA_IRQ_HARD_DIS (unless
+ * the soft-pending case were to fix things up as well). RI might be
+ * disabled, in which case it gets re-enabled by __hard_irq_disable().
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ set_kuap(AMR_KUAP_BLOCKED);
+#endif
+
+ trace_hardirqs_off();
+ user_exit_irqoff();
+ account_cpu_user_entry();
+
+ BUG_ON(!user_mode(regs));
+
+ regs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);
+
+ return regs->exit_result;
+}
+#endif
+
+notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
+{
+ unsigned long ret;
+
+ if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x))
+ BUG_ON(!(regs->msr & MSR_RI));
+ BUG_ON(!(regs->msr & MSR_PR));
+ BUG_ON(arch_irq_disabled_regs(regs));
+ CT_WARN_ON(ct_state() == CONTEXT_USER);
/*
- * We do this at the end so that we do context switch with KERNEL AMR
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * AMR can only have been unlocked if we interrupted the kernel.
*/
+ kuap_assert_locked();
+
+ local_irq_disable();
+
+ ret = interrupt_exit_user_prepare_main(0, regs);
+
#ifdef CONFIG_PPC64
- kuap_user_restore(regs);
+ regs->exit_result = ret;
#endif
+
return ret;
}
-void unrecoverable_exception(struct pt_regs *regs);
void preempt_schedule_irq(void);
-notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr)
+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
{
unsigned long flags;
unsigned long ret = 0;
-#ifdef CONFIG_PPC64
- unsigned long amr;
-#endif
+ unsigned long kuap;
+ bool stack_store = current_thread_info()->flags &
+ _TIF_EMULATE_STACK_STORE;
if (!IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x) &&
unlikely(!(regs->msr & MSR_RI)))
unrecoverable_exception(regs);
BUG_ON(regs->msr & MSR_PR);
- BUG_ON(!FULL_REGS(regs));
/*
* CT_WARN_ON comes here via program_check_exception,
* so avoid recursion.
*/
- if (TRAP(regs) != 0x700)
+ if (TRAP(regs) != INTERRUPT_PROGRAM)
CT_WARN_ON(ct_state() == CONTEXT_USER);
-#ifdef CONFIG_PPC64
- amr = kuap_get_and_check_amr();
-#endif
-
- if (unlikely(current_thread_info()->flags & _TIF_EMULATE_STACK_STORE)) {
- clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
- ret = 1;
- }
+ kuap = kuap_get_and_assert_locked();
local_irq_save(flags);
@@ -482,31 +523,112 @@ again:
}
}
- if (unlikely(!prep_irq_for_enabled_exit(true, !irqs_disabled_flags(flags))))
+ check_return_regs_valid(regs);
+
+ /*
+ * Stack store exit can't be restarted because the interrupt
+ * stack frame might have been clobbered.
+ */
+ if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
+ /*
+ * Replay pending soft-masked interrupts now. Don't
+ * just local_irq_enable(); local_irq_disable(); because
+ * if we are returning from an asynchronous interrupt
+ * here, another one might hit after irqs are enabled,
+ * and it would exit via this same path allowing
+ * another to fire, and so on unbounded.
+ */
+ hard_irq_disable();
+ replay_soft_interrupts();
+ /* Took an interrupt, may have more exit work to do. */
goto again;
- } else {
- /* Returning to a kernel context with local irqs disabled. */
- __hard_EE_RI_disable();
+ }
#ifdef CONFIG_PPC64
+ /*
+ * An interrupt may clear MSR[EE] and set this concurrently,
+ * but it will be marked pending and the exit will be retried.
+ * This leaves a racy window where MSR[EE]=0 and HARD_DIS is
+ * clear, until interrupt_exit_kernel_restart() calls
+ * hard_irq_disable(), which will set HARD_DIS again.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+
+ } else {
+ check_return_regs_valid(regs);
+
+ if (unlikely(stack_store))
+ __hard_EE_RI_disable();
+ /*
+ * Returning to a kernel context with local irqs disabled.
+ * Here, if EE was enabled in the interrupted context, enable
+ * it on return as well. A problem exists here where a soft
+ * masked interrupt may have cleared MSR[EE] and set HARD_DIS
+ * here, and it will still exist on return to the caller. This
+ * will be resolved by the masked interrupt firing again.
+ */
if (regs->msr & MSR_EE)
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
-#endif
+#endif /* CONFIG_PPC64 */
}
+ if (unlikely(stack_store)) {
+ clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
+ ret = 1;
+ }
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
local_paca->tm_scratch = regs->msr;
#endif
/*
- * Don't want to mfspr(SPRN_AMR) here, because this comes after mtmsr,
- * which would cause Read-After-Write stalls. Hence, we take the AMR
- * value from the check above.
+ * 64s does not want to mfspr(SPRN_AMR) here, because this comes after
+ * mtmsr, which would cause Read-After-Write stalls. Hence, take the
+ * AMR value from the check above.
*/
+ kuap_kernel_restore(regs, kuap);
+
+ return ret;
+}
+
#ifdef CONFIG_PPC64
- kuap_kernel_restore(regs, amr);
+notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)
+{
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ set_kuap(AMR_KUAP_BLOCKED);
#endif
- return ret;
+ trace_hardirqs_off();
+ user_exit_irqoff();
+ account_cpu_user_entry();
+
+ BUG_ON(!user_mode(regs));
+
+ regs->exit_result |= interrupt_exit_user_prepare(regs);
+
+ return regs->exit_result;
+}
+
+/*
+ * No real need to return a value here because the stack store case does not
+ * get restarted.
+ */
+notrace unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs)
+{
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ set_kuap(AMR_KUAP_BLOCKED);
+#endif
+
+ if (regs->softe == IRQS_ENABLED)
+ trace_hardirqs_off();
+
+ BUG_ON(user_mode(regs));
+
+ return interrupt_exit_kernel_prepare(regs);
}
#endif
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
new file mode 100644
index 000000000000..4063e8a3f704
--- /dev/null
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -0,0 +1,770 @@
+#include <asm/asm-offsets.h>
+#include <asm/bug.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/exception-64s.h>
+#else
+#include <asm/exception-64e.h>
+#endif
+#include <asm/feature-fixups.h>
+#include <asm/head-64.h>
+#include <asm/hw_irq.h>
+#include <asm/kup.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/ptrace.h>
+#include <asm/tm.h>
+
+ .section ".toc","aw"
+SYS_CALL_TABLE:
+ .tc sys_call_table[TC],sys_call_table
+
+#ifdef CONFIG_COMPAT
+COMPAT_SYS_CALL_TABLE:
+ .tc compat_sys_call_table[TC],compat_sys_call_table
+#endif
+ .previous
+
+ .align 7
+
+.macro DEBUG_SRR_VALID srr
+#ifdef CONFIG_PPC_RFI_SRR_DEBUG
+ .ifc \srr,srr
+ mfspr r11,SPRN_SRR0
+ ld r12,_NIP(r1)
+100: tdne r11,r12
+ EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ mfspr r11,SPRN_SRR1
+ ld r12,_MSR(r1)
+100: tdne r11,r12
+ EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ .else
+ mfspr r11,SPRN_HSRR0
+ ld r12,_NIP(r1)
+100: tdne r11,r12
+ EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ mfspr r11,SPRN_HSRR1
+ ld r12,_MSR(r1)
+100: tdne r11,r12
+ EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ .endif
+#endif
+.endm
+
+#ifdef CONFIG_PPC_BOOK3S
+.macro system_call_vectored name trapnr
+ .globl system_call_vectored_\name
+system_call_vectored_\name:
+_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
+ bne tabort_syscall
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+ SCV_INTERRUPT_TO_KERNEL
+ mr r10,r1
+ ld r1,PACAKSAVE(r13)
+ std r10,0(r1)
+ std r11,_NIP(r1)
+ std r12,_MSR(r1)
+ std r0,GPR0(r1)
+ std r10,GPR1(r1)
+ std r2,GPR2(r1)
+ ld r2,PACATOC(r13)
+ mfcr r12
+ li r11,0
+ /* Can we avoid saving r3-r8 in common case? */
+ std r3,GPR3(r1)
+ std r4,GPR4(r1)
+ std r5,GPR5(r1)
+ std r6,GPR6(r1)
+ std r7,GPR7(r1)
+ std r8,GPR8(r1)
+ /* Zero r9-r12; this should only be required when restoring all GPRs */
+ std r11,GPR9(r1)
+ std r11,GPR10(r1)
+ std r11,GPR11(r1)
+ std r11,GPR12(r1)
+ std r9,GPR13(r1)
+ SAVE_NVGPRS(r1)
+ std r11,_XER(r1)
+ std r11,_LINK(r1)
+ std r11,_CTR(r1)
+
+ li r11,\trapnr
+ std r11,_TRAP(r1)
+ std r12,_CCR(r1)
+ addi r10,r1,STACK_FRAME_OVERHEAD
+ ld r11,exception_marker@toc(r2)
+ std r11,-16(r10) /* "regshere" marker */
+
+BEGIN_FTR_SECTION
+ HMT_MEDIUM
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ /*
+ * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
+ * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
+ * and interrupts may be masked and pending already.
+ * system_call_exception() will call trace_hardirqs_off(), which means
+ * interrupts may already have been blocked before trace_hardirqs_off()
+ * runs, but this is the best we can do.
+ */
+
+ /* Calling convention has r9 = orig r0, r10 = regs */
+ mr r9,r0
+ bl system_call_exception
+
+.Lsyscall_vectored_\name\()_exit:
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r5,1 /* scv */
+ bl syscall_exit_prepare
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+.Lsyscall_vectored_\name\()_rst_start:
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- syscall_vectored_\name\()_restart
+ li r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+
+ ld r2,_CCR(r1)
+ ld r4,_NIP(r1)
+ ld r5,_MSR(r1)
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+BEGIN_FTR_SECTION
+ HMT_MEDIUM_LOW
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ cmpdi r3,0
+ bne .Lsyscall_vectored_\name\()_restore_regs
+
+ /* rfscv returns with LR->NIA and CTR->MSR */
+ mtlr r4
+ mtctr r5
+
+ /* Could zero these as per ABI, but we may consider a stricter ABI
+ * which preserves these if libc implementations can benefit, so
+ * restore them for now until further measurement is done. */
+ ld r0,GPR0(r1)
+ ld r4,GPR4(r1)
+ ld r5,GPR5(r1)
+ ld r6,GPR6(r1)
+ ld r7,GPR7(r1)
+ ld r8,GPR8(r1)
+ /* Zero volatile regs that may contain sensitive kernel data */
+ li r9,0
+ li r10,0
+ li r11,0
+ li r12,0
+ mtspr SPRN_XER,r0
+
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * The value of AMR only matters while we're in the kernel.
+ */
+ mtcr r2
+ ld r2,GPR2(r1)
+ ld r3,GPR3(r1)
+ ld r13,GPR13(r1)
+ ld r1,GPR1(r1)
+ RFSCV_TO_USER
+ b . /* prevent speculative execution */
+
+.Lsyscall_vectored_\name\()_restore_regs:
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r5
+
+ ld r3,_CTR(r1)
+ ld r4,_LINK(r1)
+ ld r5,_XER(r1)
+
+ REST_NVGPRS(r1)
+ ld r0,GPR0(r1)
+ mtcr r2
+ mtctr r3
+ mtlr r4
+ mtspr SPRN_XER,r5
+ REST_10GPRS(2, r1)
+ REST_2GPRS(12, r1)
+ ld r1,GPR1(r1)
+ RFI_TO_USER
+.Lsyscall_vectored_\name\()_rst_end:
+
+syscall_vectored_\name\()_restart:
+_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ ld r2,PACATOC(r13)
+ ld r3,RESULT(r1)
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl syscall_exit_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Lsyscall_vectored_\name\()_rst_start
+1:
+
+SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
+RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
+
+.endm
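[Note: SOFT_MASK_TABLE marks a (start, end) range that interrupt entry treats as soft-masked; RESTART_TABLE pairs a range with a fixup label (here syscall_vectored_*_restart) that the return path branches to when an interrupt hit inside the range, rather than resuming the clobbered exit sequence. A plausible shape for a table entry and its lookup — the struct name, section symbols, and field order are assumptions, not shown in this hunk:

    struct restart_entry {
            unsigned long start;  /* first address of the restartable range */
            unsigned long end;    /* one past the last address */
            unsigned long fixup;  /* resume point, e.g. syscall_restart */
    };

    extern struct restart_entry __start___restart_table[];
    extern struct restart_entry __stop___restart_table[];

    static unsigned long find_restart_fixup(unsigned long nip)
    {
            struct restart_entry *re;

            for (re = __start___restart_table; re < __stop___restart_table; re++)
                    if (nip >= re->start && nip < re->end)
                            return re->fixup;
            return 0;  /* not inside a restartable range */
    }
]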
+
+system_call_vectored common 0x3000
+
+/*
+ * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
+ * which is tested by system_call_exception when r0 is -1 (as set by vector
+ * entry code).
+ */
+system_call_vectored sigill 0x7ff0
+
+
+/*
+ * Entered via kernel return set up by kernel/sstep.c, must match entry regs
+ */
+ .globl system_call_vectored_emulate
+system_call_vectored_emulate:
+_ASM_NOKPROBE_SYMBOL(system_call_vectored_emulate)
+ li r10,IRQS_ALL_DISABLED
+ stb r10,PACAIRQSOFTMASK(r13)
+ b system_call_vectored_common
+#endif /* CONFIG_PPC_BOOK3S */
+
+ .balign IFETCH_ALIGN_BYTES
+ .globl system_call_common_real
+system_call_common_real:
+_ASM_NOKPROBE_SYMBOL(system_call_common_real)
+ ld r10,PACAKMSR(r13) /* get MSR value for kernel */
+ mtmsrd r10
+
+ .balign IFETCH_ALIGN_BYTES
+ .globl system_call_common
+system_call_common:
+_ASM_NOKPROBE_SYMBOL(system_call_common)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
+ bne tabort_syscall
+END_FTR_SECTION_IFSET(CPU_FTR_TM)
+#endif
+ mr r10,r1
+ ld r1,PACAKSAVE(r13)
+ std r10,0(r1)
+ std r11,_NIP(r1)
+ std r12,_MSR(r1)
+ std r0,GPR0(r1)
+ std r10,GPR1(r1)
+ std r2,GPR2(r1)
+#ifdef CONFIG_PPC_FSL_BOOK3E
+START_BTB_FLUSH_SECTION
+ BTB_FLUSH(r10)
+END_BTB_FLUSH_SECTION
+#endif
+ ld r2,PACATOC(r13)
+ mfcr r12
+ li r11,0
+ /* Can we avoid saving r3-r8 in common case? */
+ std r3,GPR3(r1)
+ std r4,GPR4(r1)
+ std r5,GPR5(r1)
+ std r6,GPR6(r1)
+ std r7,GPR7(r1)
+ std r8,GPR8(r1)
+ /* Zero r9-r12; this should only be required when restoring all GPRs */
+ std r11,GPR9(r1)
+ std r11,GPR10(r1)
+ std r11,GPR11(r1)
+ std r11,GPR12(r1)
+ std r9,GPR13(r1)
+ SAVE_NVGPRS(r1)
+ std r11,_XER(r1)
+ std r11,_CTR(r1)
+ mflr r10
+
+ /*
+ * This clears CR0.SO (bit 28), which is the error indication on
+ * return from this system call.
+ */
+ rldimi r12,r11,28,(63-28)
+ li r11,0xc00
+ std r10,_LINK(r1)
+ std r11,_TRAP(r1)
+ std r12,_CCR(r1)
+ addi r10,r1,STACK_FRAME_OVERHEAD
+ ld r11,exception_marker@toc(r2)
+ std r11,-16(r10) /* "regshere" marker */
+
+#ifdef CONFIG_PPC_BOOK3S
+ li r11,1
+ stb r11,PACASRR_VALID(r13)
+#endif
+
+ /*
+ * We always enter kernel from userspace with irq soft-mask enabled and
+ * nothing pending. system_call_exception() will call
+ * trace_hardirqs_off().
+ */
+ li r11,IRQS_ALL_DISABLED
+ li r12,-1 /* Set MSR_EE and MSR_RI */
+ stb r11,PACAIRQSOFTMASK(r13)
+ mtmsrd r12,1
+
+ /* Calling convention has r9 = orig r0, r10 = regs */
+ mr r9,r0
+ bl system_call_exception
+
+.Lsyscall_exit:
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r5,0 /* !scv */
+ bl syscall_exit_prepare
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+#ifdef CONFIG_PPC_BOOK3S
+.Lsyscall_rst_start:
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- syscall_restart
+#endif
+ li r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+
+ ld r2,_CCR(r1)
+ ld r6,_LINK(r1)
+ mtlr r6
+
+#ifdef CONFIG_PPC_BOOK3S
+ lbz r4,PACASRR_VALID(r13)
+ cmpdi r4,0
+ bne 1f
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+#endif
+ ld r4,_NIP(r1)
+ ld r5,_MSR(r1)
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r5
+1:
+ DEBUG_SRR_VALID srr
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ cmpdi r3,0
+ bne .Lsyscall_restore_regs
+ /* Zero volatile regs that may contain sensitive kernel data */
+ li r0,0
+ li r4,0
+ li r5,0
+ li r6,0
+ li r7,0
+ li r8,0
+ li r9,0
+ li r10,0
+ li r11,0
+ li r12,0
+ mtctr r0
+ mtspr SPRN_XER,r0
+.Lsyscall_restore_regs_cont:
+
+BEGIN_FTR_SECTION
+ HMT_MEDIUM_LOW
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * The value of AMR only matters while we're in the kernel.
+ */
+ mtcr r2
+ ld r2,GPR2(r1)
+ ld r3,GPR3(r1)
+ ld r13,GPR13(r1)
+ ld r1,GPR1(r1)
+ RFI_TO_USER
+ b . /* prevent speculative execution */
+
+.Lsyscall_restore_regs:
+ ld r3,_CTR(r1)
+ ld r4,_XER(r1)
+ REST_NVGPRS(r1)
+ mtctr r3
+ mtspr SPRN_XER,r4
+ ld r0,GPR0(r1)
+ REST_8GPRS(4, r1)
+ ld r12,GPR12(r1)
+ b .Lsyscall_restore_regs_cont
+.Lsyscall_rst_end:
+
+#ifdef CONFIG_PPC_BOOK3S
+syscall_restart:
+_ASM_NOKPROBE_SYMBOL(syscall_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ ld r2,PACATOC(r13)
+ ld r3,RESULT(r1)
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl syscall_exit_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Lsyscall_rst_start
+1:
+
+SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
+RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
+#endif
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+tabort_syscall:
+_ASM_NOKPROBE_SYMBOL(tabort_syscall)
+ /* Firstly we need to enable TM in the kernel */
+ mfmsr r10
+ li r9, 1
+ rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
+ mtmsrd r10, 0
+
+ /* tabort, this dooms the transaction, nothing else */
+ li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
+ TABORT(R9)
+
+ /*
+ * Return directly to userspace. We have corrupted user register state,
+ * but userspace will never see that register state. Execution will
+ * resume after the tbegin of the aborted transaction with the
+ * checkpointed register state.
+ */
+ li r9, MSR_RI
+ andc r10, r10, r9
+ mtmsrd r10, 1
+ mtspr SPRN_SRR0, r11
+ mtspr SPRN_SRR1, r12
+ RFI_TO_USER
+ b . /* prevent speculative execution */
+#endif
+
+ /*
+ * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
+ * touched, no exit work created, then this can be used.
+ */
+ .balign IFETCH_ALIGN_BYTES
+ .globl fast_interrupt_return_srr
+fast_interrupt_return_srr:
+_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
+ kuap_check_amr r3, r4
+ ld r5,_MSR(r1)
+ andi. r0,r5,MSR_PR
+#ifdef CONFIG_PPC_BOOK3S
+ beq 1f
+ kuap_user_restore r3, r4
+ b .Lfast_user_interrupt_return_srr
+1: kuap_kernel_restore r3, r4
+ andi. r0,r5,MSR_RI
+ li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
+ bne+ .Lfast_kernel_interrupt_return_srr
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl unrecoverable_exception
+ b . /* should not get here */
+#else
+ bne .Lfast_user_interrupt_return_srr
+ b .Lfast_kernel_interrupt_return_srr
+#endif
+
+.macro interrupt_return_macro srr
+ .balign IFETCH_ALIGN_BYTES
+ .globl interrupt_return_\srr
+interrupt_return_\srr\():
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
+ ld r4,_MSR(r1)
+ andi. r0,r4,MSR_PR
+ beq interrupt_return_\srr\()_kernel
+interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl interrupt_exit_user_prepare
+ cmpdi r3,0
+ bne- .Lrestore_nvgprs_\srr
+.Lrestore_nvgprs_\srr\()_cont:
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+#ifdef CONFIG_PPC_BOOK3S
+.Linterrupt_return_\srr\()_user_rst_start:
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- interrupt_return_\srr\()_user_restart
+#endif
+ li r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+
+.Lfast_user_interrupt_return_\srr\():
+#ifdef CONFIG_PPC_BOOK3S
+ .ifc \srr,srr
+ lbz r4,PACASRR_VALID(r13)
+ .else
+ lbz r4,PACAHSRR_VALID(r13)
+ .endif
+ cmpdi r4,0
+ li r4,0
+ bne 1f
+#endif
+ ld r11,_NIP(r1)
+ ld r12,_MSR(r1)
+ .ifc \srr,srr
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACASRR_VALID(r13)
+#endif
+ .else
+ mtspr SPRN_HSRR0,r11
+ mtspr SPRN_HSRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACAHSRR_VALID(r13)
+#endif
+ .endif
+ DEBUG_SRR_VALID \srr
+
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ lbz r4,PACAIRQSOFTMASK(r13)
+ tdnei r4,IRQS_ENABLED
+#endif
+
+BEGIN_FTR_SECTION
+ ld r10,_PPR(r1)
+ mtspr SPRN_PPR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ ldarx r0,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ ld r3,_CCR(r1)
+ ld r4,_LINK(r1)
+ ld r5,_CTR(r1)
+ ld r6,_XER(r1)
+ li r0,0
+
+ REST_4GPRS(7, r1)
+ REST_2GPRS(11, r1)
+ REST_GPR(13, r1)
+
+ mtcr r3
+ mtlr r4
+ mtctr r5
+ mtspr SPRN_XER,r6
+
+ REST_4GPRS(2, r1)
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ .ifc \srr,srr
+ RFI_TO_USER
+ .else
+ HRFI_TO_USER
+ .endif
+ b . /* prevent speculative execution */
+.Linterrupt_return_\srr\()_user_rst_end:
+
+.Lrestore_nvgprs_\srr\():
+ REST_NVGPRS(r1)
+ b .Lrestore_nvgprs_\srr\()_cont
+
+#ifdef CONFIG_PPC_BOOK3S
+interrupt_return_\srr\()_user_restart:
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ ld r2,PACATOC(r13)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl interrupt_exit_user_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Linterrupt_return_\srr\()_user_rst_start
+1:
+
+SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
+RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
+#endif
+
+ .balign IFETCH_ALIGN_BYTES
+interrupt_return_\srr\()_kernel:
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl interrupt_exit_kernel_prepare
+
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+.Linterrupt_return_\srr\()_kernel_rst_start:
+ ld r11,SOFTE(r1)
+ cmpwi r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bne 1f
+#ifdef CONFIG_PPC_BOOK3S
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- interrupt_return_\srr\()_kernel_restart
+#endif
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+1:
+
+.Lfast_kernel_interrupt_return_\srr\():
+ cmpdi cr1,r3,0
+#ifdef CONFIG_PPC_BOOK3S
+ .ifc \srr,srr
+ lbz r4,PACASRR_VALID(r13)
+ .else
+ lbz r4,PACAHSRR_VALID(r13)
+ .endif
+ cmpdi r4,0
+ li r4,0
+ bne 1f
+#endif
+ ld r11,_NIP(r1)
+ ld r12,_MSR(r1)
+ .ifc \srr,srr
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACASRR_VALID(r13)
+#endif
+ .else
+ mtspr SPRN_HSRR0,r11
+ mtspr SPRN_HSRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACAHSRR_VALID(r13)
+#endif
+ .endif
+ DEBUG_SRR_VALID \srr
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ ldarx r0,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ ld r3,_LINK(r1)
+ ld r4,_CTR(r1)
+ ld r5,_XER(r1)
+ ld r6,_CCR(r1)
+ li r0,0
+
+ REST_4GPRS(7, r1)
+ REST_2GPRS(11, r1)
+
+ mtlr r3
+ mtctr r4
+ mtspr SPRN_XER,r5
+
+ /*
+ * Leaving a stale exception_marker on the stack can confuse
+ * the reliable stack unwinder later on. Clear it.
+ */
+ std r0,STACK_FRAME_OVERHEAD-16(r1)
+
+ REST_4GPRS(2, r1)
+
+ bne- cr1,1f /* emulate stack store */
+ mtcr r6
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ .ifc \srr,srr
+ RFI_TO_KERNEL
+ .else
+ HRFI_TO_KERNEL
+ .endif
+ b . /* prevent speculative execution */
+
+1: /*
+ * Emulate stack store with update. New r1 value was already calculated
+ * and updated in our interrupt regs by emulate_loadstore, but we can't
+ * store the previous value of r1 to the stack before re-loading our
+ * registers from it, otherwise they could be clobbered. Use
+ * PACA_EXGEN as temporary storage to hold the store data, as
+ * interrupts are disabled here so it won't be clobbered.
+ */
+ mtcr r6
+ std r9,PACA_EXGEN+0(r13)
+ addi r9,r1,INT_FRAME_SIZE /* get original r1 */
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ std r9,0(r1) /* perform store component of stdu */
+ ld r9,PACA_EXGEN+0(r13)
+
+ .ifc \srr,srr
+ RFI_TO_KERNEL
+ .else
+ HRFI_TO_KERNEL
+ .endif
+ b . /* prevent speculative execution */
+.Linterrupt_return_\srr\()_kernel_rst_end:
+
+#ifdef CONFIG_PPC_BOOK3S
+interrupt_return_\srr\()_kernel_restart:
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ ld r2,PACATOC(r13)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl interrupt_exit_kernel_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Linterrupt_return_\srr\()_kernel_rst_start
+1:
+
+SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
+RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
+#endif
+
+.endm
+
+interrupt_return_macro srr
+#ifdef CONFIG_PPC_BOOK3S
+interrupt_return_macro hsrr
+
+ .globl __end_soft_masked
+__end_soft_masked:
+DEFINE_FIXED_SYMBOL(__end_soft_masked)
+#endif /* CONFIG_PPC_BOOK3S */
+
+#ifdef CONFIG_PPC_BOOK3S
+_GLOBAL(ret_from_fork_scv)
+ bl schedule_tail
+ REST_NVGPRS(r1)
+ li r3,0 /* fork() return value */
+ b .Lsyscall_vectored_common_exit
+#endif
+
+_GLOBAL(ret_from_fork)
+ bl schedule_tail
+ REST_NVGPRS(r1)
+ li r3,0 /* fork() return value */
+ b .Lsyscall_exit
+
+_GLOBAL(ret_from_kernel_thread)
+ bl schedule_tail
+ REST_NVGPRS(r1)
+ mtctr r14
+ mr r3,r15
+#ifdef PPC64_ELF_ABI_v2
+ mr r12,r14
+#endif
+ bctrl
+ li r3,0
+ b .Lsyscall_exit
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index 51bbaae94ccc..c877f074d174 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -55,7 +55,6 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
#ifdef CONFIG_PPC_INDIRECT_MMIO
struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
{
- unsigned hugepage_shift;
struct iowa_bus *bus;
int token;
@@ -65,22 +64,13 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
bus = &iowa_busses[token - 1];
else {
unsigned long vaddr, paddr;
- pte_t *ptep;
vaddr = (unsigned long)PCI_FIX_ADDR(addr);
if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
return NULL;
- /*
- * We won't find huge pages here (iomem). Also can't hit
- * a page table free due to init_mm
- */
- ptep = find_init_mm_pte(vaddr, &hugepage_shift);
- if (ptep == NULL)
- paddr = 0;
- else {
- WARN_ON(hugepage_shift);
- paddr = pte_pfn(*ptep) << PAGE_SHIFT;
- }
+
+ paddr = ppc_find_vmap_phys(vaddr);
+
bus = iowa_pci_find(vaddr, paddr);
if (bus == NULL)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index c00214a4355c..2af89a5e379f 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -72,8 +72,7 @@ static void iommu_debugfs_del(struct iommu_table *tbl)
sprintf(name, "%08lx", tbl->it_index);
liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
- if (liobn_entry)
- debugfs_remove(liobn_entry);
+ debugfs_remove(liobn_entry);
}
#else
static void iommu_debugfs_add(struct iommu_table *tbl){}
@@ -297,6 +296,15 @@ again:
pass++;
goto again;
+ } else if (pass == tbl->nr_pools + 1) {
+ /* Last resort: try largepool */
+ spin_unlock(&pool->lock);
+ pool = &tbl->large_pool;
+ spin_lock(&pool->lock);
+ pool->hint = pool->start;
+ pass++;
+ goto again;
+
} else {
/* Give up */
spin_unlock_irqrestore(&(pool->lock), flags);
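[Note: with this branch added, the allocator's retry ladder runs: the starting pool, then the remaining small pools, and now the large pool as a final pass before failing. Abridged control flow — the helper names stand in for the elided locking and scanning:

    again:
            offset = scan_pool(pool);               /* placeholder for iommu_area_alloc() */
            if (offset != -1)
                    goto found;
            if (pass <= tbl->nr_pools) {            /* rotate through the small pools */
                    pool = next_small_pool(tbl);    /* placeholder */
                    pass++;
                    goto again;
            } else if (pass == tbl->nr_pools + 1) { /* new last resort: large pool */
                    pool = &tbl->large_pool;
                    pool->hint = pool->start;
                    pass++;
                    goto again;
            }
            /* give up */
]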
@@ -719,7 +727,6 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
{
unsigned long sz;
static int welcomed = 0;
- struct page *page;
unsigned int i;
struct iommu_pool *p;
@@ -728,11 +735,11 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
/* number of bytes needed for the bitmap */
sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
- page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
- if (!page)
- panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
- tbl->it_map = page_address(page);
- memset(tbl->it_map, 0, sz);
+ tbl->it_map = vzalloc_node(sz, nid);
+ if (!tbl->it_map) {
+ pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
+ return NULL;
+ }
iommu_table_reserve_pages(tbl, res_start, res_end);
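[Note: moving it_map to vzalloc_node() matters for big DMA windows: the bitmap only needs to be virtually contiguous, and allocation failure now returns NULL instead of panicking. The sizing shows why the old physically contiguous allocation could exceed the page allocator's order limit; assuming 64-bit longs:

    /* Bitmap bytes for it_size TCE entries. */
    unsigned long it_size = 1UL << 27;  /* e.g. a 128M-entry window */
    unsigned long sz = BITS_TO_LONGS(it_size) * sizeof(unsigned long);
    /* (2^27 / 64) * 8 = 16 MiB: beyond get_order()-based allocation on
     * many configurations, unremarkable for vzalloc_node(). */
]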
@@ -774,8 +781,6 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
static void iommu_table_free(struct kref *kref)
{
- unsigned long bitmap_sz;
- unsigned int order;
struct iommu_table *tbl;
tbl = container_of(kref, struct iommu_table, it_kref);
@@ -796,12 +801,8 @@ static void iommu_table_free(struct kref *kref)
if (!bitmap_empty(tbl->it_map, tbl->it_size))
pr_warn("%s: Unexpected TCEs\n", __func__);
- /* calculate bitmap size in bytes */
- bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
-
/* free bitmap */
- order = get_order(bitmap_sz);
- free_pages((unsigned long) tbl->it_map, order);
+ vfree(tbl->it_map);
/* free table */
kfree(tbl);
@@ -1096,7 +1097,7 @@ int iommu_take_ownership(struct iommu_table *tbl)
spin_lock_irqsave(&tbl->large_pool.lock, flags);
for (i = 0; i < tbl->nr_pools; i++)
- spin_lock(&tbl->pools[i].lock);
+ spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
iommu_table_release_pages(tbl);
@@ -1124,7 +1125,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
spin_lock_irqsave(&tbl->large_pool.lock, flags);
for (i = 0; i < tbl->nr_pools; i++)
- spin_lock(&tbl->pools[i].lock);
+ spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
memset(tbl->it_map, 0, sz);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index d71fd10a1dd4..91e63eac4e8f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -104,82 +104,6 @@ static inline notrace unsigned long get_irq_happened(void)
return happened;
}
-#ifdef CONFIG_PPC_BOOK3E
-
-/* This is called whenever we are re-enabling interrupts
- * and returns either 0 (nothing to do) or 500/900/280 if
- * there's an EE, DEC or DBELL to generate.
- *
- * This is called in two contexts: From arch_local_irq_restore()
- * before soft-enabling interrupts, and from the exception exit
- * path when returning from an interrupt from a soft-disabled to
- * a soft enabled context. In both case we have interrupts hard
- * disabled.
- *
- * We take care of only clearing the bits we handled in the
- * PACA irq_happened field since we can only re-emit one at a
- * time and we don't want to "lose" one.
- */
-notrace unsigned int __check_irq_replay(void)
-{
- /*
- * We use local_paca rather than get_paca() to avoid all
- * the debug_smp_processor_id() business in this low level
- * function
- */
- unsigned char happened = local_paca->irq_happened;
-
- /*
- * We are responding to the next interrupt, so interrupt-off
- * latencies should be reset here.
- */
- trace_hardirqs_on();
- trace_hardirqs_off();
-
- if (happened & PACA_IRQ_DEC) {
- local_paca->irq_happened &= ~PACA_IRQ_DEC;
- return 0x900;
- }
-
- if (happened & PACA_IRQ_EE) {
- local_paca->irq_happened &= ~PACA_IRQ_EE;
- return 0x500;
- }
-
- if (happened & PACA_IRQ_DBELL) {
- local_paca->irq_happened &= ~PACA_IRQ_DBELL;
- return 0x280;
- }
-
- if (happened & PACA_IRQ_HARD_DIS)
- local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
-
- /* There should be nothing left ! */
- BUG_ON(local_paca->irq_happened != 0);
-
- return 0;
-}
-
-/*
- * This is specifically called by assembly code to re-enable interrupts
- * if they are currently disabled. This is typically called before
- * schedule() or do_signal() when returning to userspace. We do it
- * in C to avoid the burden of dealing with lockdep etc...
- *
- * NOTE: This is called with interrupts hard disabled but not marked
- * as such in paca->irq_happened, so we need to resync this.
- */
-void notrace restore_interrupts(void)
-{
- if (irqs_disabled()) {
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
- local_irq_enable();
- } else
- __hard_irq_enable();
-}
-
-#endif /* CONFIG_PPC_BOOK3E */
-
void replay_soft_interrupts(void)
{
struct pt_regs regs;
@@ -197,6 +121,7 @@ void replay_soft_interrupts(void)
ppc_save_regs(&regs);
regs.softe = IRQS_ENABLED;
+ regs.msr |= MSR_EE;
again:
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
@@ -218,7 +143,7 @@ again:
*/
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
local_paca->irq_happened &= ~PACA_IRQ_HMI;
- regs.trap = 0xe60;
+ regs.trap = INTERRUPT_HMI;
handle_hmi_exception(&regs);
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
hard_irq_disable();
@@ -226,7 +151,7 @@ again:
if (local_paca->irq_happened & PACA_IRQ_DEC) {
local_paca->irq_happened &= ~PACA_IRQ_DEC;
- regs.trap = 0x900;
+ regs.trap = INTERRUPT_DECREMENTER;
timer_interrupt(&regs);
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
hard_irq_disable();
@@ -234,7 +159,7 @@ again:
if (local_paca->irq_happened & PACA_IRQ_EE) {
local_paca->irq_happened &= ~PACA_IRQ_EE;
- regs.trap = 0x500;
+ regs.trap = INTERRUPT_EXTERNAL;
do_IRQ(&regs);
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
hard_irq_disable();
@@ -242,10 +167,7 @@ again:
if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
local_paca->irq_happened &= ~PACA_IRQ_DBELL;
- if (IS_ENABLED(CONFIG_PPC_BOOK3E))
- regs.trap = 0x280;
- else
- regs.trap = 0xa00;
+ regs.trap = INTERRUPT_DOORBELL;
doorbell_exception(&regs);
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
hard_irq_disable();
@@ -254,7 +176,7 @@ again:
/* Book3E does not support soft-masking PMI interrupts */
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
local_paca->irq_happened &= ~PACA_IRQ_PMI;
- regs.trap = 0xf00;
+ regs.trap = INTERRUPT_PERFMON;
performance_monitor_exception(&regs);
if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
hard_irq_disable();
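[Note: the named trap constants replace the raw vectors visible on the removed lines; from those literals they presumably expand as follows (see asm/interrupt.h for the authoritative definitions; the doorbell value is platform-dependent, which the macro now hides):

    #define INTERRUPT_EXTERNAL      0x500
    #define INTERRUPT_DECREMENTER   0x900
    #define INTERRUPT_HMI           0xe60
    #define INTERRUPT_PERFMON       0xf00
    /* INTERRUPT_DOORBELL: 0xa00 on Book3S, 0x280 on Book3E */
]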
@@ -282,7 +204,7 @@ static inline void replay_soft_interrupts_irqrestore(void)
* and re-locking AMR but we shouldn't get here in the first place,
* hence the warning.
*/
- kuap_check_amr();
+ kuap_assert_locked();
if (kuap_state != AMR_KUAP_BLOCKED)
set_kuap(AMR_KUAP_BLOCKED);
@@ -296,6 +218,100 @@ static inline void replay_soft_interrupts_irqrestore(void)
#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
#endif
+#ifdef CONFIG_CC_HAS_ASM_GOTO
+notrace void arch_local_irq_restore(unsigned long mask)
+{
+ unsigned char irq_happened;
+
+ /* Write the new soft-enabled value if it is a disable */
+ if (mask) {
+ irq_soft_mask_set(mask);
+ return;
+ }
+
+ /*
+ * After the stb, interrupts are unmasked and there are no interrupts
+ * pending replay. The restart sequence makes this atomic with
+ * respect to soft-masked interrupts. If this was just a simple code
+ * sequence, a soft-masked interrupt could become pending right after
+ * the comparison and before the stb.
+ *
+ * This allows interrupts to be unmasked without hard disabling, and
+ * also without new hard interrupts coming in ahead of pending ones.
+ */
+ asm_volatile_goto(
+"1: \n"
+" lbz 9,%0(13) \n"
+" cmpwi 9,0 \n"
+" bne %l[happened] \n"
+" stb 9,%1(13) \n"
+"2: \n"
+ RESTART_TABLE(1b, 2b, 1b)
+ : : "i" (offsetof(struct paca_struct, irq_happened)),
+ "i" (offsetof(struct paca_struct, irq_soft_mask))
+ : "cr0", "r9"
+ : happened);
+
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
+
+ return;
+
+happened:
+ irq_happened = get_irq_happened();
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!irq_happened);
+
+ if (irq_happened == PACA_IRQ_HARD_DIS) {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+ irq_soft_mask_set(IRQS_ENABLED);
+ local_paca->irq_happened = 0;
+ __hard_irq_enable();
+ return;
+ }
+
+ /* Have interrupts to replay, need to hard disable first */
+ if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ if (!(mfmsr() & MSR_EE)) {
+ /*
+ * An interrupt could have come in and cleared
+ * MSR[EE] and set IRQ_HARD_DIS, so check
+ * IRQ_HARD_DIS again and warn if it is still
+ * clear.
+ */
+ irq_happened = get_irq_happened();
+ WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
+ }
+ }
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ } else {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ if (WARN_ON_ONCE(mfmsr() & MSR_EE))
+ __hard_irq_disable();
+ }
+ }
+
+ /*
+ * Disable preempt here, so that the below preempt_enable will
+ * perform resched if required (a replayed interrupt may set
+ * need_resched).
+ */
+ preempt_disable();
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ trace_hardirqs_off();
+
+ replay_soft_interrupts_irqrestore();
+ local_paca->irq_happened = 0;
+
+ trace_hardirqs_on();
+ irq_soft_mask_set(IRQS_ENABLED);
+ __hard_irq_enable();
+ preempt_enable();
+}
+#else
notrace void arch_local_irq_restore(unsigned long mask)
{
unsigned char irq_happened;
@@ -367,6 +383,7 @@ notrace void arch_local_irq_restore(unsigned long mask)
__hard_irq_enable();
preempt_enable();
}
+#endif
EXPORT_SYMBOL(arch_local_irq_restore);
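[Note: stripped of the restart machinery, the asm goto fast path is just a flag test and a byte store; writing it in plain C would leave a window where a soft-masked interrupt becomes pending between the two, which is exactly what the RESTART_TABLE entry closes:

    /* What the lbz/cmpwi/bne/stb sequence encodes, minus the atomicity. */
    if (local_paca->irq_happened)             /* lbz ; cmpwi ; bne */
            goto happened;                    /* slow path: replay */
    local_paca->irq_soft_mask = IRQS_ENABLED; /* stb */
    return;
]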
/*
@@ -667,6 +684,47 @@ static inline void check_stack_overflow(void)
}
}
+static __always_inline void call_do_softirq(const void *sp)
+{
+ /* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
+ asm volatile (
+ PPC_STLU " %%r1, %[offset](%[sp]) ;"
+ "mr %%r1, %[sp] ;"
+ "bl %[callee] ;"
+ PPC_LL " %%r1, 0(%%r1) ;"
+ : // Outputs
+ : // Inputs
+ [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
+ [callee] "i" (__do_softirq)
+ : // Clobbers
+ "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
+ "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "r11", "r12"
+ );
+}
+
+static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
+{
+ register unsigned long r3 asm("r3") = (unsigned long)regs;
+
+ /* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
+ asm volatile (
+ PPC_STLU " %%r1, %[offset](%[sp]) ;"
+ "mr %%r1, %[sp] ;"
+ "bl %[callee] ;"
+ PPC_LL " %%r1, 0(%%r1) ;"
+ : // Outputs
+ "+r" (r3)
+ : // Inputs
+ [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
+ [callee] "i" (__do_irq)
+ : // Clobbers
+ "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
+ "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "r11", "r12"
+ );
+}
+
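[Note: these inline-asm helpers replace the old assembly thunks; the softirq variant is presumably reached from the own-stack hook along these lines (the caller is an assumption — this hunk only defines the helpers):

    /* Assumed caller: switch to this CPU's dedicated softirq stack. */
    void do_softirq_own_stack(void)
    {
            call_do_softirq(softirq_ctx[smp_processor_id()]);
    }
]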
void __do_irq(struct pt_regs *regs)
{
unsigned int irq;
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index 2257d24e6a26..39c625737c09 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -48,7 +48,7 @@ static void remap_isa_base(phys_addr_t pa, unsigned long size)
if (slab_is_available()) {
if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
pgprot_noncached(PAGE_KERNEL)))
- unmap_kernel_range(ISA_IO_BASE, size);
+ vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size);
} else {
early_ioremap_range(ISA_IO_BASE, pa, size,
pgprot_noncached(PAGE_KERNEL));
@@ -311,7 +311,7 @@ static void isa_bridge_remove(void)
isa_bridge_pcidev = NULL;
/* Unmap the ISA area */
- unmap_kernel_range(ISA_IO_BASE, 0x10000);
+ vunmap_range(ISA_IO_BASE, ISA_IO_BASE + 0x10000);
}
/**
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index 144858027fa3..5277cf582c16 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -11,10 +11,10 @@
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- struct ppc_inst *addr = (struct ppc_inst *)(unsigned long)entry->code;
+ u32 *addr = (u32 *)jump_entry_code(entry);
if (type == JUMP_LABEL_JMP)
- patch_branch(addr, entry->target, 0);
+ patch_branch(addr, jump_entry_target(entry), 0);
else
- patch_instruction(addr, ppc_inst(PPC_INST_NOP));
+ patch_instruction(addr, ppc_inst(PPC_RAW_NOP()));
}
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 409080208a6c..bdee7262c080 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -147,7 +147,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
return 0;
if (*(u32 *)regs->nip == BREAK_INSTR)
- regs->nip += BREAK_INSTR_SIZE;
+ regs_add_return_ip(regs, BREAK_INSTR_SIZE);
return 1;
}
@@ -372,11 +372,11 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
- regs->nip = pc;
+ regs_set_return_ip(regs, pc);
}
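[Note: the switch from direct regs->nip / regs->msr stores to regs_set_return_ip() / regs_set_return_msr() follows from the SRR caching added by this series: any write to the return state must also invalidate the cached-SRR flags, or the exit path could restore stale values. The accessors are presumably shaped like this — field names assumed from the paca usage elsewhere in this patch:

    /* Assumed shape; the real definitions live in asm/ptrace.h. */
    static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
    {
            regs->nip = ip;
    #ifdef CONFIG_PPC_BOOK3S_64
            local_paca->hsrr_valid = 0;  /* force SRR reload on exit */
            local_paca->srr_valid = 0;
    #endif
    }
]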
/*
- * This function does PowerPC specific procesing for interfacing to gdb.
+ * This function does PowerPC specific processing for interfacing to gdb.
*/
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
@@ -394,7 +394,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
case 'c':
/* handle the optional parameter */
if (kgdb_hex2long(&ptr, &addr))
- linux_regs->nip = addr;
+ regs_set_return_ip(linux_regs, addr);
atomic_set(&kgdb_cpu_doing_single_step, -1);
/* set the trace bit if we're stepping */
@@ -402,9 +402,9 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
mtspr(SPRN_DBCR0,
mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
- linux_regs->msr |= MSR_DE;
+ regs_set_return_msr(linux_regs, linux_regs->msr | MSR_DE);
#else
- linux_regs->msr |= MSR_SE;
+ regs_set_return_msr(linux_regs, linux_regs->msr | MSR_SE);
#endif
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
@@ -417,11 +417,10 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
+ u32 instr, *addr = (u32 *)bpt->bpt_addr;
int err;
- unsigned int instr;
- struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr;
- err = get_kernel_nofault(instr, (unsigned *) addr);
+ err = get_kernel_nofault(instr, addr);
if (err)
return err;
@@ -429,7 +428,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
if (err)
return -EFAULT;
- *(unsigned int *)bpt->saved_instr = instr;
+ *(u32 *)bpt->saved_instr = instr;
return 0;
}
@@ -438,7 +437,7 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
int err;
unsigned int instr = *(unsigned int *)bpt->saved_instr;
- struct ppc_inst *addr = (struct ppc_inst *)bpt->bpt_addr;
+ u32 *addr = (u32 *)bpt->bpt_addr;
err = patch_instruction(addr, ppc_inst(instr));
if (err)
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 660138f6c4b2..7154d58338cc 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -39,7 +39,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
* On powerpc, NIP is *before* this instruction for the
* pre handler
*/
- regs->nip -= MCOUNT_INSN_SIZE;
+ regs_add_return_ip(regs, -MCOUNT_INSN_SIZE);
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -48,7 +48,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
* Emulate singlestep (and also recover regs->nip)
* as if there is a nop
*/
- regs->nip += MCOUNT_INSN_SIZE;
+ regs_add_return_ip(regs, MCOUNT_INSN_SIZE);
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 01ab2163659e..cbc28d1a2e1b 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -19,11 +19,13 @@
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
+#include <linux/moduleloader.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <asm/inst.h>
+#include <asm/set_memory.h>
#include <linux/uaccess.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -103,28 +105,42 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
return addr;
}
+void *alloc_insn_page(void)
+{
+ void *page;
+
+ page = module_alloc(PAGE_SIZE);
+ if (!page)
+ return NULL;
+
+ if (strict_module_rwx_enabled()) {
+ set_memory_ro((unsigned long)page, 1);
+ set_memory_x((unsigned long)page, 1);
+ }
+ return page;
+}
+
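[Note: this override shadows the weak module_alloc()-based default in generic kprobes code; the arch version additionally makes the slot page RO+X when strict module RWX is on, leaving the actual writes to patch_instruction(), which does not go through this mapping. The generic fallback, for comparison:

    /* Generic default in kernel/kprobes.c that the function above overrides. */
    void __weak *alloc_insn_page(void)
    {
            return module_alloc(PAGE_SIZE);
    }
]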
int arch_prepare_kprobe(struct kprobe *p)
{
int ret = 0;
struct kprobe *prev;
- struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
- struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));
+ struct ppc_inst insn = ppc_inst_read(p->addr);
if ((unsigned long)p->addr & 0x03) {
printk("Attempt to register kprobe at an unaligned address\n");
ret = -EINVAL;
- } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
- printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
+ } else if (IS_MTMSRD(insn) || IS_RFID(insn)) {
+ printk("Cannot register a kprobe on mtmsr[d]/rfi[d]\n");
ret = -EINVAL;
- } else if (ppc_inst_prefixed(prefix)) {
+ } else if ((unsigned long)p->addr & ~PAGE_MASK &&
+ ppc_inst_prefixed(ppc_inst_read(p->addr - 1))) {
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}
preempt_disable();
prev = get_kprobe(p->addr - 1);
preempt_enable_no_resched();
- if (prev &&
- ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)prev->ainsn.insn))) {
+ if (prev && ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) {
printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}
@@ -138,7 +154,7 @@ int arch_prepare_kprobe(struct kprobe *p)
}
if (!ret) {
- patch_instruction((struct ppc_inst *)p->ainsn.insn, insn);
+ patch_instruction(p->ainsn.insn, insn);
p->opcode = ppc_inst_val(insn);
}
@@ -149,13 +165,13 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe);
void arch_arm_kprobe(struct kprobe *p)
{
- patch_instruction((struct ppc_inst *)p->addr, ppc_inst(BREAKPOINT_INSTRUCTION));
+ WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(BREAKPOINT_INSTRUCTION)));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
void arch_disarm_kprobe(struct kprobe *p)
{
- patch_instruction((struct ppc_inst *)p->addr, ppc_inst(p->opcode));
+ WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(p->opcode)));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
@@ -178,7 +194,7 @@ static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs
* variant as values in regs could play a part in
* if the trap is taken or not
*/
- regs->nip = (unsigned long)p->ainsn.insn;
+ regs_set_return_ip(regs, (unsigned long)p->ainsn.insn);
}
static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -228,7 +244,7 @@ NOKPROBE_SYMBOL(arch_prepare_kretprobe);
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
int ret;
- struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);
+ struct ppc_inst insn = ppc_inst_read(p->ainsn.insn);
/* regs->nip is also adjusted if emulate_step returns 1 */
ret = emulate_step(regs, insn);
@@ -319,8 +335,9 @@ int kprobe_handler(struct pt_regs *regs)
kprobe_opcode_t insn = *p->ainsn.insn;
if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
/* Turn off 'trace' bits */
- regs->msr &= ~MSR_SINGLESTEP;
- regs->msr |= kcb->kprobe_saved_msr;
+ regs_set_return_msr(regs,
+ (regs->msr & ~MSR_SINGLESTEP) |
+ kcb->kprobe_saved_msr);
goto no_kprobe;
}
@@ -415,7 +432,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* we end up emulating it in kprobe_handler(), which increments the nip
* again.
*/
- regs->nip = orig_ret_address - 4;
+ regs_set_return_ip(regs, orig_ret_address - 4);
regs->link = orig_ret_address;
return 0;
@@ -439,7 +456,7 @@ int kprobe_post_handler(struct pt_regs *regs)
if (!cur || user_mode(regs))
return 0;
- len = ppc_inst_len(ppc_inst_read((struct ppc_inst *)cur->ainsn.insn));
+ len = ppc_inst_len(ppc_inst_read(cur->ainsn.insn));
/* make sure we got here for an instruction we have a kprobe on */
if (((unsigned long)cur->ainsn.insn + len) != regs->nip)
return 0;
@@ -450,8 +467,8 @@ int kprobe_post_handler(struct pt_regs *regs)
}
/* Adjust nip to after the single-stepped instruction */
- regs->nip = (unsigned long)cur->addr + len;
- regs->msr |= kcb->kprobe_saved_msr;
+ regs_set_return_ip(regs, (unsigned long)cur->addr + len);
+ regs_set_return_msr(regs, regs->msr | kcb->kprobe_saved_msr);
/* Restore the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
@@ -490,9 +507,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* and allow the page fault handler to continue as a
* normal page fault.
*/
- regs->nip = (unsigned long)cur->addr;
- regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
- regs->msr |= kcb->kprobe_saved_msr;
+ regs_set_return_ip(regs, (unsigned long)cur->addr);
+ /* Turn off 'trace' bits */
+ regs_set_return_msr(regs,
+ (regs->msr & ~MSR_SINGLESTEP) |
+ kcb->kprobe_saved_msr);
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
@@ -502,28 +521,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
if ((entry = search_exception_tables(regs->nip)) != NULL) {
- regs->nip = extable_fixup(entry);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
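
alloc_insn_page() above hands out a locked-down slot page: read-only plus executable when strict module RWX is active, with probes still written via patch_instruction()'s temporary writable mapping. The gate it tests is roughly the following (sketch; the helper's exact home in the headers is an assumption):

    static inline bool strict_module_rwx_enabled(void)
    {
            return IS_ENABLED(CONFIG_STRICT_MODULE_RWX) &&
                   strict_kernel_rwx_enabled();
    }
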
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index f061e06e9f51..cfc03e016ff2 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -15,6 +15,7 @@
#include <asm/udbg.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
+#include <asm/early_ioremap.h>
#undef DEBUG
@@ -34,6 +35,7 @@ static struct legacy_serial_info {
unsigned int clock;
int irq_check_parent;
phys_addr_t taddr;
+ void __iomem *early_addr;
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
static const struct of_device_id legacy_serial_parents[] __initconst = {
@@ -325,17 +327,16 @@ static void __init setup_legacy_serial_console(int console)
{
struct legacy_serial_info *info = &legacy_serial_infos[console];
struct plat_serial8250_port *port = &legacy_serial_ports[console];
- void __iomem *addr;
unsigned int stride;
stride = 1 << port->regshift;
/* Check if a translated MMIO address has been found */
if (info->taddr) {
- addr = ioremap(info->taddr, 0x1000);
- if (addr == NULL)
+ info->early_addr = early_ioremap(info->taddr, 0x1000);
+ if (info->early_addr == NULL)
return;
- udbg_uart_init_mmio(addr, stride);
+ udbg_uart_init_mmio(info->early_addr, stride);
} else {
/* Check if it's PIO and we support untranslated PIO */
if (port->iotype == UPIO_PORT && isa_io_special)
@@ -353,6 +354,33 @@ static void __init setup_legacy_serial_console(int console)
udbg_uart_setup(info->speed, info->clock);
}
+static int __init ioremap_legacy_serial_console(void)
+{
+ struct plat_serial8250_port *port;
+ struct legacy_serial_info *info;
+ void __iomem *vaddr;
+
+ if (legacy_serial_console < 0)
+ return 0;
+
+ info = &legacy_serial_infos[legacy_serial_console];
+ port = &legacy_serial_ports[legacy_serial_console];
+
+ if (!info->early_addr)
+ return 0;
+
+ vaddr = ioremap(info->taddr, 0x1000);
+ if (WARN_ON(!vaddr))
+ return -ENOMEM;
+
+ udbg_uart_init_mmio(vaddr, 1 << port->regshift);
+ early_iounmap(info->early_addr, 0x1000);
+ info->early_addr = NULL;
+
+ return 0;
+}
+early_initcall(ioremap_legacy_serial_console);
+
/*
* This is called very early, as part of setup_system() or eventually
* setup_arch(), basically before anything else in this file. This function
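
The legacy-serial change maps the console twice: early_ioremap() provides a temporary fixmap-backed mapping before the vmalloc machinery is up, then the early_initcall above swaps in a permanent ioremap() and releases the fixmap slot. The same handoff pattern, compressed into a free-standing sketch (dev_pa and early_map are hypothetical names):

    static void __iomem *early_map;     /* set via early_ioremap() at boot */
    static phys_addr_t dev_pa;          /* hypothetical device address */

    static int __init remap_example(void)
    {
            void __iomem *vaddr = ioremap(dev_pa, 0x1000);

            if (WARN_ON(!vaddr))
                    return -ENOMEM;
            /* repoint users at vaddr, then recycle the fixmap slot */
            early_iounmap(early_map, 0x1000);
            early_map = NULL;
            return 0;
    }
    early_initcall(remap_example);
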
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 11f0cae086ed..47a683cd00d2 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -18,6 +18,7 @@
#include <linux/extable.h>
#include <linux/ftrace.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <asm/interrupt.h>
#include <asm/machdep.h>
@@ -40,7 +41,7 @@ static struct irq_work mce_ue_event_irq_work = {
.func = machine_check_ue_irq_work,
};
-DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
+static DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);
@@ -131,6 +132,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
* Populate the mce error_type and type-specific error_type.
*/
mce_set_error_info(mce, mce_err);
+ if (mce->error_type == MCE_ERROR_TYPE_UE)
+ mce->u.ue_error.ignore_event = mce_err->ignore_event;
if (!addr)
return;
@@ -159,7 +162,6 @@ void save_mce_event(struct pt_regs *regs, long handled,
if (phys_addr != ULONG_MAX) {
mce->u.ue_error.physical_address_provided = true;
mce->u.ue_error.physical_address = phys_addr;
- mce->u.ue_error.ignore_event = mce_err->ignore_event;
machine_check_ue_event(mce);
}
}
@@ -272,7 +274,7 @@ void mce_common_process_ue(struct pt_regs *regs,
entry = search_kernel_exception_table(regs->nip);
if (entry) {
mce_err->ignore_event = true;
- regs->nip = extable_fixup(entry);
+ regs_set_return_ip(regs, extable_fixup(entry));
}
}
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 667104d4c455..c2f55fe7092d 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -463,7 +463,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
pfn = addr_to_pfn(regs, regs->nip);
if (pfn != ULONG_MAX) {
instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK);
- instr = ppc_inst_read((struct ppc_inst *)instr_addr);
+ instr = ppc_inst_read((u32 *)instr_addr);
if (!analyse_instr(&op, &tmp, instr)) {
pfn = addr_to_pfn(regs, op.ea);
*addr = op.ea;
@@ -481,12 +481,11 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
return -1;
}
-static int mce_handle_ierror(struct pt_regs *regs,
+static int mce_handle_ierror(struct pt_regs *regs, unsigned long srr1,
const struct mce_ierror_table table[],
struct mce_error_info *mce_err, uint64_t *addr,
uint64_t *phys_addr)
{
- uint64_t srr1 = regs->msr;
int handled = 0;
int i;
@@ -695,19 +694,19 @@ static long mce_handle_ue_error(struct pt_regs *regs,
}
static long mce_handle_error(struct pt_regs *regs,
+ unsigned long srr1,
const struct mce_derror_table dtable[],
const struct mce_ierror_table itable[])
{
struct mce_error_info mce_err = { 0 };
uint64_t addr, phys_addr = ULONG_MAX;
- uint64_t srr1 = regs->msr;
long handled;
if (SRR1_MC_LOADSTORE(srr1))
handled = mce_handle_derror(regs, dtable, &mce_err, &addr,
&phys_addr);
else
- handled = mce_handle_ierror(regs, itable, &mce_err, &addr,
+ handled = mce_handle_ierror(regs, srr1, itable, &mce_err, &addr,
&phys_addr);
if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE)
@@ -723,16 +722,20 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
/* P7 DD1 leaves top bits of DSISR undefined */
regs->dsisr &= 0x0000ffff;
- return mce_handle_error(regs, mce_p7_derror_table, mce_p7_ierror_table);
+ return mce_handle_error(regs, regs->msr,
+ mce_p7_derror_table, mce_p7_ierror_table);
}
long __machine_check_early_realmode_p8(struct pt_regs *regs)
{
- return mce_handle_error(regs, mce_p8_derror_table, mce_p8_ierror_table);
+ return mce_handle_error(regs, regs->msr,
+ mce_p8_derror_table, mce_p8_ierror_table);
}
long __machine_check_early_realmode_p9(struct pt_regs *regs)
{
+ unsigned long srr1 = regs->msr;
+
/*
* On POWER9 DD2.1 and below, it's possible to get a machine check
* caused by a paste instruction where only DSISR bit 25 is set. This
@@ -746,10 +749,39 @@ long __machine_check_early_realmode_p9(struct pt_regs *regs)
if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
return 1;
- return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
+ /*
+ * Async machine check due to bad real address from store or foreign
+ * link time out comes with the load/store bit (PPC bit 42) set in
+ * SRR1, but the cause comes in SRR1 not DSISR. Clear bit 42 so we're
+ * directed to the ierror table so it will find the cause (which
+ * describes it correctly as a store error).
+ */
+ if (SRR1_MC_LOADSTORE(srr1) &&
+ ((srr1 & 0x081c0000) == 0x08140000 ||
+ (srr1 & 0x081c0000) == 0x08180000)) {
+ srr1 &= ~PPC_BIT(42);
+ }
+
+ return mce_handle_error(regs, srr1,
+ mce_p9_derror_table, mce_p9_ierror_table);
}
long __machine_check_early_realmode_p10(struct pt_regs *regs)
{
- return mce_handle_error(regs, mce_p10_derror_table, mce_p10_ierror_table);
+ unsigned long srr1 = regs->msr;
+
+ /*
+ * Async machine check due to bad real address from store comes with
+ * the load/store bit (PPC bit 42) set in SRR1, but the cause comes in
+ * SRR1 not DSISR. Clear bit 42 so we're directed to the ierror table
+ * so it will find the cause (which describes it correctly as a store
+ * error).
+ */
+ if (SRR1_MC_LOADSTORE(srr1) &&
+ (srr1 & 0x081c0000) == 0x08140000) {
+ srr1 &= ~PPC_BIT(42);
+ }
+
+ return mce_handle_error(regs, srr1,
+ mce_p10_derror_table, mce_p10_ierror_table);
}
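
The 0x081c0000/0x08140000 constants above test SRR1 machine-check reason bits, and PPC_BIT() counts bits MSB-first (IBM numbering). For orientation, the two macros involved, in their standard 64-bit forms:

    #define PPC_BIT(bit)            (1UL << (63 - (bit)))   /* IBM bit order */
    #define SRR1_MC_LOADSTORE(srr1) ((srr1) & PPC_BIT(42))

Clearing PPC_BIT(42) is therefore exactly what steers these async store errors away from the derror table and into the ierror table, which names the cause correctly.
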
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 717e658b90fd..39ab15419592 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -28,45 +28,6 @@
.text
/*
- * We store the saved ksp_limit in the unused part
- * of the STACK_FRAME_OVERHEAD
- */
-_GLOBAL(call_do_softirq)
- mflr r0
- stw r0,4(r1)
- lwz r10,THREAD+KSP_LIMIT(r2)
- stw r3, THREAD+KSP_LIMIT(r2)
- stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
- mr r1,r3
- stw r10,8(r1)
- bl __do_softirq
- lwz r10,8(r1)
- lwz r1,0(r1)
- lwz r0,4(r1)
- stw r10,THREAD+KSP_LIMIT(r2)
- mtlr r0
- blr
-
-/*
- * void call_do_irq(struct pt_regs *regs, void *sp);
- */
-_GLOBAL(call_do_irq)
- mflr r0
- stw r0,4(r1)
- lwz r10,THREAD+KSP_LIMIT(r2)
- stw r4, THREAD+KSP_LIMIT(r2)
- stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
- mr r1,r4
- stw r10,8(r1)
- bl __do_irq
- lwz r10,8(r1)
- lwz r1,0(r1)
- lwz r0,4(r1)
- stw r10,THREAD+KSP_LIMIT(r2)
- mtlr r0
- blr
-
-/*
* This returns the high 64 bits of the product of two 64-bit numbers.
*/
_GLOBAL(mulhdu)
@@ -427,9 +388,3 @@ _GLOBAL(start_secondary_resume)
bl start_secondary
b .
#endif /* CONFIG_SMP */
-
-/*
- * This routine is just here to keep GCC happy - sigh...
- */
-_GLOBAL(__main)
- blr
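
call_do_softirq()/call_do_irq() disappear from the .S files because the ksp_limit bookkeeping they carried is gone and the stack switch itself fits in a few instructions; in this series they are rebuilt as C wrappers around inline asm in arch/powerpc/kernel/irq.c. A trimmed sketch of the 64-bit shape, hedged (treat the exact operand constraints as an assumption):

    static __always_inline void call_do_softirq(const void *sp)
    {
            /* Point r1 at the IRQ stack, call __do_softirq(), restore r1. */
            asm volatile(
                     PPC_STLU " %%r1, %[offset](%[sp]) ;"
                    "mr         %%r1, %[sp]            ;"
                    "bl         %[callee]              ;"
                     PPC_LL "   %%r1, 0(%%r1)          ;"
                    : /* no outputs */
                    : [sp] "b" (sp),
                      [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
                      [callee] "i" (__do_softirq)
                    : "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5",
                      "cr6", "cr7", "r0", "r3", "r4", "r5", "r6", "r7",
                      "r8", "r9", "r10", "r11", "r12");
    }
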
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 070465825c21..4b761a18a74d 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -27,28 +27,6 @@
.text
-_GLOBAL(call_do_softirq)
- mflr r0
- std r0,16(r1)
- stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
- mr r1,r3
- bl __do_softirq
- ld r1,0(r1)
- ld r0,16(r1)
- mtlr r0
- blr
-
-_GLOBAL(call_do_irq)
- mflr r0
- std r0,16(r1)
- stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
- mr r1,r4
- bl __do_irq
- ld r1,0(r1)
- ld r0,16(r1)
- mtlr r0
- blr
-
_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
srdi r8,r3,32
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index a211b0253cdb..ed04a3ba66fe 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -8,12 +8,14 @@
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include <linux/bug.h>
#include <asm/module.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/sort.h>
#include <asm/setup.h>
+#include <asm/sections.h>
static LIST_HEAD(module_bug_list);
@@ -87,13 +89,38 @@ int module_finalize(const Elf_Ehdr *hdr,
return 0;
}
-#ifdef MODULES_VADDR
+static __always_inline void *
+__module_alloc(unsigned long size, unsigned long start, unsigned long end)
+{
+ pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
+
+ /*
+ * Don't do huge page allocations for modules yet until more testing
+ * is done. STRICT_MODULE_RWX may require extra work to support this
+ * too.
+ */
+ return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, prot,
+ VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP,
+ NUMA_NO_NODE, __builtin_return_address(0));
+}
+
void *module_alloc(unsigned long size)
{
+#ifdef MODULES_VADDR
+ unsigned long limit = (unsigned long)_etext - SZ_32M;
+ void *ptr = NULL;
+
BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
- return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, GFP_KERNEL,
- PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
- __builtin_return_address(0));
-}
+ /* First try within 32M limit from _etext to avoid branch trampolines */
+ if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit)
+ ptr = __module_alloc(size, limit, MODULES_END);
+
+ if (!ptr)
+ ptr = __module_alloc(size, MODULES_VADDR, MODULES_END);
+
+ return ptr;
+#else
+ return __module_alloc(size, VMALLOC_START, VMALLOC_END);
#endif
+}
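
The 32 MB window below _etext that module_alloc() now prefers matches the reach of a powerpc relative branch: the I-form LI field is 24 bits of word-aligned, sign-extended offset, so a plain bl spans +/- 32 MB without a trampoline. The range test used by the code-patching helpers looks like this (sketch of is_offset_in_branch_range()):

    static bool is_offset_in_branch_range(long offset)
    {
            /* 24-bit LI field, word aligned, sign extended */
            return offset >= -0x2000000 && offset <= 0x1fffffc &&
                   !(offset & 0x3);
    }
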
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index c27b8687b82a..f417afc08d33 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -145,10 +145,9 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
{
- if (entry->jump[0] != (PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val)))
+ if (entry->jump[0] != PPC_RAW_LIS(_R12, PPC_HA(val)))
return 0;
- if (entry->jump[1] != (PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) |
- PPC_LO(val)))
+ if (entry->jump[1] != PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))
return 0;
return 1;
}
@@ -175,16 +174,10 @@ static uint32_t do_plt_call(void *location,
entry++;
}
- /*
- * lis r12, sym@ha
- * addi r12, r12, sym@l
- * mtctr r12
- * bctr
- */
- entry->jump[0] = PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val);
- entry->jump[1] = PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) | PPC_LO(val);
- entry->jump[2] = PPC_INST_MTCTR | __PPC_RS(R12);
- entry->jump[3] = PPC_INST_BCTR;
+ entry->jump[0] = PPC_RAW_LIS(_R12, PPC_HA(val));
+ entry->jump[1] = PPC_RAW_ADDI(_R12, _R12, PPC_LO(val));
+ entry->jump[2] = PPC_RAW_MTCTR(_R12);
+ entry->jump[3] = PPC_RAW_BCTR();
pr_debug("Initialized plt for 0x%x at %p\n", val, entry);
return (uint32_t)entry;
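
The PPC_RAW_* conversions in module_32.c keep the PLT entry byte-identical: it is still lis r12,sym@ha; addi r12,r12,sym@l; mtctr r12; bctr, just built from readable macros. The @ha/@l split rests on these standard ppc-opcode.h helpers (reproduced for reference):

    #define PPC_LO(v)  ((v) & 0xffff)
    #define PPC_HI(v)  (((v) >> 16) & 0xffff)
    /* @ha pre-biases the high half so the sign-extended low half of
     * the following addi lands on the right address */
    #define PPC_HA(v)  PPC_HI((v) + 0x8000)
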
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ae2b188365b1..6baa676e7cb6 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -122,27 +122,19 @@ struct ppc64_stub_entry
* the stub, but it's significantly shorter to put these values at the
* end of the stub code, and patch the stub address (32-bits relative
* to the TOC ptr, r2) into the stub.
- *
- * addis r11,r2, <high>
- * addi r11,r11, <low>
- * std r2,R2_STACK_OFFSET(r1)
- * ld r12,32(r11)
- * ld r2,40(r11)
- * mtctr r12
- * bctr
*/
static u32 ppc64_stub_insns[] = {
- PPC_INST_ADDIS | __PPC_RT(R11) | __PPC_RA(R2),
- PPC_INST_ADDI | __PPC_RT(R11) | __PPC_RA(R11),
+ PPC_RAW_ADDIS(_R11, _R2, 0),
+ PPC_RAW_ADDI(_R11, _R11, 0),
/* Save current r2 value in magic place on the stack. */
- PPC_INST_STD | __PPC_RS(R2) | __PPC_RA(R1) | R2_STACK_OFFSET,
- PPC_INST_LD | __PPC_RT(R12) | __PPC_RA(R11) | 32,
+ PPC_RAW_STD(_R2, _R1, R2_STACK_OFFSET),
+ PPC_RAW_LD(_R12, _R11, 32),
#ifdef PPC64_ELF_ABI_v1
/* Set up new r2 from function descriptor */
- PPC_INST_LD | __PPC_RT(R2) | __PPC_RA(R11) | 40,
+ PPC_RAW_LD(_R2, _R11, 40),
#endif
- PPC_INST_MTCTR | __PPC_RS(R12),
- PPC_INST_BCTR,
+ PPC_RAW_MTCTR(_R12),
+ PPC_RAW_BCTR(),
};
/* Count how many different 24-bit relocations (different symbol,
@@ -336,21 +328,12 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
#ifdef CONFIG_MPROFILE_KERNEL
-#define PACATOC offsetof(struct paca_struct, kernel_toc)
-
-/*
- * ld r12,PACATOC(r13)
- * addis r12,r12,<high>
- * addi r12,r12,<low>
- * mtctr r12
- * bctr
- */
static u32 stub_insns[] = {
- PPC_INST_LD | __PPC_RT(R12) | __PPC_RA(R13) | PACATOC,
- PPC_INST_ADDIS | __PPC_RT(R12) | __PPC_RA(R12),
- PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12),
- PPC_INST_MTCTR | __PPC_RS(R12),
- PPC_INST_BCTR,
+ PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
+ PPC_RAW_ADDIS(_R12, _R12, 0),
+ PPC_RAW_ADDI(_R12, _R12, 0),
+ PPC_RAW_MTCTR(_R12),
+ PPC_RAW_BCTR(),
};
/*
@@ -507,7 +490,7 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me)
if (!instr_is_relative_link_branch(ppc_inst(*prev_insn)))
return 1;
- if (*instruction != PPC_INST_NOP) {
+ if (*instruction != PPC_RAW_NOP()) {
pr_err("%s: Expected nop after call, got %08x at %pS\n",
me->name, *instruction, instruction);
return 0;
@@ -696,21 +679,17 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
* ld r2, ...(r12)
* add r2, r2, r12
*/
- if ((((uint32_t *)location)[0] & ~0xfffc) !=
- (PPC_INST_LD | __PPC_RT(R2) | __PPC_RA(R12)))
+ if ((((uint32_t *)location)[0] & ~0xfffc) != PPC_RAW_LD(_R2, _R12, 0))
break;
- if (((uint32_t *)location)[1] !=
- (PPC_INST_ADD | __PPC_RT(R2) | __PPC_RA(R2) | __PPC_RB(R12)))
+ if (((uint32_t *)location)[1] != PPC_RAW_ADD(_R2, _R2, _R12))
break;
/*
* If found, replace it with:
* addis r2, r12, (.TOC.-func)@ha
* addi r2, r2, (.TOC.-func)@l
*/
- ((uint32_t *)location)[0] = PPC_INST_ADDIS | __PPC_RT(R2) |
- __PPC_RA(R12) | PPC_HA(value);
- ((uint32_t *)location)[1] = PPC_INST_ADDI | __PPC_RT(R2) |
- __PPC_RA(R2) | PPC_LO(value);
+ ((uint32_t *)location)[0] = PPC_RAW_ADDIS(_R2, _R12, PPC_HA(value));
+ ((uint32_t *)location)[1] = PPC_RAW_ADDI(_R2, _R2, PPC_LO(value));
break;
case R_PPC64_REL16_HA:
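
The zero immediates left in PPC_RAW_ADDIS()/PPC_RAW_ADDI() inside the stub templates above are placeholders: stub creation ORs the TOC-relative offset into them afterwards. A sketch of that fixup step, assuming the usual create_stub() flow:

    /* reladdr = stub address relative to the TOC pointer (r2) */
    entry->jump[0] |= PPC_HA(reladdr);      /* addis r11,r2,reladdr@ha */
    entry->jump[1] |= PPC_LO(reladdr);      /* addi  r11,r11,reladdr@l */
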
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 532f22637783..3c8d9bbb51cf 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -647,6 +647,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
{
struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
static unsigned int oops_count = 0;
+ static struct kmsg_dump_iter iter;
static bool panicking = false;
static DEFINE_SPINLOCK(lock);
unsigned long flags;
@@ -681,13 +682,14 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
return;
if (big_oops_buf) {
- kmsg_dump_get_buffer(dumper, false,
+ kmsg_dump_rewind(&iter);
+ kmsg_dump_get_buffer(&iter, false,
big_oops_buf, big_oops_buf_sz, &text_len);
rc = zip_oops(text_len);
}
if (rc != 0) {
- kmsg_dump_rewind(dumper);
- kmsg_dump_get_buffer(dumper, false,
+ kmsg_dump_rewind(&iter);
+ kmsg_dump_get_buffer(&iter, false,
oops_data, oops_data_sz, &text_len);
err_type = ERR_TYPE_KERNEL_PANIC;
oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
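
This hunk tracks the kmsg_dump API split: the records cursor moved out of struct kmsg_dumper into a caller-owned struct kmsg_dump_iter, so nvram's dumper keeps its own static iterator and rewinds it per attempt. Minimal usage sketch of the same API (write_sink() is a hypothetical consumer):

    static struct kmsg_dump_iter iter;
    static char line[1024];
    size_t len;

    kmsg_dump_rewind(&iter);
    while (kmsg_dump_get_line(&iter, false, line, sizeof(line), &len))
            write_sink(line, len);          /* hypothetical */
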
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 7f7cdbeacd1a..c79899abcec8 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -18,24 +18,16 @@
#include <asm/ppc-opcode.h>
#include <asm/inst.h>
-#define TMPL_CALL_HDLR_IDX \
- (optprobe_template_call_handler - optprobe_template_entry)
-#define TMPL_EMULATE_IDX \
- (optprobe_template_call_emulate - optprobe_template_entry)
-#define TMPL_RET_IDX \
- (optprobe_template_ret - optprobe_template_entry)
-#define TMPL_OP_IDX \
- (optprobe_template_op_address - optprobe_template_entry)
-#define TMPL_INSN_IDX \
- (optprobe_template_insn - optprobe_template_entry)
-#define TMPL_END_IDX \
- (optprobe_template_end - optprobe_template_entry)
-
-DEFINE_INSN_CACHE_OPS(ppc_optinsn);
+#define TMPL_CALL_HDLR_IDX (optprobe_template_call_handler - optprobe_template_entry)
+#define TMPL_EMULATE_IDX (optprobe_template_call_emulate - optprobe_template_entry)
+#define TMPL_RET_IDX (optprobe_template_ret - optprobe_template_entry)
+#define TMPL_OP_IDX (optprobe_template_op_address - optprobe_template_entry)
+#define TMPL_INSN_IDX (optprobe_template_insn - optprobe_template_entry)
+#define TMPL_END_IDX (optprobe_template_end - optprobe_template_entry)
static bool insn_page_in_use;
-static void *__ppc_alloc_insn_page(void)
+void *alloc_optinsn_page(void)
{
if (insn_page_in_use)
return NULL;
@@ -43,20 +35,11 @@ static void *__ppc_alloc_insn_page(void)
return &optinsn_slot;
}
-static void __ppc_free_insn_page(void *page __maybe_unused)
+void free_optinsn_page(void *page)
{
insn_page_in_use = false;
}
-struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
- .mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
- .pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
- /* insn_size initialized later */
- .alloc = __ppc_alloc_insn_page,
- .free = __ppc_free_insn_page,
- .nr_garbage = 0,
-};
-
/*
* Check if we can optimize this probe. Returns NIP post-emulation if this can
* be optimized and 0 otherwise.
@@ -66,6 +49,7 @@ static unsigned long can_optimize(struct kprobe *p)
struct pt_regs regs;
struct instruction_op op;
unsigned long nip = 0;
+ unsigned long addr = (unsigned long)p->addr;
/*
* kprobe placed for kretprobe during boot time
@@ -73,7 +57,7 @@ static unsigned long can_optimize(struct kprobe *p)
* So further checks can be skipped.
*/
if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
- return (unsigned long)p->addr + sizeof(kprobe_opcode_t);
+ return addr + sizeof(kprobe_opcode_t);
/*
* We only support optimizing kernel addresses, but not
@@ -81,11 +65,11 @@ static unsigned long can_optimize(struct kprobe *p)
*
* FIXME: Optimize kprobes placed in module addresses.
*/
- if (!is_kernel_addr((unsigned long)p->addr))
+ if (!is_kernel_addr(addr))
return 0;
memset(&regs, 0, sizeof(struct pt_regs));
- regs.nip = (unsigned long)p->addr;
+ regs.nip = addr;
regs.trap = 0x0;
regs.msr = MSR_KERNEL;
@@ -100,9 +84,8 @@ static unsigned long can_optimize(struct kprobe *p)
* Ensure that the instruction is not a conditional branch,
* and that it can be emulated.
*/
- if (!is_conditional_branch(ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) &&
- analyse_instr(&op, &regs,
- ppc_inst_read((struct ppc_inst *)p->ainsn.insn)) == 1) {
+ if (!is_conditional_branch(ppc_inst_read(p->ainsn.insn)) &&
+ analyse_instr(&op, &regs, ppc_inst_read(p->ainsn.insn)) == 1) {
emulate_update_regs(&regs, &op);
nip = regs.nip;
}
@@ -123,7 +106,7 @@ static void optimized_callback(struct optimized_kprobe *op,
kprobes_inc_nmissed_count(&op->kp);
} else {
__this_cpu_write(current_kprobe, &op->kp);
- regs->nip = (unsigned long)op->kp.addr;
+ regs_set_return_ip(regs, (unsigned long)op->kp.addr);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
opt_pre_handler(&op->kp, regs);
__this_cpu_write(current_kprobe, NULL);
@@ -136,63 +119,53 @@ NOKPROBE_SYMBOL(optimized_callback);
void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
if (op->optinsn.insn) {
- free_ppc_optinsn_slot(op->optinsn.insn, 1);
+ free_optinsn_slot(op->optinsn.insn, 1);
op->optinsn.insn = NULL;
}
}
+static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
+{
+ patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HI(val))));
+ patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
+}
+
/*
* Generate instructions to load provided immediate 64-bit value
* to register 'reg' and patch these instructions at 'addr'.
*/
-static void patch_imm64_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
+static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr)
+{
+ patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HIGHEST(val))));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_HIGHER(val))));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_SLDI(reg, reg, 32)));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_ORIS(reg, reg, PPC_HI(val))));
+ patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
+}
+
+static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
- /* lis reg,(op)@highest */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_ADDIS | ___PPC_RT(reg) |
- ((val >> 48) & 0xffff)));
- addr++;
-
- /* ori reg,reg,(op)@higher */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) |
- ___PPC_RS(reg) | ((val >> 32) & 0xffff)));
- addr++;
-
- /* rldicr reg,reg,32,31 */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_RLDICR | ___PPC_RA(reg) |
- ___PPC_RS(reg) | __PPC_SH64(32) | __PPC_ME64(31)));
- addr++;
-
- /* oris reg,reg,(op)@h */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_ORIS | ___PPC_RA(reg) |
- ___PPC_RS(reg) | ((val >> 16) & 0xffff)));
- addr++;
-
- /* ori reg,reg,(op)@l */
- patch_instruction((struct ppc_inst *)addr,
- ppc_inst(PPC_INST_ORI | ___PPC_RA(reg) |
- ___PPC_RS(reg) | (val & 0xffff)));
+ if (IS_ENABLED(CONFIG_PPC64))
+ patch_imm64_load_insns(val, reg, addr);
+ else
+ patch_imm32_load_insns(val, reg, addr);
}
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
struct ppc_inst branch_op_callback, branch_emulate_step, temp;
- kprobe_opcode_t *op_callback_addr, *emulate_step_addr, *buff;
+ unsigned long op_callback_addr, emulate_step_addr;
+ kprobe_opcode_t *buff;
long b_offset;
unsigned long nip, size;
int rc, i;
- kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
-
nip = can_optimize(p);
if (!nip)
return -EILSEQ;
/* Allocate instruction slot for detour buffer */
- buff = get_ppc_optinsn_slot();
+ buff = get_optinsn_slot();
if (!buff)
return -ENOMEM;
@@ -210,8 +183,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
goto error;
/* Check if the return address is also within 32MB range */
- b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
- (unsigned long)nip;
+ b_offset = (unsigned long)(buff + TMPL_RET_IDX) - nip;
if (!is_offset_in_branch_range(b_offset))
goto error;
@@ -220,8 +192,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
pr_devel("Copying template to %p, size %lu\n", buff, size);
for (i = 0; i < size; i++) {
- rc = patch_instruction((struct ppc_inst *)(buff + i),
- ppc_inst(*(optprobe_template_entry + i)));
+ rc = patch_instruction(buff + i, ppc_inst(*(optprobe_template_entry + i)));
if (rc < 0)
goto error;
}
@@ -230,56 +201,53 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
* Fixup the template with instructions to:
* 1. load the address of the actual probepoint
*/
- patch_imm64_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);
+ patch_imm_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);
/*
* 2. branch to optimized_callback() and emulate_step()
*/
- op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
- emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
+ op_callback_addr = ppc_kallsyms_lookup_name("optimized_callback");
+ emulate_step_addr = ppc_kallsyms_lookup_name("emulate_step");
if (!op_callback_addr || !emulate_step_addr) {
WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
goto error;
}
- rc = create_branch(&branch_op_callback,
- (struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
- (unsigned long)op_callback_addr,
- BRANCH_SET_LINK);
+ rc = create_branch(&branch_op_callback, buff + TMPL_CALL_HDLR_IDX,
+ op_callback_addr, BRANCH_SET_LINK);
- rc |= create_branch(&branch_emulate_step,
- (struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
- (unsigned long)emulate_step_addr,
- BRANCH_SET_LINK);
+ rc |= create_branch(&branch_emulate_step, buff + TMPL_EMULATE_IDX,
+ emulate_step_addr, BRANCH_SET_LINK);
if (rc)
goto error;
- patch_instruction((struct ppc_inst *)(buff + TMPL_CALL_HDLR_IDX),
- branch_op_callback);
- patch_instruction((struct ppc_inst *)(buff + TMPL_EMULATE_IDX),
- branch_emulate_step);
+ patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
+ patch_instruction(buff + TMPL_EMULATE_IDX, branch_emulate_step);
/*
* 3. load instruction to be emulated into relevant register, and
*/
- temp = ppc_inst_read((struct ppc_inst *)p->ainsn.insn);
- patch_imm64_load_insns(ppc_inst_as_u64(temp), 4, buff + TMPL_INSN_IDX);
+ if (IS_ENABLED(CONFIG_PPC64)) {
+ temp = ppc_inst_read(p->ainsn.insn);
+ patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);
+ } else {
+ patch_imm_load_insns((unsigned long)p->ainsn.insn, 4, buff + TMPL_INSN_IDX);
+ }
/*
* 4. branch back from trampoline
*/
- patch_branch((struct ppc_inst *)(buff + TMPL_RET_IDX), (unsigned long)nip, 0);
+ patch_branch(buff + TMPL_RET_IDX, nip, 0);
- flush_icache_range((unsigned long)buff,
- (unsigned long)(&buff[TMPL_END_IDX]));
+ flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX]));
op->optinsn.insn = buff;
return 0;
error:
- free_ppc_optinsn_slot(buff, 0);
+ free_optinsn_slot(buff, 0);
return -ERANGE;
}
@@ -310,12 +278,9 @@ void arch_optimize_kprobes(struct list_head *oplist)
* Backup instructions which will be replaced
* by jump address
*/
- memcpy(op->optinsn.copied_insn, op->kp.addr,
- RELATIVEJUMP_SIZE);
- create_branch(&instr,
- (struct ppc_inst *)op->kp.addr,
- (unsigned long)op->optinsn.insn, 0);
- patch_instruction((struct ppc_inst *)op->kp.addr, instr);
+ memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
+ create_branch(&instr, op->kp.addr, (unsigned long)op->optinsn.insn, 0);
+ patch_instruction(op->kp.addr, instr);
list_del_init(&op->list);
}
}
@@ -325,8 +290,7 @@ void arch_unoptimize_kprobe(struct optimized_kprobe *op)
arch_arm_kprobe(&op->kp);
}
-void arch_unoptimize_kprobes(struct list_head *oplist,
- struct list_head *done_list)
+void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list)
{
struct optimized_kprobe *op;
struct optimized_kprobe *tmp;
@@ -337,8 +301,7 @@ void arch_unoptimize_kprobes(struct list_head *oplist,
}
}
-int arch_within_optimized_kprobe(struct optimized_kprobe *op,
- unsigned long addr)
+int arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
{
return ((unsigned long)op->kp.addr <= addr &&
(unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
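
patch_imm_load_insns() above dispatches to a five-instruction sequence on 64-bit and a two-instruction lis/ori pair on 32-bit. Worked example for the 64-bit path, loading val = 0x123456789abcdef0 into r3 (the mnemonics patch_imm64_load_insns() emits):

    lis   r3, 0x1234        /* PPC_HIGHEST(val), bits 63..48 */
    ori   r3, r3, 0x5678    /* PPC_HIGHER(val),  bits 47..32 */
    sldi  r3, r3, 32        /* shift them into the top half  */
    oris  r3, r3, 0x9abc    /* PPC_HI(val),      bits 31..16 */
    ori   r3, r3, 0xdef0    /* PPC_LO(val),      bits 15..0  */
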
diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S
index ff8ba4d3824d..19ea3312403c 100644
--- a/arch/powerpc/kernel/optprobes_head.S
+++ b/arch/powerpc/kernel/optprobes_head.S
@@ -9,6 +9,16 @@
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
+#ifdef CONFIG_PPC64
+#define SAVE_30GPRS(base) SAVE_10GPRS(2,base); SAVE_10GPRS(12,base); SAVE_10GPRS(22,base)
+#define REST_30GPRS(base) REST_10GPRS(2,base); REST_10GPRS(12,base); REST_10GPRS(22,base)
+#define TEMPLATE_FOR_IMM_LOAD_INSNS nop; nop; nop; nop; nop
+#else
+#define SAVE_30GPRS(base) stmw r2, GPR2(base)
+#define REST_30GPRS(base) lmw r2, GPR2(base)
+#define TEMPLATE_FOR_IMM_LOAD_INSNS nop; nop; nop
+#endif
+
#define OPT_SLOT_SIZE 65536
.balign 4
@@ -30,39 +40,41 @@ optinsn_slot:
.global optprobe_template_entry
optprobe_template_entry:
/* Create an in-memory pt_regs */
- stdu r1,-INT_FRAME_SIZE(r1)
+ PPC_STLU r1,-INT_FRAME_SIZE(r1)
SAVE_GPR(0,r1)
/* Save the previous SP into stack */
addi r0,r1,INT_FRAME_SIZE
- std r0,GPR1(r1)
- SAVE_10GPRS(2,r1)
- SAVE_10GPRS(12,r1)
- SAVE_10GPRS(22,r1)
+ PPC_STL r0,GPR1(r1)
+ SAVE_30GPRS(r1)
/* Save SPRS */
mfmsr r5
- std r5,_MSR(r1)
+ PPC_STL r5,_MSR(r1)
li r5,0x700
- std r5,_TRAP(r1)
+ PPC_STL r5,_TRAP(r1)
li r5,0
- std r5,ORIG_GPR3(r1)
- std r5,RESULT(r1)
+ PPC_STL r5,ORIG_GPR3(r1)
+ PPC_STL r5,RESULT(r1)
mfctr r5
- std r5,_CTR(r1)
+ PPC_STL r5,_CTR(r1)
mflr r5
- std r5,_LINK(r1)
+ PPC_STL r5,_LINK(r1)
mfspr r5,SPRN_XER
- std r5,_XER(r1)
+ PPC_STL r5,_XER(r1)
mfcr r5
- std r5,_CCR(r1)
+ PPC_STL r5,_CCR(r1)
+#ifdef CONFIG_PPC64
lbz r5,PACAIRQSOFTMASK(r13)
std r5,SOFTE(r1)
+#endif
/*
* We may get here from a module, so load the kernel TOC in r2.
* The original TOC gets restored when pt_regs is restored
* further below.
*/
+#ifdef CONFIG_PPC64
ld r2,PACATOC(r13)
+#endif
.global optprobe_template_op_address
optprobe_template_op_address:
@@ -70,11 +82,8 @@ optprobe_template_op_address:
* Parameters to optimized_callback():
* 1. optimized_kprobe structure in r3
*/
- nop
- nop
- nop
- nop
- nop
+ TEMPLATE_FOR_IMM_LOAD_INSNS
+
/* 2. pt_regs pointer in r4 */
addi r4,r1,STACK_FRAME_OVERHEAD
@@ -92,11 +101,7 @@ optprobe_template_call_handler:
.global optprobe_template_insn
optprobe_template_insn:
/* 2, Pass instruction to be emulated in r4 */
- nop
- nop
- nop
- nop
- nop
+ TEMPLATE_FOR_IMM_LOAD_INSNS
.global optprobe_template_call_emulate
optprobe_template_call_emulate:
@@ -107,20 +112,18 @@ optprobe_template_call_emulate:
* All done.
* Now, restore the registers...
*/
- ld r5,_MSR(r1)
+ PPC_LL r5,_MSR(r1)
mtmsr r5
- ld r5,_CTR(r1)
+ PPC_LL r5,_CTR(r1)
mtctr r5
- ld r5,_LINK(r1)
+ PPC_LL r5,_LINK(r1)
mtlr r5
- ld r5,_XER(r1)
+ PPC_LL r5,_XER(r1)
mtxer r5
- ld r5,_CCR(r1)
+ PPC_LL r5,_CCR(r1)
mtcr r5
REST_GPR(0,r1)
- REST_10GPRS(2,r1)
- REST_10GPRS(12,r1)
- REST_10GPRS(22,r1)
+ REST_30GPRS(r1)
/* Restore the previous SP */
addi r1,r1,INT_FRAME_SIZE
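
The optprobe template now builds for 32-bit because PPC_STL/PPC_LL/PPC_STLU expand to the natural word width for each platform. Sketch of the asm-compat.h pattern behind them (abbreviated):

    #ifdef __powerpc64__
    #define PPC_LL      stringify_in_c(ld)
    #define PPC_STL     stringify_in_c(std)
    #define PPC_STLU    stringify_in_c(stdu)
    #else
    #define PPC_LL      stringify_in_c(lwz)
    #define PPC_STL     stringify_in_c(stw)
    #define PPC_STLU    stringify_in_c(stwu)
    #endif
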
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 7f5aae3c387d..9bd30cac852b 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -346,10 +346,8 @@ void copy_mm_to_paca(struct mm_struct *mm)
#ifdef CONFIG_PPC_BOOK3S
mm_context_t *context = &mm->context;
- get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES
VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
- get_paca()->mm_ctx_slb_addr_limit = mm_ctx_slb_addr_limit(context);
memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
LOW_SLICE_ARRAY_SZ);
memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 9312e6eda7ff..3fb7e572abed 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -140,7 +140,7 @@ void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size)
addr = (unsigned long)area->addr;
if (ioremap_page_range(addr, addr + size, paddr,
pgprot_noncached(PAGE_KERNEL))) {
- unmap_kernel_range(addr, size);
+ vunmap_range(addr, addr + size);
return NULL;
}
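
API note for the hunk above: unmap_kernel_range(addr, size) was removed in favour of vunmap_range(), which takes a [start, end) pair; hence the addr + size. The replacement's prototype, for reference:

    /* include/linux/vmalloc.h */
    void vunmap_range(unsigned long addr, unsigned long end);
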
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3231c2df9e26..185beb290580 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -96,7 +96,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
if (tsk == current && tsk->thread.regs &&
MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
!test_thread_flag(TIF_RESTORE_TM)) {
- tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
+ regs_set_return_msr(&tsk->thread.ckpt_regs,
+ tsk->thread.regs->msr);
set_thread_flag(TIF_RESTORE_TM);
}
}
@@ -161,7 +162,7 @@ static void __giveup_fpu(struct task_struct *tsk)
msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
if (cpu_has_feature(CPU_FTR_VSX))
msr &= ~MSR_VSX;
- tsk->thread.regs->msr = msr;
+ regs_set_return_msr(tsk->thread.regs, msr);
}
void giveup_fpu(struct task_struct *tsk)
@@ -244,7 +245,7 @@ static void __giveup_altivec(struct task_struct *tsk)
msr &= ~MSR_VEC;
if (cpu_has_feature(CPU_FTR_VSX))
msr &= ~MSR_VSX;
- tsk->thread.regs->msr = msr;
+ regs_set_return_msr(tsk->thread.regs, msr);
}
void giveup_altivec(struct task_struct *tsk)
@@ -559,7 +560,7 @@ void notrace restore_math(struct pt_regs *regs)
msr_check_and_clear(new_msr);
- regs->msr |= new_msr | fpexc_mode;
+ regs_set_return_msr(regs, regs->msr | new_msr | fpexc_mode);
}
}
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -1114,12 +1115,13 @@ void restore_tm_state(struct pt_regs *regs)
#endif
restore_math(regs);
- regs->msr |= msr_diff;
+ regs_set_return_msr(regs, regs->msr | msr_diff);
}
-#else
+#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev, new)
+void tm_reclaim_current(uint8_t cause) {}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
static inline void save_sprs(struct thread_struct *t)
@@ -1128,6 +1130,10 @@ static inline void save_sprs(struct thread_struct *t)
if (cpu_has_feature(CPU_FTR_ALTIVEC))
t->vrsave = mfspr(SPRN_VRSAVE);
#endif
+#ifdef CONFIG_SPE
+ if (cpu_has_feature(CPU_FTR_SPE))
+ t->spefscr = mfspr(SPRN_SPEFSCR);
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR))
t->dscr = mfspr(SPRN_DSCR);
@@ -1158,6 +1164,11 @@ static inline void restore_sprs(struct thread_struct *old_thread,
old_thread->vrsave != new_thread->vrsave)
mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
+#ifdef CONFIG_SPE
+ if (cpu_has_feature(CPU_FTR_SPE) &&
+ old_thread->spefscr != new_thread->spefscr)
+ mtspr(SPRN_SPEFSCR, new_thread->spefscr);
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR)) {
u64 dscr = get_paca()->dscr_default;
@@ -1212,6 +1223,19 @@ struct task_struct *__switch_to(struct task_struct *prev,
__flush_tlb_pending(batch);
batch->active = 0;
}
+
+ /*
+ * On POWER9 the copy-paste buffer can only paste into
+ * foreign real addresses, so unprivileged processes can not
+ * see the data or use it in any way unless they have
+ * foreign real mappings. If the new process has the foreign
+ * real address mappings, we must issue a cp_abort to clear
+ * any state and prevent snooping, corruption or a covert
+ * channel. ISA v3.1 supports paste into local memory.
+ */
+ if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
+ atomic_read(&new->mm->context.vas_windows)))
+ asm volatile(PPC_CP_ABORT);
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -1247,40 +1271,48 @@ struct task_struct *__switch_to(struct task_struct *prev,
}
/*
- * Call restore_sprs() before calling _switch(). If we move it after
- * _switch() then we miss out on calling it for new tasks. The reason
- * for this is we manually create a stack frame for new tasks that
- * directly returns through ret_from_fork() or
+ * Call restore_sprs() and set_return_regs_changed() before calling
+ * _switch(). If we move it after _switch() then we miss out on calling
+ * it for new tasks. The reason for this is we manually create a stack
+ * frame for new tasks that directly returns through ret_from_fork() or
* ret_from_kernel_thread(). See copy_thread() for details.
*/
restore_sprs(old_thread, new_thread);
+ set_return_regs_changed(); /* _switch changes stack (and regs) */
+
+#ifdef CONFIG_PPC32
+ kuap_assert_locked();
+#endif
last = _switch(old_thread, new_thread);
+ /*
+ * Nothing after _switch will be run for newly created tasks,
+ * because they switch directly to ret_from_fork/ret_from_kernel_thread
+ * etc. Code added here should have a comment explaining why that is
+ * okay.
+ */
+
#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * This applies to a process that was context switched while inside
+ * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
+ * deactivated above, before _switch(). This will never be the case
+ * for new tasks.
+ */
if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
batch = this_cpu_ptr(&ppc64_tlb_batch);
batch->active = 1;
}
- if (current->thread.regs) {
+ /*
+ * Math facilities are masked out of the child MSR in copy_thread.
+ * A new task does not need to restore_math because it will
+ * demand fault them.
+ */
+ if (current->thread.regs)
restore_math(current->thread.regs);
-
- /*
- * On POWER9 the copy-paste buffer can only paste into
- * foreign real addresses, so unprivileged processes can not
- * see the data or use it in any way unless they have
- * foreign real mappings. If the new process has the foreign
- * real address mappings, we must issue a cp_abort to clear
- * any state and prevent snooping, corruption or a covert
- * channel. ISA v3.1 supports paste into local memory.
- */
- if (current->mm &&
- (cpu_has_feature(CPU_FTR_ARCH_31) ||
- atomic_read(&current->mm->context.vas_windows)))
- asm volatile(PPC_CP_ABORT);
- }
#endif /* CONFIG_PPC_BOOK3S_64 */
return last;
@@ -1444,11 +1476,9 @@ static void print_msr_bits(unsigned long val)
#ifdef CONFIG_PPC64
#define REG "%016lx"
#define REGS_PER_LINE 4
-#define LAST_VOLATILE 13
#else
#define REG "%08lx"
#define REGS_PER_LINE 8
-#define LAST_VOLATILE 12
#endif
static void __show_regs(struct pt_regs *regs)
@@ -1465,7 +1495,9 @@ static void __show_regs(struct pt_regs *regs)
trap = TRAP(regs);
if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
pr_cont("CFAR: "REG" ", regs->orig_gpr3);
- if (trap == 0x200 || trap == 0x300 || trap == 0x600) {
+ if (trap == INTERRUPT_MACHINE_CHECK ||
+ trap == INTERRUPT_DATA_STORAGE ||
+ trap == INTERRUPT_ALIGNMENT) {
if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE))
pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
else
@@ -1484,8 +1516,6 @@ static void __show_regs(struct pt_regs *regs)
if ((i % REGS_PER_LINE) == 0)
pr_cont("\nGPR%02d: ", i);
pr_cont(REG " ", regs->gpr[i]);
- if (i == LAST_VOLATILE && !FULL_REGS(regs))
- break;
}
pr_cont("\n");
/*
@@ -1688,7 +1718,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
} else {
/* user thread */
struct pt_regs *regs = current_pt_regs();
- CHECK_FULL_REGS(regs);
*childregs = *regs;
if (usp)
childregs->gpr[1] = usp;
@@ -1724,9 +1753,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
kregs = (struct pt_regs *) sp;
sp -= STACK_FRAME_OVERHEAD;
p->thread.ksp = sp;
-#ifdef CONFIG_PPC32
- p->thread.ksp_limit = (unsigned long)end_of_stack(p);
-#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
for (i = 0; i < nr_wp_slots(); i++)
p->thread.ptrace_bps[i] = NULL;
@@ -1738,6 +1764,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#ifdef CONFIG_ALTIVEC
p->thread.vr_save_area = NULL;
#endif
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+ p->thread.kuap = KUAP_NONE;
+#endif
setup_ksp_vsid(p, sp);
@@ -1796,13 +1825,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
regs->ccr = 0;
regs->gpr[1] = sp;
- /*
- * We have just cleared all the nonvolatile GPRs, so make
- * FULL_REGS(regs) return true. This is necessary to allow
- * ptrace to examine the thread immediately after exec.
- */
- SET_FULL_REGS(regs);
-
#ifdef CONFIG_PPC32
regs->mq = 0;
regs->nip = start;
@@ -1847,13 +1869,14 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
}
regs->gpr[2] = toc;
}
- regs->nip = entry;
- regs->msr = MSR_USER64;
+ regs_set_return_ip(regs, entry);
+ regs_set_return_msr(regs, MSR_USER64);
} else {
- regs->nip = start;
regs->gpr[2] = 0;
- regs->msr = MSR_USER32;
+ regs_set_return_ip(regs, start);
+ regs_set_return_msr(regs, MSR_USER32);
}
+
#endif
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
@@ -1884,7 +1907,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.tm_tfiar = 0;
current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-
}
EXPORT_SYMBOL(start_thread);
@@ -1932,9 +1954,10 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
if (val > PR_FP_EXC_PRECISE)
return -EINVAL;
tsk->thread.fpexc_mode = __pack_fe01(val);
- if (regs != NULL && (regs->msr & MSR_FP) != 0)
- regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
- | tsk->thread.fpexc_mode;
+ if (regs != NULL && (regs->msr & MSR_FP) != 0) {
+ regs_set_return_msr(regs, (regs->msr & ~(MSR_FE0|MSR_FE1))
+ | tsk->thread.fpexc_mode);
+ }
return 0;
}
@@ -1980,9 +2003,9 @@ int set_endian(struct task_struct *tsk, unsigned int val)
return -EINVAL;
if (val == PR_ENDIAN_BIG)
- regs->msr &= ~MSR_LE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_LE);
else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
- regs->msr |= MSR_LE;
+ regs_set_return_msr(regs, regs->msr | MSR_LE);
else
return -EINVAL;
@@ -2093,7 +2116,7 @@ static unsigned long __get_wchan(struct task_struct *p)
unsigned long ip, sp;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
sp = p->thread.ksp;
@@ -2103,7 +2126,7 @@ static unsigned long __get_wchan(struct task_struct *p)
do {
sp = *(unsigned long *)sp;
if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
- p->state == TASK_RUNNING)
+ task_is_running(p))
return 0;
if (count > 0) {
ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
@@ -2130,8 +2153,9 @@ unsigned long get_wchan(struct task_struct *p)
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
-void show_stack(struct task_struct *tsk, unsigned long *stack,
- const char *loglvl)
+void __no_sanitize_address show_stack(struct task_struct *tsk,
+ unsigned long *stack,
+ const char *loglvl)
{
unsigned long sp, ip, lr, newsp;
int count = 0;
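
set_return_regs_changed(), called before _switch() above, is the primitive behind the regs_set_return_ip()/regs_set_return_msr() helpers used throughout this diff: it invalidates the cached SRR0/SRR1 so the interrupt-return path reloads them. A sketch of the Book3S-64 paca-flag form (field names as this series defines them; treat the details as illustrative):

    static inline void set_return_regs_changed(void)
    {
    #ifdef CONFIG_PPC_BOOK3S_64
            local_paca->hsrr_valid = 0;
            local_paca->srr_valid = 0;
    #endif
    }
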
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 9a4797d1d40d..f620e04dc9bf 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -65,6 +65,8 @@
#define DBG(fmt...)
#endif
+int *chip_id_lookup_table;
+
#ifdef CONFIG_PPC64
int __initdata iommu_is_off;
int __initdata iommu_force_on;
@@ -267,7 +269,7 @@ static struct feature_property {
};
#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
-static inline void identical_pvr_fixup(unsigned long node)
+static __init void identical_pvr_fixup(unsigned long node)
{
unsigned int pvr;
const char *model = of_get_flat_dt_prop(node, "model", NULL);
@@ -756,7 +758,7 @@ void __init early_init_devtree(void *params)
first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
setup_initial_memory_limit(memstart_addr, first_memblock_size);
/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
- memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
+ memblock_reserve(PHYSICAL_START, __pa(_end) - PHYSICAL_START);
/* If relocatable, reserve first 32k for interrupt vectors etc. */
if (PHYSICAL_START > MEMORY_START)
memblock_reserve(MEMORY_START, 0x8000);
@@ -914,13 +916,22 @@ EXPORT_SYMBOL(of_get_ibm_chip_id);
int cpu_to_chip_id(int cpu)
{
struct device_node *np;
+ int ret = -1, idx;
+
+ idx = cpu / threads_per_core;
+ if (chip_id_lookup_table && chip_id_lookup_table[idx] != -1)
+ return chip_id_lookup_table[idx];
np = of_get_cpu_node(cpu, NULL);
- if (!np)
- return -1;
+ if (np) {
+ ret = of_get_ibm_chip_id(np);
+ of_node_put(np);
+
+ if (chip_id_lookup_table)
+ chip_id_lookup_table[idx] = ret;
+ }
- of_node_put(np);
- return of_get_ibm_chip_id(np);
+ return ret;
}
EXPORT_SYMBOL(cpu_to_chip_id);
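
chip_id_lookup_table turns cpu_to_chip_id() from a device-tree walk into an array lookup, one slot per core, with -1 meaning not-yet-cached. An allocation sketch under the assumption that setup code sizes it by core count (num_cores is an illustrative name; the real allocation lives in the SMP setup path):

    chip_id_lookup_table = kcalloc(num_cores, sizeof(int), GFP_KERNEL);
    if (chip_id_lookup_table)
            /* 0xff bytes make every int slot read as -1 ("unknown") */
            memset(chip_id_lookup_table, -1, num_cores * sizeof(int));
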
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ccf77b985c8f..a5bf355ce1d6 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -27,10 +27,12 @@
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/pgtable.h>
+#include <linux/printk.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
+#include <asm/interrupt.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -242,13 +244,31 @@ static int __init prom_strcmp(const char *cs, const char *ct)
return 0;
}
-static char __init *prom_strcpy(char *dest, const char *src)
+static ssize_t __init prom_strscpy_pad(char *dest, const char *src, size_t n)
{
- char *tmp = dest;
+ ssize_t rc;
+ size_t i;
- while ((*dest++ = *src++) != '\0')
- /* nothing */;
- return tmp;
+ if (n == 0 || n > INT_MAX)
+ return -E2BIG;
+
+ // Copy up to n bytes
+ for (i = 0; i < n && src[i] != '\0'; i++)
+ dest[i] = src[i];
+
+ rc = i;
+
+ // If we copied all n then we have run out of space for the nul
+ if (rc == n) {
+ // Rewind by one character to ensure nul termination
+ i--;
+ rc = -E2BIG;
+ }
+
+ for (; i < n; i++)
+ dest[i] = '\0';
+
+ return rc;
}
static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
@@ -701,13 +721,12 @@ static int __init prom_setprop(phandle node, const char *nodename,
}
/* We can't use the standard versions because of relocation headaches. */
-#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
- || ('a' <= (c) && (c) <= 'f') \
- || ('A' <= (c) && (c) <= 'F'))
+#define prom_isxdigit(c) \
+ (('0' <= (c) && (c) <= '9') || ('a' <= (c) && (c) <= 'f') || ('A' <= (c) && (c) <= 'F'))
-#define isdigit(c) ('0' <= (c) && (c) <= '9')
-#define islower(c) ('a' <= (c) && (c) <= 'z')
-#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
+#define prom_isdigit(c) ('0' <= (c) && (c) <= '9')
+#define prom_islower(c) ('a' <= (c) && (c) <= 'z')
+#define prom_toupper(c) (prom_islower(c) ? ((c) - 'a' + 'A') : (c))
static unsigned long prom_strtoul(const char *cp, const char **endp)
{
@@ -716,14 +735,14 @@ static unsigned long prom_strtoul(const char *cp, const char **endp)
if (*cp == '0') {
base = 8;
cp++;
- if (toupper(*cp) == 'X') {
+ if (prom_toupper(*cp) == 'X') {
cp++;
base = 16;
}
}
- while (isxdigit(*cp) &&
- (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
+ while (prom_isxdigit(*cp) &&
+ (value = prom_isdigit(*cp) ? *cp - '0' : prom_toupper(*cp) - 'A' + 10) < base) {
result = result * base + value;
cp++;
}
@@ -927,6 +946,10 @@ struct option_vector6 {
u8 os_name;
} __packed;
+struct option_vector7 {
+ u8 os_id[256];
+} __packed;
+
struct ibm_arch_vec {
struct { u32 mask, val; } pvrs[14];
@@ -949,6 +972,9 @@ struct ibm_arch_vec {
u8 vec6_len;
struct option_vector6 vec6;
+
+ u8 vec7_len;
+ struct option_vector7 vec7;
} __packed;
static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
@@ -1095,6 +1121,9 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.secondary_pteg = 0,
.os_name = OV6_LINUX,
},
+
+ /* option vector 7: OS Identification */
+ .vec7_len = VECTOR_LENGTH(sizeof(struct option_vector7)),
};
static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;
@@ -1323,6 +1352,8 @@ static void __init prom_check_platform_support(void)
memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
sizeof(ibm_architecture_vec));
+ prom_strscpy_pad(ibm_architecture_vec.vec7.os_id, linux_banner, 256);
+
if (prop_len > 1) {
int i;
u8 vec[8];
@@ -1762,6 +1793,8 @@ static int prom_rtas_hcall(uint64_t args)
asm volatile("sc 1\n" : "=r" (arg1) :
"r" (arg1),
"r" (arg2) :);
+ srr_regs_clobbered();
+
return arg1;
}
@@ -2702,7 +2735,7 @@ static void __init flatten_device_tree(void)
/* Add "phandle" in there, we'll need it */
namep = make_room(&mem_start, &mem_end, 16, 1);
- prom_strcpy(namep, "phandle");
+ prom_strscpy_pad(namep, "phandle", sizeof("phandle"));
mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
/* Build string array */
@@ -2983,7 +3016,7 @@ static void __init fixup_device_tree_efika_add_phy(void)
" 0x3 encode-int encode+"
" s\" interrupts\" property"
" finish-device");
- };
+ }
/* Check for a PHY device node - if missing then create one and
* give its phandle to the ethernet node */
@@ -3210,54 +3243,6 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
#endif /* CONFIG_BLK_DEV_INITRD */
}
-#ifdef CONFIG_PPC64
-#ifdef CONFIG_RELOCATABLE
-static void reloc_toc(void)
-{
-}
-
-static void unreloc_toc(void)
-{
-}
-#else
-static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
-{
- unsigned long i;
- unsigned long *toc_entry;
-
- /* Get the start of the TOC by using r2 directly. */
- asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
-
- for (i = 0; i < nr_entries; i++) {
- *toc_entry = *toc_entry + offset;
- toc_entry++;
- }
-}
-
-static void reloc_toc(void)
-{
- unsigned long offset = reloc_offset();
- unsigned long nr_entries =
- (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
-
- __reloc_toc(offset, nr_entries);
-
- mb();
-}
-
-static void unreloc_toc(void)
-{
- unsigned long offset = reloc_offset();
- unsigned long nr_entries =
- (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
-
- mb();
-
- __reloc_toc(-offset, nr_entries);
-}
-#endif
-#endif
-
#ifdef CONFIG_PPC_SVM
/*
* Perform the Enter Secure Mode ultracall.
@@ -3291,14 +3276,12 @@ static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
* relocated it so the check will fail. Restore the original image by
* relocating it back to the kernel virtual base address.
*/
- if (IS_ENABLED(CONFIG_RELOCATABLE))
- relocate(KERNELBASE);
+ relocate(KERNELBASE);
ret = enter_secure_mode(kbase, fdt);
/* Relocate the kernel again. */
- if (IS_ENABLED(CONFIG_RELOCATABLE))
- relocate(kbase);
+ relocate(kbase);
if (ret != U_SUCCESS) {
prom_printf("Returned %d from switching to secure mode.\n", ret);
@@ -3326,8 +3309,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
#ifdef CONFIG_PPC32
unsigned long offset = reloc_offset();
reloc_got2(offset);
-#else
- reloc_toc();
#endif
/*
@@ -3504,8 +3485,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
#ifdef CONFIG_PPC32
reloc_got2(-offset);
-#else
- unreloc_toc();
#endif
/* Move to secure memory if we're supposed to be secure guests. */
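
prom_strscpy_pad() mirrors strscpy_pad(): it returns the number of characters copied, or -E2BIG on truncation, and always NUL-pads the tail of the destination. Worked example:

    char buf[8];
    ssize_t n;

    n = prom_strscpy_pad(buf, "linux", sizeof(buf));
    /* n == 5; buf holds "linux" plus three NUL pad bytes */

    n = prom_strscpy_pad(buf, "linux version 5.x", sizeof(buf));
    /* n == -E2BIG; buf holds "linux v" plus a terminating NUL */
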
diff --git a/arch/powerpc/kernel/ptrace/Makefile b/arch/powerpc/kernel/ptrace/Makefile
index 8ebc11d1168d..77abd1a5a508 100644
--- a/arch/powerpc/kernel/ptrace/Makefile
+++ b/arch/powerpc/kernel/ptrace/Makefile
@@ -6,11 +6,11 @@
CFLAGS_ptrace-view.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
obj-y += ptrace.o ptrace-view.o
-obj-$(CONFIG_PPC_FPU_REGS) += ptrace-fpu.o
+obj-y += ptrace-fpu.o
obj-$(CONFIG_COMPAT) += ptrace32.o
obj-$(CONFIG_VSX) += ptrace-vsx.o
ifneq ($(CONFIG_VSX),y)
-obj-$(CONFIG_PPC_FPU_REGS) += ptrace-novsx.o
+obj-y += ptrace-novsx.o
endif
obj-$(CONFIG_ALTIVEC) += ptrace-altivec.o
obj-$(CONFIG_SPE) += ptrace-spe.o
diff --git a/arch/powerpc/kernel/ptrace/ptrace-adv.c b/arch/powerpc/kernel/ptrace/ptrace-adv.c
index 3990c01ef8cf..399f5d94a3df 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-adv.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-adv.c
@@ -12,7 +12,7 @@ void user_enable_single_step(struct task_struct *task)
if (regs != NULL) {
task->thread.debug.dbcr0 &= ~DBCR0_BT;
task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
@@ -24,7 +24,7 @@ void user_enable_block_step(struct task_struct *task)
if (regs != NULL) {
task->thread.debug.dbcr0 &= ~DBCR0_IC;
task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
@@ -50,7 +50,7 @@ void user_disable_single_step(struct task_struct *task)
* All debug events were off.....
*/
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
- regs->msr &= ~MSR_DE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
}
}
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
@@ -82,6 +82,7 @@ int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
{
+ struct pt_regs *regs = task->thread.regs;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
int ret;
struct thread_struct *thread = &task->thread;
@@ -112,7 +113,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned l
dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
task->thread.debug.dbcr1)) {
- task->thread.regs->msr &= ~MSR_DE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
task->thread.debug.dbcr0 &= ~DBCR0_IDM;
}
return 0;
@@ -132,7 +133,7 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned l
dbcr_dac(task) |= DBCR_DAC1R;
if (data & 0x2UL)
dbcr_dac(task) |= DBCR_DAC1W;
- task->thread.regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
return 0;
}
@@ -220,7 +221,7 @@ static long set_instruction_bp(struct task_struct *child,
}
out:
child->thread.debug.dbcr0 |= DBCR0_IDM;
- child->thread.regs->msr |= MSR_DE;
+ regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
return slot;
}
@@ -336,7 +337,7 @@ static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
return -ENOSPC;
}
child->thread.debug.dbcr0 |= DBCR0_IDM;
- child->thread.regs->msr |= MSR_DE;
+ regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
return slot + 4;
}
@@ -430,7 +431,7 @@ static int set_dac_range(struct task_struct *child,
child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
else /* PPC_BREAKPOINT_MODE_MASK */
child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
- child->thread.regs->msr |= MSR_DE;
+ regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
return 5;
}
@@ -485,7 +486,8 @@ long ppc_del_hwdebug(struct task_struct *child, long data)
if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
child->thread.debug.dbcr1)) {
child->thread.debug.dbcr0 &= ~DBCR0_IDM;
- child->thread.regs->msr &= ~MSR_DE;
+ regs_set_return_msr(child->thread.regs,
+ child->thread.regs->msr & ~MSR_DE);
}
}
return rc;
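
Every MSR update in this file now goes through regs_set_return_msr() rather than writing regs->msr directly. A simplified sketch of what such an accessor buys (the shape is assumed; the real helper from this series also marks the saved SRR0/SRR1 as stale so the interrupt-exit path knows to reload them):

static inline void demo_set_return_msr(struct pt_regs *regs,
				       unsigned long msr)
{
	regs->msr = msr;
	/* flag that any cached return context (SRR0/SRR1) no longer
	 * matches pt_regs and must be re-synced before returning */
}
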
diff --git a/arch/powerpc/kernel/ptrace/ptrace-decl.h b/arch/powerpc/kernel/ptrace/ptrace-decl.h
index 3487f2c9735c..eafe5f0f6289 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-decl.h
+++ b/arch/powerpc/kernel/ptrace/ptrace-decl.h
@@ -165,22 +165,8 @@ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data);
extern const struct user_regset_view user_ppc_native_view;
/* ptrace-fpu */
-#ifdef CONFIG_PPC_FPU_REGS
int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data);
int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data);
-#else
-static inline int
-ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
-{
- return -EIO;
-}
-
-static inline int
-ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
-{
- return -EIO;
-}
-#endif
/* ptrace-(no)adv */
void ppc_gethwdinfo(struct ppc_debug_info *dbginfo);
diff --git a/arch/powerpc/kernel/ptrace/ptrace-fpu.c b/arch/powerpc/kernel/ptrace/ptrace-fpu.c
index 8301cb52dd99..5dca19361316 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-fpu.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-fpu.c
@@ -8,32 +8,42 @@
int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
{
+#ifdef CONFIG_PPC_FPU_REGS
unsigned int fpidx = index - PT_FPR0;
+#endif
if (index > PT_FPSCR)
return -EIO;
+#ifdef CONFIG_PPC_FPU_REGS
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
else
*data = child->thread.fp_state.fpscr;
+#else
+ *data = 0;
+#endif
return 0;
}
int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
{
+#ifdef CONFIG_PPC_FPU_REGS
unsigned int fpidx = index - PT_FPR0;
+#endif
if (index > PT_FPSCR)
return -EIO;
+#ifdef CONFIG_PPC_FPU_REGS
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
else
child->thread.fp_state.fpscr = data;
+#endif
return 0;
}
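
Together with the Makefile change above, ptrace-fpu.c is now built even when CONFIG_PPC_FPU_REGS=n, and the #ifdefs move into the function bodies. The no-FPU reduction of ptrace_get_fpr() is effectively (an illustrative rewrite of the code above, not a separate implementation):

int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
{
	if (index > PT_FPSCR)
		return -EIO;
	*data = 0;	/* no FPU state: report zeros rather than -EIO */
	return 0;
}
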
diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
index aa36fcad36cd..a5dd7d2e2c9e 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-noadv.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
@@ -11,10 +11,8 @@ void user_enable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
- if (regs != NULL) {
- regs->msr &= ~MSR_BE;
- regs->msr |= MSR_SE;
- }
+ if (regs != NULL)
+ regs_set_return_msr(regs, (regs->msr & ~MSR_BE) | MSR_SE);
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
@@ -22,10 +20,8 @@ void user_enable_block_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
- if (regs != NULL) {
- regs->msr &= ~MSR_SE;
- regs->msr |= MSR_BE;
- }
+ if (regs != NULL)
+ regs_set_return_msr(regs, (regs->msr & ~MSR_SE) | MSR_BE);
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
@@ -34,7 +30,7 @@ void user_disable_single_step(struct task_struct *task)
struct pt_regs *regs = task->thread.regs;
if (regs != NULL)
- regs->msr &= ~(MSR_SE | MSR_BE);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_SE | MSR_BE));
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-novsx.c b/arch/powerpc/kernel/ptrace/ptrace-novsx.c
index b3b36835658a..7433f3db979a 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-novsx.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-novsx.c
@@ -21,12 +21,16 @@
int fpr_get(struct task_struct *target, const struct user_regset *regset,
struct membuf to)
{
+#ifdef CONFIG_PPC_FPU_REGS
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32]));
flush_fp_to_thread(target);
return membuf_write(&to, &target->thread.fp_state, 33 * sizeof(u64));
+#else
+ return membuf_write(&to, &empty_zero_page, 33 * sizeof(u64));
+#endif
}
/*
@@ -46,6 +50,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
+#ifdef CONFIG_PPC_FPU_REGS
BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
offsetof(struct thread_fp_state, fpr[32]));
@@ -53,4 +58,7 @@ int fpr_set(struct task_struct *target, const struct user_regset *regset,
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.fp_state, 0, -1);
+#else
+ return 0;
+#endif
}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c
index 2bad8068f598..b8be1d6668b5 100644
--- a/arch/powerpc/kernel/ptrace/ptrace-view.c
+++ b/arch/powerpc/kernel/ptrace/ptrace-view.c
@@ -111,10 +111,11 @@ static unsigned long get_user_msr(struct task_struct *task)
return task->thread.regs->msr | task->thread.fpexc_mode;
}
-static int set_user_msr(struct task_struct *task, unsigned long msr)
+static __always_inline int set_user_msr(struct task_struct *task, unsigned long msr)
{
- task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
- task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
+ unsigned long newmsr = (task->thread.regs->msr & ~MSR_DEBUGCHANGE) |
+ (msr & MSR_DEBUGCHANGE);
+ regs_set_return_msr(task->thread.regs, newmsr);
return 0;
}
@@ -147,7 +148,7 @@ static int set_user_dscr(struct task_struct *task, unsigned long dscr)
* We prevent mucking around with the reserved area of trap,
* which is used internally by the kernel.
*/
-static int set_user_trap(struct task_struct *task, unsigned long trap)
+static __always_inline int set_user_trap(struct task_struct *task, unsigned long trap)
{
set_trap(task->thread.regs, trap);
return 0;
@@ -221,17 +222,9 @@ static int gpr_get(struct task_struct *target, const struct user_regset *regset,
#ifdef CONFIG_PPC64
struct membuf to_softe = membuf_at(&to, offsetof(struct pt_regs, softe));
#endif
- int i;
-
if (target->thread.regs == NULL)
return -EIO;
- if (!FULL_REGS(target->thread.regs)) {
- /* We have a partial register set. Fill 14-31 with bogus values */
- for (i = 14; i < 32; i++)
- target->thread.regs->gpr[i] = NV_REG_POISON;
- }
-
membuf_write(&to, target->thread.regs, sizeof(struct user_pt_regs));
membuf_store(&to_msr, get_user_msr(target));
@@ -252,8 +245,6 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
if (target->thread.regs == NULL)
return -EIO;
- CHECK_FULL_REGS(target->thread.regs);
-
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
target->thread.regs,
0, PT_MSR * sizeof(reg));
@@ -522,13 +513,11 @@ static const struct user_regset native_regsets[] = {
.size = sizeof(long), .align = sizeof(long),
.regset_get = gpr_get, .set = gpr_set
},
-#ifdef CONFIG_PPC_FPU_REGS
[REGSET_FPR] = {
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
.size = sizeof(double), .align = sizeof(double),
.regset_get = fpr_get, .set = fpr_set
},
-#endif
#ifdef CONFIG_ALTIVEC
[REGSET_VMX] = {
.core_note_type = NT_PPC_VMX, .n = 34,
@@ -661,6 +650,9 @@ int gpr32_set_common(struct task_struct *target,
const compat_ulong_t __user *u = ubuf;
compat_ulong_t reg;
+ if (!kbuf && !user_read_access_begin(u, count))
+ return -EFAULT;
+
pos /= sizeof(reg);
count /= sizeof(reg);
@@ -669,8 +661,7 @@ int gpr32_set_common(struct task_struct *target,
regs[pos++] = *k++;
else
for (; count > 0 && pos < PT_MSR; --count) {
- if (__get_user(reg, u++))
- return -EFAULT;
+ unsafe_get_user(reg, u++, Efault);
regs[pos++] = reg;
}
@@ -678,8 +669,8 @@ int gpr32_set_common(struct task_struct *target,
if (count > 0 && pos == PT_MSR) {
if (kbuf)
reg = *k++;
- else if (__get_user(reg, u++))
- return -EFAULT;
+ else
+ unsafe_get_user(reg, u++, Efault);
set_user_msr(target, reg);
++pos;
--count;
@@ -692,24 +683,24 @@ int gpr32_set_common(struct task_struct *target,
++k;
} else {
for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
- if (__get_user(reg, u++))
- return -EFAULT;
+ unsafe_get_user(reg, u++, Efault);
regs[pos++] = reg;
}
for (; count > 0 && pos < PT_TRAP; --count, ++pos)
- if (__get_user(reg, u++))
- return -EFAULT;
+ unsafe_get_user(reg, u++, Efault);
}
if (count > 0 && pos == PT_TRAP) {
if (kbuf)
reg = *k++;
- else if (__get_user(reg, u++))
- return -EFAULT;
+ else
+ unsafe_get_user(reg, u++, Efault);
set_user_trap(target, reg);
++pos;
--count;
}
+ if (!kbuf)
+ user_read_access_end();
kbuf = k;
ubuf = u;
@@ -717,25 +708,19 @@ int gpr32_set_common(struct task_struct *target,
count *= sizeof(reg);
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
(PT_TRAP + 1) * sizeof(reg), -1);
+
+Efault:
+ user_read_access_end();
+ return -EFAULT;
}
static int gpr32_get(struct task_struct *target,
const struct user_regset *regset,
struct membuf to)
{
- int i;
-
if (target->thread.regs == NULL)
return -EIO;
- if (!FULL_REGS(target->thread.regs)) {
- /*
- * We have a partial register set.
- * Fill 14-31 with bogus values.
- */
- for (i = 14; i < 32; i++)
- target->thread.regs->gpr[i] = NV_REG_POISON;
- }
return gpr32_get_common(target, regset, to,
&target->thread.regs->gpr[0]);
}
@@ -748,7 +733,6 @@ static int gpr32_set(struct task_struct *target,
if (target->thread.regs == NULL)
return -EIO;
- CHECK_FULL_REGS(target->thread.regs);
return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
&target->thread.regs->gpr[0]);
}
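
gpr32_set_common() now opens a single user_read_access_begin() window and converts each __get_user() into an unsafe_get_user() that jumps to a shared Efault exit. A minimal sketch of that batched-uaccess pattern (demo_read() and its arguments are illustrative):

static int demo_read(const u32 __user *u, u32 *dst, int n)
{
	int i;

	if (!user_read_access_begin(u, n * sizeof(*u)))
		return -EFAULT;
	for (i = 0; i < n; i++)
		unsafe_get_user(dst[i], &u[i], Efault);
	user_read_access_end();
	return 0;

Efault:
	user_read_access_end();
	return -EFAULT;
}

One begin/end pair amortizes the user-access enable/disable (e.g. KUAP) over the whole loop instead of paying it per word.
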
diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c
index 4f3d4ff3728c..0a0a33eb0d28 100644
--- a/arch/powerpc/kernel/ptrace/ptrace.c
+++ b/arch/powerpc/kernel/ptrace/ptrace.c
@@ -59,7 +59,6 @@ long arch_ptrace(struct task_struct *child, long request,
if ((addr & (sizeof(long) - 1)) || !child->thread.regs)
break;
- CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0)
ret = ptrace_get_reg(child, (int) index, &tmp);
else
@@ -81,7 +80,6 @@ long arch_ptrace(struct task_struct *child, long request,
if ((addr & (sizeof(long) - 1)) || !child->thread.regs)
break;
- CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0)
ret = ptrace_put_reg(child, index, data);
else
@@ -354,8 +352,6 @@ void __init pt_regs_check(void)
offsetof(struct user_pt_regs, nip));
BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
offsetof(struct user_pt_regs, msr));
- BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
- offsetof(struct user_pt_regs, msr));
BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
offsetof(struct user_pt_regs, orig_gpr3));
BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
diff --git a/arch/powerpc/kernel/ptrace/ptrace32.c b/arch/powerpc/kernel/ptrace/ptrace32.c
index d30b9ad70edc..19c224808982 100644
--- a/arch/powerpc/kernel/ptrace/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace/ptrace32.c
@@ -83,7 +83,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if ((addr & 3) || (index > PT_FPSCR32))
break;
- CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
ret = ptrace_get_reg(child, index, &tmp);
if (ret)
@@ -133,7 +132,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if ((addr & 3) || numReg > PT_FPSCR)
break;
- CHECK_FULL_REGS(child->thread.regs);
if (numReg >= PT_FPR0) {
flush_fp_to_thread(child);
/* get 64 bit FPR */
@@ -187,7 +185,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if ((addr & 3) || (index > PT_FPSCR32))
break;
- CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
ret = ptrace_put_reg(child, index, data);
} else {
@@ -226,7 +223,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
*/
if ((addr & 3) || (numReg > PT_FPSCR))
break;
- CHECK_FULL_REGS(child->thread.regs);
if (numReg < PT_FPR0) {
unsigned long freg;
ret = ptrace_get_reg(child, numReg, &freg);
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index 2d33f342a293..6857a5b0a1c3 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -755,11 +755,18 @@ static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v)
return 0;
}
-#define RMO_READ_BUF_MAX 30
-
-/* RTAS Userspace access */
+/**
+ * ppc_rtas_rmo_buf_show() - Describe RTAS-addressable region for user space.
+ *
+ * Base + size description of a range of RTAS-addressable memory set
+ * aside for user space to use as work area(s) for certain RTAS
+ * functions. User space accesses this region via /dev/mem. Apart from
+ * security policies, the kernel does not arbitrate or serialize
+ * access to this region, and user space must ensure that concurrent
+ * users do not interfere with each other.
+ */
static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v)
{
- seq_printf(m, "%016lx %x\n", rtas_rmo_buf, RTAS_RMOBUF_MAX);
+ seq_printf(m, "%016lx %x\n", rtas_rmo_buf, RTAS_USER_REGION_SIZE);
return 0;
}
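
As the new kernel-doc notes, user space consumes this file together with /dev/mem. A hypothetical user-space sketch (the proc path and the parsing are assumptions for illustration, not part of this patch):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	unsigned long base;
	unsigned int size;
	FILE *f = fopen("/proc/powerpc/rtas/rmo_buffer", "r"); /* assumed path */
	int fd;
	void *p;

	if (!f || fscanf(f, "%lx %x", &base, &size) != 2)
		return 1;
	fclose(f);
	fd = open("/dev/mem", O_RDWR | O_SYNC);
	if (fd < 0)
		return 1;
	/* map the RTAS work area; callers must coordinate concurrent users */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, base);
	return p == MAP_FAILED;
}
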
diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c
index a28239b8b0c0..33c07c8af6c8 100644
--- a/arch/powerpc/kernel/rtas-rtc.c
+++ b/arch/powerpc/kernel/rtas-rtc.c
@@ -12,7 +12,7 @@
#define MAX_RTC_WAIT 5000 /* 5 sec */
-#define RTAS_CLOCK_BUSY (-2)
+
time64_t __init rtas_get_boot_time(void)
{
int ret[8];
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index d126d71ea5bd..99f2cce635fb 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -25,6 +25,7 @@
#include <linux/reboot.h>
#include <linux/syscalls.h>
+#include <asm/interrupt.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
@@ -46,6 +47,13 @@
/* This is here deliberately so it's only used in this file */
void enter_rtas(unsigned long);
+static inline void do_enter_rtas(unsigned long args)
+{
+ enter_rtas(args);
+
+ srr_regs_clobbered(); /* rtas uses SRRs, invalidate */
+}
+
struct rtas_t rtas = {
.lock = __ARCH_SPIN_LOCK_UNLOCKED
};
@@ -384,7 +392,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
save_args = rtas.args;
rtas.args = err_args;
- enter_rtas(__pa(&rtas.args));
+ do_enter_rtas(__pa(&rtas.args));
err_args = rtas.args;
rtas.args = save_args;
@@ -430,7 +438,7 @@ va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
for (i = 0; i < nret; ++i)
args->rets[i] = 0;
- enter_rtas(__pa(args));
+ do_enter_rtas(__pa(args));
}
void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
@@ -828,7 +836,6 @@ void rtas_activate_firmware(void)
pr_err("ibm,activate-firmware failed (%i)\n", fwrc);
}
-static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
#ifdef CONFIG_PPC_PSERIES
/**
* rtas_call_reentrant() - Used for reentrant rtas calls
@@ -988,10 +995,10 @@ static struct rtas_filter rtas_filters[] __ro_after_init = {
static bool in_rmo_buf(u32 base, u32 end)
{
return base >= rtas_rmo_buf &&
- base < (rtas_rmo_buf + RTAS_RMOBUF_MAX) &&
+ base < (rtas_rmo_buf + RTAS_USER_REGION_SIZE) &&
base <= end &&
end >= rtas_rmo_buf &&
- end < (rtas_rmo_buf + RTAS_RMOBUF_MAX);
+ end < (rtas_rmo_buf + RTAS_USER_REGION_SIZE);
}
static bool block_rtas_call(int token, int nargs,
@@ -1052,6 +1059,14 @@ err:
return true;
}
+static void __init rtas_syscall_filter_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rtas_filters); i++)
+ rtas_filters[i].token = rtas_token(rtas_filters[i].name);
+}
+
#else
static bool block_rtas_call(int token, int nargs,
@@ -1060,6 +1075,10 @@ static bool block_rtas_call(int token, int nargs,
return false;
}
+static void __init rtas_syscall_filter_init(void)
+{
+}
+
#endif /* CONFIG_PPC_RTAS_FILTER */
/* We assume to be passed big endian arguments */
@@ -1103,7 +1122,7 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
return -EINVAL;
/* Need to handle ibm,suspend_me call specially */
- if (token == ibm_suspend_me_token) {
+ if (token == rtas_token("ibm,suspend-me")) {
/*
* rtas_ibm_suspend_me assumes the streamid handle is in cpu
@@ -1127,7 +1146,7 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
flags = lock_rtas();
rtas.args = args;
- enter_rtas(__pa(&rtas.args));
+ do_enter_rtas(__pa(&rtas.args));
args = rtas.args;
/* A -1 return code indicates that the last command couldn't
@@ -1163,9 +1182,6 @@ void __init rtas_initialize(void)
unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
u32 base, size, entry;
int no_base, no_size, no_entry;
-#ifdef CONFIG_PPC_RTAS_FILTER
- int i;
-#endif
/* Get RTAS dev node and fill up our "rtas" structure with infos
* about it.
@@ -1191,12 +1207,10 @@ void __init rtas_initialize(void)
* the stop-self token if any
*/
#ifdef CONFIG_PPC64
- if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ if (firmware_has_feature(FW_FEATURE_LPAR))
rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
- ibm_suspend_me_token = rtas_token("ibm,suspend-me");
- }
#endif
- rtas_rmo_buf = memblock_phys_alloc_range(RTAS_RMOBUF_MAX, PAGE_SIZE,
+ rtas_rmo_buf = memblock_phys_alloc_range(RTAS_USER_REGION_SIZE, PAGE_SIZE,
0, rtas_region);
if (!rtas_rmo_buf)
panic("ERROR: RTAS: Failed to allocate %lx bytes below %pa\n",
@@ -1206,11 +1220,7 @@ void __init rtas_initialize(void)
rtas_last_error_token = rtas_token("rtas-last-error");
#endif
-#ifdef CONFIG_PPC_RTAS_FILTER
- for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) {
- rtas_filters[i].token = rtas_token(rtas_filters[i].name);
- }
-#endif
+ rtas_syscall_filter_init();
}
int __init early_init_dt_scan_rtas(unsigned long node,
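
rtas_initialize() above now calls rtas_syscall_filter_init() instead of open-coding an #ifdef CONFIG_PPC_RTAS_FILTER loop, with the !CONFIG variant reduced to an empty stub. The general idiom, with illustrative names:

#ifdef CONFIG_FEATURE_X
static void __init feature_x_init(void)
{
	/* real one-time setup */
}
#else
static void __init feature_x_init(void) { }
#endif

/* callers just call feature_x_init(); no #ifdef at the call site */
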
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index e4e1a94ccf6a..cc51fa52e783 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -7,6 +7,7 @@
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/memblock.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/seq_buf.h>
@@ -18,6 +19,7 @@
#include <asm/setup.h>
#include <asm/inst.h>
+#include "setup.h"
u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
@@ -250,7 +252,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
-bool stf_barrier;
+static bool stf_barrier;
static int __init handle_no_stf_barrier(char *p)
{
@@ -298,9 +300,7 @@ static void stf_barrier_enable(bool enable)
void setup_stf_barrier(void)
{
enum stf_barrier_type type;
- bool enable, hv;
-
- hv = cpu_has_feature(CPU_FTR_HVMODE);
+ bool enable;
/* Default to fallback in case fw-features are not available */
if (cpu_has_feature(CPU_FTR_ARCH_300))
@@ -313,8 +313,7 @@ void setup_stf_barrier(void)
type = STF_BARRIER_NONE;
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
- (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
- (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
+ security_ftr_enabled(SEC_FTR_STF_BARRIER);
if (type == STF_BARRIER_FALLBACK) {
pr_info("stf-barrier: fallback barrier available\n");
@@ -430,26 +429,29 @@ device_initcall(stf_barrier_debugfs_init);
static void update_branch_cache_flush(void)
{
- u32 *site;
+ u32 *site, __maybe_unused *site2;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
site = &patch__call_kvm_flush_link_stack;
+ site2 = &patch__call_kvm_flush_link_stack_p9;
// This controls the branch from guest_exit_cont to kvm_flush_link_stack
if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
- patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
+ patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP()));
} else {
// Could use HW flush, but that could also flush count cache
patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
+ patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
}
#endif
// Patch out the bcctr first, then nop the rest
site = &patch__call_flush_branch_caches3;
- patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
site = &patch__call_flush_branch_caches2;
- patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
site = &patch__call_flush_branch_caches1;
- patch_instruction_site(site, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
// This controls the branch from _switch to flush_branch_caches
if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
@@ -472,12 +474,12 @@ static void update_branch_cache_flush(void)
// If we just need to flush the link stack, early return
if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
patch_instruction_site(&patch__flush_link_stack_return,
- ppc_inst(PPC_INST_BLR));
+ ppc_inst(PPC_RAW_BLR()));
// If we have flush instruction, early return
} else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) {
patch_instruction_site(&patch__flush_count_cache_return,
- ppc_inst(PPC_INST_BLR));
+ ppc_inst(PPC_RAW_BLR()));
}
}
}
@@ -541,6 +543,178 @@ void setup_count_cache_flush(void)
toggle_branch_cache_flush(enable);
}
+static enum l1d_flush_type enabled_flush_types;
+static void *l1d_flush_fallback_area;
+static bool no_rfi_flush;
+static bool no_entry_flush;
+static bool no_uaccess_flush;
+bool rfi_flush;
+static bool entry_flush;
+static bool uaccess_flush;
+DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
+EXPORT_SYMBOL(uaccess_flush_key);
+
+static int __init handle_no_rfi_flush(char *p)
+{
+ pr_info("rfi-flush: disabled on command line.");
+ no_rfi_flush = true;
+ return 0;
+}
+early_param("no_rfi_flush", handle_no_rfi_flush);
+
+static int __init handle_no_entry_flush(char *p)
+{
+ pr_info("entry-flush: disabled on command line.");
+ no_entry_flush = true;
+ return 0;
+}
+early_param("no_entry_flush", handle_no_entry_flush);
+
+static int __init handle_no_uaccess_flush(char *p)
+{
+ pr_info("uaccess-flush: disabled on command line.");
+ no_uaccess_flush = true;
+ return 0;
+}
+early_param("no_uaccess_flush", handle_no_uaccess_flush);
+
+/*
+ * The RFI flush is not KPTI, but because users will see doco that says to use
+ * nopti we hijack that option here to also disable the RFI flush.
+ */
+static int __init handle_no_pti(char *p)
+{
+ pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
+ handle_no_rfi_flush(NULL);
+ return 0;
+}
+early_param("nopti", handle_no_pti);
+
+static void do_nothing(void *unused)
+{
+ /*
+ * We don't need to do the flush explicitly, just enter+exit kernel is
+ * sufficient, the RFI exit handlers will do the right thing.
+ */
+}
+
+void rfi_flush_enable(bool enable)
+{
+ if (enable) {
+ do_rfi_flush_fixups(enabled_flush_types);
+ on_each_cpu(do_nothing, NULL, 1);
+ } else
+ do_rfi_flush_fixups(L1D_FLUSH_NONE);
+
+ rfi_flush = enable;
+}
+
+static void entry_flush_enable(bool enable)
+{
+ if (enable) {
+ do_entry_flush_fixups(enabled_flush_types);
+ on_each_cpu(do_nothing, NULL, 1);
+ } else {
+ do_entry_flush_fixups(L1D_FLUSH_NONE);
+ }
+
+ entry_flush = enable;
+}
+
+static void uaccess_flush_enable(bool enable)
+{
+ if (enable) {
+ do_uaccess_flush_fixups(enabled_flush_types);
+ static_branch_enable(&uaccess_flush_key);
+ on_each_cpu(do_nothing, NULL, 1);
+ } else {
+ static_branch_disable(&uaccess_flush_key);
+ do_uaccess_flush_fixups(L1D_FLUSH_NONE);
+ }
+
+ uaccess_flush = enable;
+}
+
+static void __ref init_fallback_flush(void)
+{
+ u64 l1d_size, limit;
+ int cpu;
+
+ /* Only allocate the fallback flush area once (at boot time). */
+ if (l1d_flush_fallback_area)
+ return;
+
+ l1d_size = ppc64_caches.l1d.size;
+
+ /*
+ * If there is no d-cache-size property in the device tree, l1d_size
+ * could be zero. That leads to the loop in the asm wrapping around to
+ * 2^64-1, and then walking off the end of the fallback area and
+ * eventually causing a page fault which is fatal. Just default to
+ * something vaguely sane.
+ */
+ if (!l1d_size)
+ l1d_size = (64 * 1024);
+
+ limit = min(ppc64_bolted_size(), ppc64_rma_size);
+
+ /*
+ * Align to L1d size, and size it at 2x L1d size, to catch possible
+ * hardware prefetch runoff. We don't have a recipe for load patterns to
+ * reliably avoid the prefetcher.
+ */
+ l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
+ l1d_size, MEMBLOCK_LOW_LIMIT,
+ limit, NUMA_NO_NODE);
+ if (!l1d_flush_fallback_area)
+ panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
+ __func__, l1d_size * 2, l1d_size, &limit);
+
+
+ for_each_possible_cpu(cpu) {
+ struct paca_struct *paca = paca_ptrs[cpu];
+ paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
+ paca->l1d_flush_size = l1d_size;
+ }
+}
+
+void setup_rfi_flush(enum l1d_flush_type types, bool enable)
+{
+ if (types & L1D_FLUSH_FALLBACK) {
+ pr_info("rfi-flush: fallback displacement flush available\n");
+ init_fallback_flush();
+ }
+
+ if (types & L1D_FLUSH_ORI)
+ pr_info("rfi-flush: ori type flush available\n");
+
+ if (types & L1D_FLUSH_MTTRIG)
+ pr_info("rfi-flush: mttrig type flush available\n");
+
+ enabled_flush_types = types;
+
+ if (!cpu_mitigations_off() && !no_rfi_flush)
+ rfi_flush_enable(enable);
+}
+
+void setup_entry_flush(bool enable)
+{
+ if (cpu_mitigations_off())
+ return;
+
+ if (!no_entry_flush)
+ entry_flush_enable(enable);
+}
+
+void setup_uaccess_flush(bool enable)
+{
+ if (cpu_mitigations_off())
+ return;
+
+ if (!no_uaccess_flush)
+ uaccess_flush_enable(enable);
+}
+
#ifdef CONFIG_DEBUG_FS
static int count_cache_flush_set(void *data, u64 val)
{
@@ -579,5 +753,92 @@ static __init int count_cache_flush_debugfs_init(void)
return 0;
}
device_initcall(count_cache_flush_debugfs_init);
+
+static int rfi_flush_set(void *data, u64 val)
+{
+ bool enable;
+
+ if (val == 1)
+ enable = true;
+ else if (val == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ /* Only do anything if we're changing state */
+ if (enable != rfi_flush)
+ rfi_flush_enable(enable);
+
+ return 0;
+}
+
+static int rfi_flush_get(void *data, u64 *val)
+{
+ *val = rfi_flush ? 1 : 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
+
+static int entry_flush_set(void *data, u64 val)
+{
+ bool enable;
+
+ if (val == 1)
+ enable = true;
+ else if (val == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ /* Only do anything if we're changing state */
+ if (enable != entry_flush)
+ entry_flush_enable(enable);
+
+ return 0;
+}
+
+static int entry_flush_get(void *data, u64 *val)
+{
+ *val = entry_flush ? 1 : 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
+
+static int uaccess_flush_set(void *data, u64 val)
+{
+ bool enable;
+
+ if (val == 1)
+ enable = true;
+ else if (val == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ /* Only do anything if we're changing state */
+ if (enable != uaccess_flush)
+ uaccess_flush_enable(enable);
+
+ return 0;
+}
+
+static int uaccess_flush_get(void *data, u64 *val)
+{
+ *val = uaccess_flush ? 1 : 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
+
+static __init int rfi_flush_debugfs_init(void)
+{
+ debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
+ debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
+ debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
+ return 0;
+}
+device_initcall(rfi_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */
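
uaccess_flush_enable() above pairs the code patching with a static key (uaccess_flush_key), so the common path costs only a patched branch. A hedged sketch of the consumer side, with illustrative names standing in for the real exit-path hook:

DEFINE_STATIC_KEY_FALSE(demo_flush_key);	/* analogous to uaccess_flush_key */

static void demo_do_flush(void)
{
	/* displacement or instruction-based L1D flush would go here */
}

static inline void demo_maybe_flush(void)
{
	/* compiles to straight-line code while the key is disabled */
	if (static_branch_unlikely(&demo_flush_key))
		demo_do_flush();
}
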
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index bee984b1887b..26328fd2990c 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -9,6 +9,7 @@
#undef DEBUG
#include <linux/export.h>
+#include <linux/panic_notifier.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
@@ -69,7 +70,6 @@
#include "setup.h"
#ifdef DEBUG
-#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
@@ -92,8 +92,6 @@ EXPORT_SYMBOL_GPL(boot_cpuid);
int dcache_bsize;
int icache_bsize;
-unsigned long klimit = (unsigned long) _end;
-
/*
* This still seems to be needed... -- paulus
*/
@@ -829,7 +827,7 @@ static __init void print_system_info(void)
}
#ifdef CONFIG_SMP
-static void smp_setup_pacas(void)
+static void __init smp_setup_pacas(void)
{
int cpu;
@@ -931,7 +929,7 @@ void __init setup_arch(char **cmdline_p)
init_mm.start_code = (unsigned long)_stext;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = klimit;
+ init_mm.brk = (unsigned long)_end;
mm_iommu_init(&init_mm);
irqstack_early_init();
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 8ba49a6bf515..7ec5c47fce0e 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL(DMA_MODE_WRITE);
*/
notrace void __init machine_init(u64 dt_ptr)
{
- struct ppc_inst *addr = (struct ppc_inst *)patch_site_addr(&patch__memset_nocache);
+ u32 *addr = (u32 *)patch_site_addr(&patch__memset_nocache);
struct ppc_inst insn;
/* Configure static keys first, now that we're relocated. */
@@ -85,7 +85,7 @@ notrace void __init machine_init(u64 dt_ptr)
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
- patch_instruction_site(&patch__memcpy_nocache, ppc_inst(PPC_INST_NOP));
+ patch_instruction_site(&patch__memcpy_nocache, ppc_inst(PPC_RAW_NOP()));
create_cond_branch(&insn, addr, branch_target(addr), 0x820000);
patch_instruction(addr, insn); /* replace b by bne cr0 */
@@ -164,7 +164,7 @@ void __init irqstack_early_init(void)
}
#ifdef CONFIG_VMAP_STACK
-void *emergency_ctx[NR_CPUS] __ro_after_init;
+void *emergency_ctx[NR_CPUS] __ro_after_init = {[0] = &init_stack};
void __init emergency_stack_init(void)
{
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 560ed8b975e7..1ff258f6c76c 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -33,6 +33,7 @@
#include <linux/pgtable.h>
#include <asm/debugfs.h>
+#include <asm/kvm_guest.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/prom.h>
@@ -232,10 +233,23 @@ static void cpu_ready_for_interrupts(void)
* If we are not in hypervisor mode the job is done once for
* the whole partition in configure_exceptions().
*/
- if (cpu_has_feature(CPU_FTR_HVMODE) &&
- cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
unsigned long lpcr = mfspr(SPRN_LPCR);
- mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+ unsigned long new_lpcr = lpcr;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ /* P10 DD1 does not have HAIL */
+ if (pvr_version_is(PVR_POWER10) &&
+ (mfspr(SPRN_PVR) & 0xf00) == 0x100)
+ new_lpcr |= LPCR_AIL_3;
+ else
+ new_lpcr |= LPCR_HAIL;
+ } else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ new_lpcr |= LPCR_AIL_3;
+ }
+
+ if (new_lpcr != lpcr)
+ mtspr(SPRN_LPCR, new_lpcr);
}
/*
@@ -356,11 +370,11 @@ void __init early_setup(unsigned long dt_ptr)
apply_feature_fixups();
setup_feature_keys();
- early_ioremap_setup();
-
/* Initialize the hash table or TLB handling */
early_init_mmu();
+ early_ioremap_setup();
+
/*
* After firmware and early platform setup code has set things up,
* we note the SPR values for configurable control/performance
@@ -775,7 +789,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
size_t align)
{
const unsigned long goal = __pa(MAX_DMA_ADDRESS);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int node = early_cpu_to_node(cpu);
void *ptr;
@@ -926,281 +940,22 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh)
* disable it by default. Book3S has a soft-nmi hardlockup detector based
* on the decrementer interrupt, so it does not suffer from this problem.
*
- * It is likely to get false positives in VM guests, so disable it there
- * by default too.
+ * It is likely to get false positives in KVM guests, so disable it there
+ * by default too. PowerVM will not stop or arbitrarily oversubscribe
+ * CPUs, but gives a minimum regular allotment even with SPLPAR, so the
+ * detector is enabled for non-KVM guests, which are assumed to be PowerVM.
*/
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
hardlockup_detector_disable();
#else
- if (firmware_has_feature(FW_FEATURE_LPAR))
- hardlockup_detector_disable();
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ if (is_kvm_guest())
+ hardlockup_detector_disable();
+ }
#endif
return 0;
}
early_initcall(disable_hardlockup_detector);
-
-#ifdef CONFIG_PPC_BOOK3S_64
-static enum l1d_flush_type enabled_flush_types;
-static void *l1d_flush_fallback_area;
-static bool no_rfi_flush;
-static bool no_entry_flush;
-static bool no_uaccess_flush;
-bool rfi_flush;
-bool entry_flush;
-bool uaccess_flush;
-DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
-EXPORT_SYMBOL(uaccess_flush_key);
-
-static int __init handle_no_rfi_flush(char *p)
-{
- pr_info("rfi-flush: disabled on command line.");
- no_rfi_flush = true;
- return 0;
-}
-early_param("no_rfi_flush", handle_no_rfi_flush);
-
-static int __init handle_no_entry_flush(char *p)
-{
- pr_info("entry-flush: disabled on command line.");
- no_entry_flush = true;
- return 0;
-}
-early_param("no_entry_flush", handle_no_entry_flush);
-
-static int __init handle_no_uaccess_flush(char *p)
-{
- pr_info("uaccess-flush: disabled on command line.");
- no_uaccess_flush = true;
- return 0;
-}
-early_param("no_uaccess_flush", handle_no_uaccess_flush);
-
-/*
- * The RFI flush is not KPTI, but because users will see doco that says to use
- * nopti we hijack that option here to also disable the RFI flush.
- */
-static int __init handle_no_pti(char *p)
-{
- pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
- handle_no_rfi_flush(NULL);
- return 0;
-}
-early_param("nopti", handle_no_pti);
-
-static void do_nothing(void *unused)
-{
- /*
- * We don't need to do the flush explicitly, just enter+exit kernel is
- * sufficient, the RFI exit handlers will do the right thing.
- */
-}
-
-void rfi_flush_enable(bool enable)
-{
- if (enable) {
- do_rfi_flush_fixups(enabled_flush_types);
- on_each_cpu(do_nothing, NULL, 1);
- } else
- do_rfi_flush_fixups(L1D_FLUSH_NONE);
-
- rfi_flush = enable;
-}
-
-static void entry_flush_enable(bool enable)
-{
- if (enable) {
- do_entry_flush_fixups(enabled_flush_types);
- on_each_cpu(do_nothing, NULL, 1);
- } else {
- do_entry_flush_fixups(L1D_FLUSH_NONE);
- }
-
- entry_flush = enable;
-}
-
-static void uaccess_flush_enable(bool enable)
-{
- if (enable) {
- do_uaccess_flush_fixups(enabled_flush_types);
- static_branch_enable(&uaccess_flush_key);
- on_each_cpu(do_nothing, NULL, 1);
- } else {
- static_branch_disable(&uaccess_flush_key);
- do_uaccess_flush_fixups(L1D_FLUSH_NONE);
- }
-
- uaccess_flush = enable;
-}
-
-static void __ref init_fallback_flush(void)
-{
- u64 l1d_size, limit;
- int cpu;
-
- /* Only allocate the fallback flush area once (at boot time). */
- if (l1d_flush_fallback_area)
- return;
-
- l1d_size = ppc64_caches.l1d.size;
-
- /*
- * If there is no d-cache-size property in the device tree, l1d_size
- * could be zero. That leads to the loop in the asm wrapping around to
- * 2^64-1, and then walking off the end of the fallback area and
- * eventually causing a page fault which is fatal. Just default to
- * something vaguely sane.
- */
- if (!l1d_size)
- l1d_size = (64 * 1024);
-
- limit = min(ppc64_bolted_size(), ppc64_rma_size);
-
- /*
- * Align to L1d size, and size it at 2x L1d size, to catch possible
- * hardware prefetch runoff. We don't have a recipe for load patterns to
- * reliably avoid the prefetcher.
- */
- l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
- l1d_size, MEMBLOCK_LOW_LIMIT,
- limit, NUMA_NO_NODE);
- if (!l1d_flush_fallback_area)
- panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
- __func__, l1d_size * 2, l1d_size, &limit);
-
-
- for_each_possible_cpu(cpu) {
- struct paca_struct *paca = paca_ptrs[cpu];
- paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
- paca->l1d_flush_size = l1d_size;
- }
-}
-
-void setup_rfi_flush(enum l1d_flush_type types, bool enable)
-{
- if (types & L1D_FLUSH_FALLBACK) {
- pr_info("rfi-flush: fallback displacement flush available\n");
- init_fallback_flush();
- }
-
- if (types & L1D_FLUSH_ORI)
- pr_info("rfi-flush: ori type flush available\n");
-
- if (types & L1D_FLUSH_MTTRIG)
- pr_info("rfi-flush: mttrig type flush available\n");
-
- enabled_flush_types = types;
-
- if (!cpu_mitigations_off() && !no_rfi_flush)
- rfi_flush_enable(enable);
-}
-
-void setup_entry_flush(bool enable)
-{
- if (cpu_mitigations_off())
- return;
-
- if (!no_entry_flush)
- entry_flush_enable(enable);
-}
-
-void setup_uaccess_flush(bool enable)
-{
- if (cpu_mitigations_off())
- return;
-
- if (!no_uaccess_flush)
- uaccess_flush_enable(enable);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int rfi_flush_set(void *data, u64 val)
-{
- bool enable;
-
- if (val == 1)
- enable = true;
- else if (val == 0)
- enable = false;
- else
- return -EINVAL;
-
- /* Only do anything if we're changing state */
- if (enable != rfi_flush)
- rfi_flush_enable(enable);
-
- return 0;
-}
-
-static int rfi_flush_get(void *data, u64 *val)
-{
- *val = rfi_flush ? 1 : 0;
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
-
-static int entry_flush_set(void *data, u64 val)
-{
- bool enable;
-
- if (val == 1)
- enable = true;
- else if (val == 0)
- enable = false;
- else
- return -EINVAL;
-
- /* Only do anything if we're changing state */
- if (enable != entry_flush)
- entry_flush_enable(enable);
-
- return 0;
-}
-
-static int entry_flush_get(void *data, u64 *val)
-{
- *val = entry_flush ? 1 : 0;
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
-
-static int uaccess_flush_set(void *data, u64 val)
-{
- bool enable;
-
- if (val == 1)
- enable = true;
- else if (val == 0)
- enable = false;
- else
- return -EINVAL;
-
- /* Only do anything if we're changing state */
- if (enable != uaccess_flush)
- uaccess_flush_enable(enable);
-
- return 0;
-}
-
-static int uaccess_flush_get(void *data, u64 *val)
-{
- *val = uaccess_flush ? 1 : 0;
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
-
-static __init int rfi_flush_debugfs_init(void)
-{
- debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
- debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
- debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
- return 0;
-}
-device_initcall(rfi_flush_debugfs_init);
-#endif
-#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 9ded046edb0e..e600764a926c 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -214,7 +214,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
regs->gpr[0] = __NR_restart_syscall;
else
regs->gpr[3] = regs->orig_gpr3;
- regs->nip -= 4;
+ regs_add_return_ip(regs, -4);
regs->result = 0;
} else {
if (trap_is_scv(regs)) {
@@ -322,16 +322,16 @@ static unsigned long get_tm_stackpointer(struct task_struct *tsk)
* For signals taken in non-TM or suspended mode, we use the
* normal/non-checkpointed stack pointer.
*/
-
- unsigned long ret = tsk->thread.regs->gpr[1];
+ struct pt_regs *regs = tsk->thread.regs;
+ unsigned long ret = regs->gpr[1];
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BUG_ON(tsk != current);
- if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
+ if (MSR_TM_ACTIVE(regs->msr)) {
preempt_disable();
tm_reclaim_current(TM_CAUSE_SIGNAL);
- if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
+ if (MSR_TM_TRANSACTIONAL(regs->msr))
ret = tsk->thread.ckpt_regs.gpr[1];
/*
@@ -341,7 +341,7 @@ static unsigned long get_tm_stackpointer(struct task_struct *tsk)
* (tm_recheckpoint_new_task() would recheckpoint). Besides, we
* enter the signal handler in non-transactional state.
*/
- tsk->thread.regs->msr &= ~MSR_TS_MASK;
+ regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
preempt_enable();
}
#endif
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index 2559a681536e..1f07317964e4 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -19,6 +19,15 @@ extern int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
extern int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
struct task_struct *tsk);
+static inline int __get_user_sigset(sigset_t *dst, const sigset_t __user *src)
+{
+ BUILD_BUG_ON(sizeof(sigset_t) != sizeof(u64));
+
+ return __get_user(dst->sig[0], (u64 __user *)&src->sig[0]);
+}
+#define unsafe_get_user_sigset(dst, src, label) \
+ unsafe_get_user((dst)->sig[0], (u64 __user *)&(src)->sig[0], label)
+
#ifdef CONFIG_VSX
extern unsigned long copy_vsx_to_user(void __user *to,
struct task_struct *task);
@@ -53,6 +62,26 @@ unsigned long copy_ckfpr_from_user(struct task_struct *task, void __user *from);
&buf[i], label);\
} while (0)
+#define unsafe_copy_fpr_from_user(task, from, label) do { \
+ struct task_struct *__t = task; \
+ u64 __user *buf = (u64 __user *)from; \
+ int i; \
+ \
+ for (i = 0; i < ELF_NFPREG - 1; i++) \
+ unsafe_get_user(__t->thread.TS_FPR(i), &buf[i], label); \
+ unsafe_get_user(__t->thread.fp_state.fpscr, &buf[i], label); \
+} while (0)
+
+#define unsafe_copy_vsx_from_user(task, from, label) do { \
+ struct task_struct *__t = task; \
+ u64 __user *buf = (u64 __user *)from; \
+ int i; \
+ \
+ for (i = 0; i < ELF_NVSRHALFREG ; i++) \
+ unsafe_get_user(__t->thread.fp_state.fpr[i][TS_VSRLOWOFFSET], \
+ &buf[i], label); \
+} while (0)
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
#define unsafe_copy_ckfpr_to_user(to, task, label) do { \
struct task_struct *__t = task; \
@@ -73,6 +102,26 @@ unsigned long copy_ckfpr_from_user(struct task_struct *task, void __user *from);
unsafe_put_user(__t->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET], \
&buf[i], label);\
} while (0)
+
+#define unsafe_copy_ckfpr_from_user(task, from, label) do { \
+ struct task_struct *__t = task; \
+ u64 __user *buf = (u64 __user *)from; \
+ int i; \
+ \
+ for (i = 0; i < ELF_NFPREG - 1 ; i++) \
+ unsafe_get_user(__t->thread.TS_CKFPR(i), &buf[i], label);\
+ unsafe_get_user(__t->thread.ckfp_state.fpscr, &buf[i], label); \
+} while (0)
+
+#define unsafe_copy_ckvsx_from_user(task, from, label) do { \
+ struct task_struct *__t = task; \
+ u64 __user *buf = (u64 __user *)from; \
+ int i; \
+ \
+ for (i = 0; i < ELF_NVSRHALFREG ; i++) \
+ unsafe_get_user(__t->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET], \
+ &buf[i], label); \
+} while (0)
#endif
#elif defined(CONFIG_PPC_FPU_REGS)
@@ -80,6 +129,10 @@ unsigned long copy_ckfpr_from_user(struct task_struct *task, void __user *from);
unsafe_copy_to_user(to, (task)->thread.fp_state.fpr, \
ELF_NFPREG * sizeof(double), label)
+#define unsafe_copy_fpr_from_user(task, from, label) \
+ unsafe_copy_from_user((task)->thread.fp_state.fpr, from, \
+ ELF_NFPREG * sizeof(double), label)
+
static inline unsigned long
copy_fpr_to_user(void __user *to, struct task_struct *task)
{
@@ -113,7 +166,9 @@ copy_ckfpr_from_user(struct task_struct *task, void __user *from)
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#else
-#define unsafe_copy_fpr_to_user(to, task, label) do { } while (0)
+#define unsafe_copy_fpr_to_user(to, task, label) do { if (0) goto label; } while (0)
+
+#define unsafe_copy_fpr_from_user(task, from, label) do { if (0) goto label; } while (0)
static inline unsigned long
copy_fpr_to_user(void __user *to, struct task_struct *task)
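
When the kernel has no FP registers at all, the unsafe_copy_fpr_* macros above collapse to do { if (0) goto label; } while (0). The dead goto is deliberate: it still counts as a reference to the caller's error label, so -Wunused-label stays quiet and the label name is checked, while the compiler emits no code. An illustration with an assumed helper name:

#define unsafe_nop(label) do { if (0) goto label; } while (0)

static int demo(void)
{
	unsafe_nop(failed);	/* 'failed' is referenced, hence no warning */
	return 0;
failed:
	return 1;
}
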
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 75ee918a120a..0608581967f0 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -83,24 +83,17 @@
* implementation that makes things simple for little endian only)
*/
#define unsafe_put_sigset_t unsafe_put_compat_sigset
-
-static inline int get_sigset_t(sigset_t *set,
- const compat_sigset_t __user *uset)
-{
- return get_compat_sigset(set, uset);
-}
+#define unsafe_get_sigset_t unsafe_get_compat_sigset
#define to_user_ptr(p) ptr_to_compat(p)
#define from_user_ptr(p) compat_ptr(p)
static __always_inline int
-save_general_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame)
+__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
int val, i;
- WARN_ON(!FULL_REGS(regs));
-
for (i = 0; i <= PT_RESULT; i ++) {
/* Force usr to alway see softe as 1 (interrupts enabled) */
if (i == PT_SOFTE)
@@ -116,8 +109,8 @@ failed:
return 1;
}
-static inline int restore_general_regs(struct pt_regs *regs,
- struct mcontext __user *sr)
+static __always_inline int
+__unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
int i;
@@ -125,10 +118,12 @@ static inline int restore_general_regs(struct pt_regs *regs,
for (i = 0; i <= PT_RESULT; i++) {
if ((i == PT_MSR) || (i == PT_SOFTE))
continue;
- if (__get_user(gregs[i], &sr->mc_gregs[i]))
- return -EFAULT;
+ unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed);
}
return 0;
+
+failed:
+ return 1;
}
#else /* CONFIG_PPC64 */
@@ -142,18 +137,14 @@ static inline int restore_general_regs(struct pt_regs *regs,
unsafe_copy_to_user(__us, __s, sizeof(*__us), label); \
} while (0)
-static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
-{
- return copy_from_user(set, uset, sizeof(*uset));
-}
+#define unsafe_get_sigset_t unsafe_get_user_sigset
#define to_user_ptr(p) ((unsigned long)(p))
#define from_user_ptr(p) ((void __user *)(p))
static __always_inline int
-save_general_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame)
+__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
- WARN_ON(!FULL_REGS(regs));
unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
return 0;
@@ -161,23 +152,30 @@ failed:
return 1;
}
-static inline int restore_general_regs(struct pt_regs *regs,
- struct mcontext __user *sr)
+static __always_inline
+int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
/* copy up to but not including MSR */
- if (__copy_from_user(regs, &sr->mc_gregs,
- PT_MSR * sizeof(elf_greg_t)))
- return -EFAULT;
+ unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed);
+
/* copy from orig_r3 (the word after the MSR) up to the end */
- if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
- GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
- return -EFAULT;
+ unsafe_copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
+ GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed);
+
return 0;
+
+failed:
+ return 1;
}
#endif
#define unsafe_save_general_regs(regs, frame, label) do { \
- if (save_general_regs_unsafe(regs, frame)) \
+ if (__unsafe_save_general_regs(regs, frame)) \
+ goto label; \
+} while (0)
+
+#define unsafe_restore_general_regs(regs, frame, label) do { \
+ if (__unsafe_restore_general_regs(regs, frame)) \
goto label; \
} while (0)
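
The __unsafe_* helpers return 0 on success and nonzero on fault, and these thin macros turn that into a goto, so callers already inside a user_*_access_begin() window keep a single error exit. The wrapper idiom in isolation (names illustrative):

static __always_inline int __unsafe_demo_op(int x)
{
	return x < 0;		/* nonzero means fault */
}

#define unsafe_demo_op(x, label) do {		\
	if (__unsafe_demo_op(x))		\
		goto label;			\
} while (0)
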
@@ -260,8 +258,8 @@ static void prepare_save_user_regs(int ctx_has_vsx_region)
#endif
}
-static int save_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
- struct mcontext __user *tm_frame, int ctx_has_vsx_region)
+static int __unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+ struct mcontext __user *tm_frame, int ctx_has_vsx_region)
{
unsigned long msr = regs->msr;
@@ -338,7 +336,7 @@ failed:
}
#define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
- if (save_user_regs_unsafe(regs, frame, tm_frame, has_vsx)) \
+ if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx)) \
goto label; \
} while (0)
@@ -350,20 +348,14 @@ failed:
* We also save the transactional registers to a second ucontext in the
* frame.
*
- * See save_user_regs_unsafe() and signal_64.c:setup_tm_sigcontexts().
+ * See __unsafe_save_user_regs() and signal_64.c:setup_tm_sigcontexts().
*/
static void prepare_save_tm_user_regs(void)
{
WARN_ON(tm_suspend_disabled);
-#ifdef CONFIG_ALTIVEC
if (cpu_has_feature(CPU_FTR_ALTIVEC))
current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
-#endif
-#ifdef CONFIG_SPE
- if (current->thread.used_spe)
- flush_spe_to_thread(current);
-#endif
}
static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
@@ -381,7 +373,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
*/
unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);
-#ifdef CONFIG_ALTIVEC
/* save altivec registers */
if (current->thread.used_vr) {
unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
@@ -414,7 +405,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
else
unsafe_put_user(current->thread.ckvrsave,
(u32 __user *)&tm_frame->mc_vregs[32], failed);
-#endif /* CONFIG_ALTIVEC */
unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
if (msr & MSR_FP)
@@ -422,7 +412,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
else
unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);
-#ifdef CONFIG_VSX
/*
* Copy VSR 0-31 upper half from thread_struct to local
* buffer, then write that to userspace. Also set MSR_VSX in
@@ -438,23 +427,6 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
msr |= MSR_VSX;
}
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
- /* SPE regs are not checkpointed with TM, so this section is
- * simply the same as in save_user_regs_unsafe().
- */
- if (current->thread.used_spe) {
- unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
- ELF_NEVRREG * sizeof(u32), failed);
- /* set MSR_SPE in the saved MSR value to indicate that
- * frame->mc_vregs contains valid data */
- msr |= MSR_SPE;
- }
-
- /* We always copy to/from spefscr */
- unsafe_put_user(current->thread.spefscr,
- (u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
-#endif /* CONFIG_SPE */
unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
@@ -485,69 +457,64 @@ static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user
static long restore_user_regs(struct pt_regs *regs,
struct mcontext __user *sr, int sig)
{
- long err;
unsigned int save_r2 = 0;
unsigned long msr;
#ifdef CONFIG_VSX
int i;
#endif
+ if (!user_read_access_begin(sr, sizeof(*sr)))
+ return 1;
/*
* restore general registers but not including MSR or SOFTE. Also
* take care of keeping r2 (TLS) intact if not a signal
*/
if (!sig)
save_r2 = (unsigned int)regs->gpr[2];
- err = restore_general_regs(regs, sr);
+ unsafe_restore_general_regs(regs, sr, failed);
set_trap_norestart(regs);
- err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
+ unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
if (!sig)
regs->gpr[2] = (unsigned long) save_r2;
- if (err)
- return 1;
/* if doing signal return, restore the previous little-endian mode */
if (sig)
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
#ifdef CONFIG_ALTIVEC
/*
* Force the process to reload the altivec registers from
* current->thread when it next does altivec instructions
*/
- regs->msr &= ~MSR_VEC;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
- if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
- sizeof(sr->mc_vregs)))
- return 1;
+ unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
+ sizeof(sr->mc_vregs), failed);
current->thread.used_vr = true;
} else if (current->thread.used_vr)
memset(&current->thread.vr_state, 0,
ELF_NVRREG * sizeof(vector128));
/* Always get VRSAVE back */
- if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
- return 1;
+ unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
- if (copy_fpr_from_user(current, &sr->mc_fregs))
- return 1;
+ unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
#ifdef CONFIG_VSX
/*
* Force the process to reload the VSX registers from
* current->thread when it next does VSX instruction.
*/
- regs->msr &= ~MSR_VSX;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
if (msr & MSR_VSX) {
/*
* Restore altivec registers from the stack to a local
* buffer, then write this out to the thread_struct
*/
- if (copy_vsx_from_user(current, &sr->mc_vsregs))
- return 1;
+ unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed);
current->thread.used_vsr = true;
} else if (current->thread.used_vsr)
for (i = 0; i < 32 ; i++)
@@ -557,27 +524,30 @@ static long restore_user_regs(struct pt_regs *regs,
* force the process to reload the FP registers from
* current->thread when it next does FP instructions
*/
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
#ifdef CONFIG_SPE
/* force the process to reload the spe registers from
current->thread when it next does spe instructions */
- regs->msr &= ~MSR_SPE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
if (msr & MSR_SPE) {
/* restore spe registers from the stack */
- if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
- ELF_NEVRREG * sizeof(u32)))
- return 1;
+ unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
+ ELF_NEVRREG * sizeof(u32), failed);
current->thread.used_spe = true;
} else if (current->thread.used_spe)
memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
/* Always get SPEFSCR back */
- if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
- return 1;
+ unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */
+ user_read_access_end();
return 0;
+
+failed:
+ user_read_access_end();
+ return 1;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -590,11 +560,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
struct mcontext __user *sr,
struct mcontext __user *tm_sr)
{
- long err;
unsigned long msr, msr_hi;
-#ifdef CONFIG_VSX
int i;
-#endif
if (tm_suspend_disabled)
return 1;
@@ -605,28 +572,21 @@ static long restore_tm_user_regs(struct pt_regs *regs,
* TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
* were set by the signal delivery.
*/
- err = restore_general_regs(regs, tm_sr);
- err |= restore_general_regs(&current->thread.ckpt_regs, sr);
-
- err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
-
- err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
- if (err)
+ if (!user_read_access_begin(sr, sizeof(*sr)))
return 1;
+ unsafe_restore_general_regs(&current->thread.ckpt_regs, sr, failed);
+ unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
+ unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
+
/* Restore the previous little-endian mode */
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
-#ifdef CONFIG_ALTIVEC
- regs->msr &= ~MSR_VEC;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
- if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
- sizeof(sr->mc_vregs)) ||
- __copy_from_user(&current->thread.vr_state,
- &tm_sr->mc_vregs,
- sizeof(sr->mc_vregs)))
- return 1;
+ unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
+ sizeof(sr->mc_vregs), failed);
current->thread.used_vr = true;
} else if (current->thread.used_vr) {
memset(&current->thread.vr_state, 0,
@@ -636,62 +596,62 @@ static long restore_tm_user_regs(struct pt_regs *regs,
}
/* Always get VRSAVE back */
- if (__get_user(current->thread.ckvrsave,
- (u32 __user *)&sr->mc_vregs[32]) ||
- __get_user(current->thread.vrsave,
- (u32 __user *)&tm_sr->mc_vregs[32]))
- return 1;
+ unsafe_get_user(current->thread.ckvrsave,
+ (u32 __user *)&sr->mc_vregs[32], failed);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
-#endif /* CONFIG_ALTIVEC */
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
- if (copy_fpr_from_user(current, &sr->mc_fregs) ||
- copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
- return 1;
+ unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
-#ifdef CONFIG_VSX
- regs->msr &= ~MSR_VSX;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
if (msr & MSR_VSX) {
/*
		 * Restore VSX registers from the stack to a local
* buffer, then write this out to the thread_struct
*/
- if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
- copy_ckvsx_from_user(current, &sr->mc_vsregs))
- return 1;
+ unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed);
current->thread.used_vsr = true;
} else if (current->thread.used_vsr)
for (i = 0; i < 32 ; i++) {
current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
}
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
- /* SPE regs are not checkpointed with TM, so this section is
- * simply the same as in restore_user_regs().
- */
- regs->msr &= ~MSR_SPE;
- if (msr & MSR_SPE) {
- if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
- ELF_NEVRREG * sizeof(u32)))
- return 1;
- current->thread.used_spe = true;
- } else if (current->thread.used_spe)
- memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
+ user_read_access_end();
- /* Always get SPEFSCR back */
- if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
- + ELF_NEVRREG))
+ if (!user_read_access_begin(tm_sr, sizeof(*tm_sr)))
return 1;
-#endif /* CONFIG_SPE */
+
+ unsafe_restore_general_regs(regs, tm_sr, failed);
+
+ /* restore altivec registers from the stack */
+ if (msr & MSR_VEC)
+ unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
+ sizeof(sr->mc_vregs), failed);
+
+ /* Always get VRSAVE back */
+ unsafe_get_user(current->thread.vrsave,
+ (u32 __user *)&tm_sr->mc_vregs[32], failed);
+
+ unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);
+
+ if (msr & MSR_VSX) {
+ /*
+		 * Restore VSX registers from the stack to a local
+ * buffer, then write this out to the thread_struct
+ */
+ unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
+ current->thread.used_vsr = true;
+ }
/* Get the top half of the MSR from the user context */
- if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
- return 1;
+ unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
msr_hi <<= 32;
+
+ user_read_access_end();
+
/* If TM bits are set to the reserved value, it's an invalid context */
if (MSR_TM_RESV(msr_hi))
return 1;
@@ -712,7 +672,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
*
* Pull in the MSR TM bits from the user context
*/
- regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
/* Now, recheckpoint. This loads up all of the checkpointed (older)
* registers, including FP and V[S]Rs. After recheckpointing, the
* transactional versions should be loaded.
@@ -727,18 +687,26 @@ static long restore_tm_user_regs(struct pt_regs *regs,
msr_check_and_set(msr & (MSR_FP | MSR_VEC));
if (msr & MSR_FP) {
load_fp_state(&current->thread.fp_state);
- regs->msr |= (MSR_FP | current->thread.fpexc_mode);
+ regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
}
-#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
load_vr_state(&current->thread.vr_state);
- regs->msr |= MSR_VEC;
+ regs_set_return_msr(regs, regs->msr | MSR_VEC);
}
-#endif
preempt_enable();
return 0;
+
+failed:
+ user_read_access_end();
+ return 1;
+}
+#else
+static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr,
+ struct mcontext __user *tm_sr)
+{
+ return 0;
}
#endif
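/*
 * The empty !CONFIG_PPC_TRANSACTIONAL_MEM stub added above is what lets the
 * caller in sys_sigreturn drop its #ifdef: the branch is still parsed and
 * type-checked, then discarded by the compiler. A generic sketch of the
 * idiom; CONFIG_FEATURE and do_feature() are hypothetical names.
 */
#include <linux/kconfig.h>

#ifdef CONFIG_FEATURE
long do_feature(void);			/* real implementation elsewhere */
#else
static inline long do_feature(void)	/* stub, folds away when disabled */
{
	return 0;
}
#endif

static void caller(void)
{
	if (IS_ENABLED(CONFIG_FEATURE))	/* constant-folds to 0 or 1 */
		do_feature();
}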
@@ -775,7 +743,7 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
else
prepare_save_user_regs(1);
- if (!user_write_access_begin(frame, sizeof(*frame)))
+ if (!user_access_begin(frame, sizeof(*frame)))
goto badframe;
/* Put the siginfo & fill in most of the ucontext */
@@ -805,21 +773,17 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
} else {
tramp = (unsigned long)mctx->mc_pad;
- /* Set up the sigreturn trampoline: li r0,sigret; sc */
- unsafe_put_user(PPC_INST_ADDI + __NR_rt_sigreturn, &mctx->mc_pad[0],
- failed);
- unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
+ unsafe_put_user(PPC_RAW_LI(_R0, __NR_rt_sigreturn), &mctx->mc_pad[0], failed);
+ unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
+ asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
}
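/*
 * The inline asm that replaces flush_icache_range() above pushes the freshly
 * written trampoline out of the data cache (dcbst) and invalidates the stale
 * instruction-cache line (icbi) before userspace can execute it. A rough
 * standalone sketch of the classic per-word sequence; sync_icache_word() is
 * illustrative, not the kernel's helper, and the patch can presumably omit
 * the trailing isync because the return to userspace is itself
 * context-synchronizing.
 */
static inline void sync_icache_word(const void *p)
{
	asm volatile("dcbst 0,%0; sync; icbi 0,%0; sync; isync"
		     : : "r" (p) : "memory");
}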
unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);
- user_write_access_end();
+ user_access_end();
if (copy_siginfo_to_user(&frame->info, &ksig->info))
goto badframe;
- if (tramp == (unsigned long)mctx->mc_pad)
- flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));
-
regs->link = tramp;
#ifdef CONFIG_PPC_FPU_REGS
@@ -837,14 +801,14 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
regs->gpr[4] = (unsigned long)&frame->info;
regs->gpr[5] = (unsigned long)&frame->uc;
regs->gpr[6] = (unsigned long)frame;
- regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
+ regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
/* enter the signal handler in native-endian mode */
- regs->msr &= ~MSR_LE;
- regs->msr |= (MSR_KERNEL & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
+
return 0;
failed:
- user_write_access_end();
+ user_access_end();
badframe:
signal_fault(tsk, regs, "handle_rt_signal32", frame);
@@ -879,7 +843,7 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
else
prepare_save_user_regs(1);
- if (!user_write_access_begin(frame, sizeof(*frame)))
+ if (!user_access_begin(frame, sizeof(*frame)))
goto badframe;
sc = (struct sigcontext __user *) &frame->sctx;
@@ -905,14 +869,11 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
} else {
tramp = (unsigned long)mctx->mc_pad;
- /* Set up the sigreturn trampoline: li r0,sigret; sc */
- unsafe_put_user(PPC_INST_ADDI + __NR_sigreturn, &mctx->mc_pad[0], failed);
- unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
+ unsafe_put_user(PPC_RAW_LI(_R0, __NR_sigreturn), &mctx->mc_pad[0], failed);
+ unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
+ asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
}
- user_write_access_end();
-
- if (tramp == (unsigned long)mctx->mc_pad)
- flush_icache_range(tramp, tramp + 2 * sizeof(unsigned long));
+ user_access_end();
regs->link = tramp;
@@ -928,14 +889,14 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
regs->gpr[1] = newsp;
regs->gpr[3] = ksig->sig;
regs->gpr[4] = (unsigned long) sc;
- regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
+ regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
/* enter the signal handler in native-endian mode */
- regs->msr &= ~MSR_LE;
- regs->msr |= (MSR_KERNEL & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
+
return 0;
failed:
- user_write_access_end();
+ user_access_end();
badframe:
signal_fault(tsk, regs, "handle_signal32", frame);
@@ -948,28 +909,31 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
sigset_t set;
struct mcontext __user *mcp;
- if (get_sigset_t(&set, &ucp->uc_sigmask))
+ if (!user_read_access_begin(ucp, sizeof(*ucp)))
return -EFAULT;
+
+ unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
#ifdef CONFIG_PPC64
{
u32 cmcp;
- if (__get_user(cmcp, &ucp->uc_regs))
- return -EFAULT;
+ unsafe_get_user(cmcp, &ucp->uc_regs, failed);
mcp = (struct mcontext __user *)(u64)cmcp;
- /* no need to check access_ok(mcp), since mcp < 4GB */
}
#else
- if (__get_user(mcp, &ucp->uc_regs))
- return -EFAULT;
- if (!access_ok(mcp, sizeof(*mcp)))
- return -EFAULT;
+ unsafe_get_user(mcp, &ucp->uc_regs, failed);
#endif
+ user_read_access_end();
+
set_current_blocked(&set);
if (restore_user_regs(regs, mcp, sig))
return -EFAULT;
return 0;
+
+failed:
+ user_read_access_end();
+ return -EFAULT;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -983,11 +947,15 @@ static int do_setcontext_tm(struct ucontext __user *ucp,
u32 cmcp;
u32 tm_cmcp;
- if (get_sigset_t(&set, &ucp->uc_sigmask))
+ if (!user_read_access_begin(ucp, sizeof(*ucp)))
return -EFAULT;
- if (__get_user(cmcp, &ucp->uc_regs) ||
- __get_user(tm_cmcp, &tm_ucp->uc_regs))
+ unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
+ unsafe_get_user(cmcp, &ucp->uc_regs, failed);
+
+ user_read_access_end();
+
+ if (__get_user(tm_cmcp, &tm_ucp->uc_regs))
return -EFAULT;
mcp = (struct mcontext __user *)(u64)cmcp;
tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
@@ -998,6 +966,10 @@ static int do_setcontext_tm(struct ucontext __user *ucp,
return -EFAULT;
return 0;
+
+failed:
+ user_read_access_end();
+ return -EFAULT;
}
#endif
@@ -1170,7 +1142,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
	 * set, and recheckpoint was not called. This avoids
	 * hitting a TM Bad Thing at RFID
*/
- regs->msr &= ~MSR_TS_MASK;
+ regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
}
/* Fall through, for non-TM restore */
#endif
@@ -1259,7 +1231,7 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
affect the contents of these registers. After this point,
failure is a problem, anyway, and it's very unlikely unless
the user is really doing something wrong. */
- regs->msr = new_msr;
+ regs_set_return_msr(regs, new_msr);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
current->thread.debug.dbcr0 = new_dbcr0;
#endif
@@ -1315,19 +1287,16 @@ SYSCALL_DEFINE0(sigreturn)
struct sigcontext __user *sc;
struct sigcontext sigctx;
struct mcontext __user *sr;
- void __user *addr;
sigset_t set;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- struct mcontext __user *mcp, *tm_mcp;
- unsigned long msr_hi;
-#endif
+ struct mcontext __user *mcp;
+ struct mcontext __user *tm_mcp = NULL;
+ unsigned long long msr_hi = 0;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
sc = &sf->sctx;
- addr = sc;
if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
goto badframe;
@@ -1343,31 +1312,32 @@ SYSCALL_DEFINE0(sigreturn)
#endif
set_current_blocked(&set);
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
mcp = (struct mcontext __user *)&sf->mctx;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
goto badframe;
+#endif
if (MSR_TM_ACTIVE(msr_hi<<32)) {
if (!cpu_has_feature(CPU_FTR_TM))
goto badframe;
if (restore_tm_user_regs(regs, mcp, tm_mcp))
goto badframe;
- } else
-#endif
- {
+ } else {
sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
- addr = sr;
- if (!access_ok(sr, sizeof(*sr))
- || restore_user_regs(regs, sr, 1))
- goto badframe;
+ if (restore_user_regs(regs, sr, 1)) {
+ signal_fault(current, regs, "sys_sigreturn", sr);
+
+ force_sig(SIGSEGV);
+ return 0;
+ }
}
set_thread_flag(TIF_RESTOREALL);
return 0;
badframe:
- signal_fault(current, regs, "sys_sigreturn", addr);
+ signal_fault(current, regs, "sys_sigreturn", sc);
force_sig(SIGSEGV);
return 0;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index f9e4a1ac440f..1831bba0582e 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -79,13 +79,36 @@ static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
}
#endif
+static void prepare_setup_sigcontext(struct task_struct *tsk)
+{
+#ifdef CONFIG_ALTIVEC
+ /* save altivec registers */
+ if (tsk->thread.used_vr)
+ flush_altivec_to_thread(tsk);
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ tsk->thread.vrsave = mfspr(SPRN_VRSAVE);
+#endif /* CONFIG_ALTIVEC */
+
+ flush_fp_to_thread(tsk);
+
+#ifdef CONFIG_VSX
+ if (tsk->thread.used_vsr)
+ flush_vsx_to_thread(tsk);
+#endif /* CONFIG_VSX */
+}
+
/*
* Set up the sigcontext for the signal frame.
*/
-static long setup_sigcontext(struct sigcontext __user *sc,
- struct task_struct *tsk, int signr, sigset_t *set,
- unsigned long handler, int ctx_has_vsx_region)
+#define unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region, label)\
+do { \
+ if (__unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region))\
+ goto label; \
+} while (0)
+static long notrace __unsafe_setup_sigcontext(struct sigcontext __user *sc,
+ struct task_struct *tsk, int signr, sigset_t *set,
+ unsigned long handler, int ctx_has_vsx_region)
{
/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
* process never used altivec yet (MSR_VEC is zero in pt_regs of
@@ -97,25 +120,22 @@ static long setup_sigcontext(struct sigcontext __user *sc,
*/
#ifdef CONFIG_ALTIVEC
elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
- unsigned long vrsave;
#endif
struct pt_regs *regs = tsk->thread.regs;
unsigned long msr = regs->msr;
- long err = 0;
	/* Force user to always see softe as 1 (interrupts enabled) */
unsigned long softe = 0x1;
BUG_ON(tsk != current);
#ifdef CONFIG_ALTIVEC
- err |= __put_user(v_regs, &sc->v_regs);
+ unsafe_put_user(v_regs, &sc->v_regs, efault_out);
/* save altivec registers */
if (tsk->thread.used_vr) {
- flush_altivec_to_thread(tsk);
/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
- err |= __copy_to_user(v_regs, &tsk->thread.vr_state,
- 33 * sizeof(vector128));
+ unsafe_copy_to_user(v_regs, &tsk->thread.vr_state,
+ 33 * sizeof(vector128), efault_out);
		/* set MSR_VEC in the MSR value in the frame to indicate that sc->v_regs
* contains valid data.
*/
@@ -124,19 +144,12 @@ static long setup_sigcontext(struct sigcontext __user *sc,
/* We always copy to/from vrsave, it's 0 if we don't have or don't
* use altivec.
*/
- vrsave = 0;
- if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
- vrsave = mfspr(SPRN_VRSAVE);
- tsk->thread.vrsave = vrsave;
- }
-
- err |= __put_user(vrsave, (u32 __user *)&v_regs[33]);
+ unsafe_put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
#else /* CONFIG_ALTIVEC */
- err |= __put_user(0, &sc->v_regs);
+ unsafe_put_user(0, &sc->v_regs, efault_out);
#endif /* CONFIG_ALTIVEC */
- flush_fp_to_thread(tsk);
/* copy fpr regs and fpscr */
- err |= copy_fpr_to_user(&sc->fp_regs, tsk);
+ unsafe_copy_fpr_to_user(&sc->fp_regs, tsk, efault_out);
/*
* Clear the MSR VSX bit to indicate there is no valid state attached
@@ -150,26 +163,27 @@ static long setup_sigcontext(struct sigcontext __user *sc,
* VMX data.
*/
if (tsk->thread.used_vsr && ctx_has_vsx_region) {
- flush_vsx_to_thread(tsk);
v_regs += ELF_NVRREG;
- err |= copy_vsx_to_user(v_regs, tsk);
+ unsafe_copy_vsx_to_user(v_regs, tsk, efault_out);
/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_regs contains valid data.
*/
msr |= MSR_VSX;
}
#endif /* CONFIG_VSX */
- err |= __put_user(&sc->gp_regs, &sc->regs);
- WARN_ON(!FULL_REGS(regs));
- err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
- err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
- err |= __put_user(softe, &sc->gp_regs[PT_SOFTE]);
- err |= __put_user(signr, &sc->signal);
- err |= __put_user(handler, &sc->handler);
+ unsafe_put_user(&sc->gp_regs, &sc->regs, efault_out);
+ unsafe_copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE, efault_out);
+ unsafe_put_user(msr, &sc->gp_regs[PT_MSR], efault_out);
+ unsafe_put_user(softe, &sc->gp_regs[PT_SOFTE], efault_out);
+ unsafe_put_user(signr, &sc->signal, efault_out);
+ unsafe_put_user(handler, &sc->handler, efault_out);
if (set != NULL)
- err |= __put_user(set->sig[0], &sc->oldmask);
+ unsafe_put_user(set->sig[0], &sc->oldmask, efault_out);
- return err;
+ return 0;
+
+efault_out:
+ return -EFAULT;
}
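/*
 * The unsafe_setup_sigcontext() wrapper above is the standard adapter in this
 * series: the helper keeps returning -EFAULT, and the macro converts that
 * into a goto so call sites read like the other unsafe_*() accessors. The
 * generic shape, with unsafe_do_thing()/__do_thing() as hypothetical names:
 */
#define unsafe_do_thing(dst, src, label)	\
do {						\
	if (__do_thing(dst, src))		\
		goto label;			\
} while (0)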
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -294,7 +308,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
err |= __put_user(&sc->gp_regs, &sc->regs);
err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
- WARN_ON(!FULL_REGS(regs));
err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
err |= __copy_to_user(&sc->gp_regs,
&tsk->thread.ckpt_regs, GP_REGS_SIZE);
@@ -312,14 +325,16 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
/*
* Restore the sigcontext from the signal frame.
*/
-
-static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig,
- struct sigcontext __user *sc)
+#define unsafe_restore_sigcontext(tsk, set, sig, sc, label) do { \
+ if (__unsafe_restore_sigcontext(tsk, set, sig, sc)) \
+ goto label; \
+} while (0)
+static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_t *set,
+ int sig, struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
elf_vrreg_t __user *v_regs;
#endif
- unsigned long err = 0;
unsigned long save_r13 = 0;
unsigned long msr;
struct pt_regs *regs = tsk->thread.regs;
@@ -334,59 +349,57 @@ static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig,
save_r13 = regs->gpr[13];
/* copy the GPRs */
- err |= __copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr));
- err |= __get_user(regs->nip, &sc->gp_regs[PT_NIP]);
+ unsafe_copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr), efault_out);
+ unsafe_get_user(regs->nip, &sc->gp_regs[PT_NIP], efault_out);
/* get MSR separately, transfer the LE bit if doing signal return */
- err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+ unsafe_get_user(msr, &sc->gp_regs[PT_MSR], efault_out);
if (sig)
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
- err |= __get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3]);
- err |= __get_user(regs->ctr, &sc->gp_regs[PT_CTR]);
- err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]);
- err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
- err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
+ unsafe_get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3], efault_out);
+ unsafe_get_user(regs->ctr, &sc->gp_regs[PT_CTR], efault_out);
+ unsafe_get_user(regs->link, &sc->gp_regs[PT_LNK], efault_out);
+ unsafe_get_user(regs->xer, &sc->gp_regs[PT_XER], efault_out);
+ unsafe_get_user(regs->ccr, &sc->gp_regs[PT_CCR], efault_out);
/* Don't allow userspace to set SOFTE */
set_trap_norestart(regs);
- err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
- err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
- err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
+ unsafe_get_user(regs->dar, &sc->gp_regs[PT_DAR], efault_out);
+ unsafe_get_user(regs->dsisr, &sc->gp_regs[PT_DSISR], efault_out);
+ unsafe_get_user(regs->result, &sc->gp_regs[PT_RESULT], efault_out);
if (!sig)
regs->gpr[13] = save_r13;
if (set != NULL)
- err |= __get_user(set->sig[0], &sc->oldmask);
+ unsafe_get_user(set->sig[0], &sc->oldmask, efault_out);
/*
* Force reload of FP/VEC.
* This has to be done before copying stuff into tsk->thread.fpr/vr
* for the reasons explained in the previous comment.
*/
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
#ifdef CONFIG_ALTIVEC
- err |= __get_user(v_regs, &sc->v_regs);
- if (err)
- return err;
+ unsafe_get_user(v_regs, &sc->v_regs, efault_out);
if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128)))
return -EFAULT;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
if (v_regs != NULL && (msr & MSR_VEC) != 0) {
- err |= __copy_from_user(&tsk->thread.vr_state, v_regs,
- 33 * sizeof(vector128));
+ unsafe_copy_from_user(&tsk->thread.vr_state, v_regs,
+ 33 * sizeof(vector128), efault_out);
tsk->thread.used_vr = true;
} else if (tsk->thread.used_vr) {
memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
}
/* Always get VRSAVE back */
if (v_regs != NULL)
- err |= __get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33]);
+ unsafe_get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
else
tsk->thread.vrsave = 0;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
/* restore floating point */
- err |= copy_fpr_from_user(tsk, &sc->fp_regs);
+ unsafe_copy_fpr_from_user(tsk, &sc->fp_regs, efault_out);
#ifdef CONFIG_VSX
/*
* Get additional VSX data. Update v_regs to point after the
@@ -395,14 +408,17 @@ static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig,
*/
v_regs += ELF_NVRREG;
if ((msr & MSR_VSX) != 0) {
- err |= copy_vsx_from_user(tsk, v_regs);
+ unsafe_copy_vsx_from_user(tsk, v_regs, efault_out);
tsk->thread.used_vsr = true;
} else {
for (i = 0; i < 32 ; i++)
tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
}
#endif
- return err;
+ return 0;
+
+efault_out:
+ return -EFAULT;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -452,7 +468,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
return -EINVAL;
/* pull in MSR LE from user context */
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
/* The following non-GPR non-FPR non-VR state is also checkpointed: */
err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
@@ -479,7 +495,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
* This has to be done before copying stuff into tsk->thread.fpr/vr
* for the reasons explained in the previous comment.
*/
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
#ifdef CONFIG_ALTIVEC
err |= __get_user(v_regs, &sc->v_regs);
@@ -549,7 +565,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
preempt_disable();
/* pull in MSR TS bits from user context */
- regs->msr |= msr & MSR_TS_MASK;
+ regs_set_return_msr(regs, regs->msr | (msr & MSR_TS_MASK));
/*
* Ensure that TM is enabled in regs->msr before we leave the signal
@@ -567,7 +583,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
* to be de-scheduled with MSR[TS] set but without calling
* tm_recheckpoint(). This can cause a bug.
*/
- regs->msr |= MSR_TM;
+ regs_set_return_msr(regs, regs->msr | MSR_TM);
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&tsk->thread);
@@ -575,17 +591,23 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
msr_check_and_set(msr & (MSR_FP | MSR_VEC));
if (msr & MSR_FP) {
load_fp_state(&tsk->thread.fp_state);
- regs->msr |= (MSR_FP | tsk->thread.fpexc_mode);
+ regs_set_return_msr(regs, regs->msr | (MSR_FP | tsk->thread.fpexc_mode));
}
if (msr & MSR_VEC) {
load_vr_state(&tsk->thread.vr_state);
- regs->msr |= MSR_VEC;
+ regs_set_return_msr(regs, regs->msr | MSR_VEC);
}
preempt_enable();
return err;
}
+#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
+static long restore_tm_sigcontexts(struct task_struct *tsk, struct sigcontext __user *sc,
+ struct sigcontext __user *tm_sc)
+{
+ return -EINVAL;
+}
#endif
/*
@@ -596,15 +618,12 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
int i;
long err = 0;
- /* bctrl # call the handler */
- err |= __put_user(PPC_INST_BCTRL, &tramp[0]);
- /* addi r1, r1, __SIGNAL_FRAMESIZE # Pop the dummy stackframe */
- err |= __put_user(PPC_INST_ADDI | __PPC_RT(R1) | __PPC_RA(R1) |
- (__SIGNAL_FRAMESIZE & 0xffff), &tramp[1]);
- /* li r0, __NR_[rt_]sigreturn| */
- err |= __put_user(PPC_INST_ADDI | (syscall & 0xffff), &tramp[2]);
- /* sc */
- err |= __put_user(PPC_INST_SC, &tramp[3]);
+	/* Call the handler and pop the dummy stackframe */
+ err |= __put_user(PPC_RAW_BCTRL(), &tramp[0]);
+ err |= __put_user(PPC_RAW_ADDI(_R1, _R1, __SIGNAL_FRAMESIZE), &tramp[1]);
+
+ err |= __put_user(PPC_RAW_LI(_R0, syscall), &tramp[2]);
+ err |= __put_user(PPC_RAW_SC(), &tramp[3]);
/* Minimal traceback info */
	for (i = TRAMP_TRACEBACK; i < TRAMP_SIZE; i++)
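/*
 * The PPC_RAW_*() helpers used above build complete instruction words from
 * opcode and operand fields, replacing the old PPC_INST_* constant-plus-
 * arithmetic style. Roughly, and simplified relative to the real definitions
 * in arch/powerpc/include/asm/ppc-opcode.h (the EX_ prefix marks these as
 * sketches):
 */
#define EX_PPC_RAW_ADDI(rt, ra, i) \
	(0x38000000 | ((rt) << 21) | ((ra) << 16) | ((i) & 0xffff))
#define EX_PPC_RAW_LI(rt, i)	EX_PPC_RAW_ADDI(rt, 0, i)	/* li = addi rt,0,i */
#define EX_PPC_RAW_SC()		0x44000002			/* sc */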
@@ -655,12 +674,16 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
ctx_has_vsx_region = 1;
if (old_ctx != NULL) {
- if (!access_ok(old_ctx, ctx_size)
- || setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL, 0,
- ctx_has_vsx_region)
- || __copy_to_user(&old_ctx->uc_sigmask,
- &current->blocked, sizeof(sigset_t)))
+ prepare_setup_sigcontext(current);
+ if (!user_write_access_begin(old_ctx, ctx_size))
return -EFAULT;
+
+ unsafe_setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL,
+ 0, ctx_has_vsx_region, efault_out);
+ unsafe_copy_to_user(&old_ctx->uc_sigmask, &current->blocked,
+ sizeof(sigset_t), efault_out);
+
+ user_write_access_end();
}
if (new_ctx == NULL)
return 0;
@@ -680,15 +703,26 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
* We kill the task with a SIGSEGV in this situation.
*/
- if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
+ if (__get_user_sigset(&set, &new_ctx->uc_sigmask))
do_exit(SIGSEGV);
set_current_blocked(&set);
- if (restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext))
+
+ if (!user_read_access_begin(new_ctx, ctx_size))
+ return -EFAULT;
+ if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) {
+ user_read_access_end();
do_exit(SIGSEGV);
+ }
+ user_read_access_end();
/* This returns like rt_sigreturn */
set_thread_flag(TIF_RESTOREALL);
+
return 0;
+
+efault_out:
+ user_write_access_end();
+ return -EFAULT;
}
@@ -701,9 +735,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
struct pt_regs *regs = current_pt_regs();
struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
sigset_t set;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
unsigned long msr;
-#endif
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
@@ -711,52 +743,54 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (!access_ok(uc, sizeof(*uc)))
goto badframe;
- if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
+ if (__get_user_sigset(&set, &uc->uc_sigmask))
goto badframe;
set_current_blocked(&set);
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /*
- * If there is a transactional state then throw it away.
- * The purpose of a sigreturn is to destroy all traces of the
- * signal frame, this includes any transactional state created
- * within in. We only check for suspended as we can never be
- * active in the kernel, we are active, there is nothing better to
- * do than go ahead and Bad Thing later.
- * The cause is not important as there will never be a
- * recheckpoint so it's not user visible.
- */
- if (MSR_TM_SUSPENDED(mfmsr()))
- tm_reclaim_current(0);
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM)) {
+ /*
+ * If there is a transactional state then throw it away.
+ * The purpose of a sigreturn is to destroy all traces of the
+	 * signal frame, and this includes any transactional state created
+	 * within it. We only check for suspended as we can never be
+	 * active in the kernel; if we are active, there is nothing better to
+ * do than go ahead and Bad Thing later.
+ * The cause is not important as there will never be a
+ * recheckpoint so it's not user visible.
+ */
+ if (MSR_TM_SUSPENDED(mfmsr()))
+ tm_reclaim_current(0);
- /*
- * Disable MSR[TS] bit also, so, if there is an exception in the
- * code below (as a page fault in copy_ckvsx_to_user()), it does
- * not recheckpoint this task if there was a context switch inside
- * the exception.
- *
- * A major page fault can indirectly call schedule(). A reschedule
- * process in the middle of an exception can have a side effect
- * (Changing the CPU MSR[TS] state), since schedule() is called
- * with the CPU MSR[TS] disable and returns with MSR[TS]=Suspended
- * (switch_to() calls tm_recheckpoint() for the 'new' process). In
- * this case, the process continues to be the same in the CPU, but
- * the CPU state just changed.
- *
- * This can cause a TM Bad Thing, since the MSR in the stack will
- * have the MSR[TS]=0, and this is what will be used to RFID.
- *
- * Clearing MSR[TS] state here will avoid a recheckpoint if there
- * is any process reschedule in kernel space. The MSR[TS] state
- * does not need to be saved also, since it will be replaced with
- * the MSR[TS] that came from user context later, at
- * restore_tm_sigcontexts.
- */
- regs->msr &= ~MSR_TS_MASK;
+ /*
+ * Disable MSR[TS] bit also, so, if there is an exception in the
+ * code below (as a page fault in copy_ckvsx_to_user()), it does
+ * not recheckpoint this task if there was a context switch inside
+ * the exception.
+ *
+ * A major page fault can indirectly call schedule(). A reschedule
+	 * in the middle of an exception can have a side effect
+	 * (changing the CPU MSR[TS] state), since schedule() is called
+	 * with the CPU MSR[TS] disabled and returns with MSR[TS]=Suspended
+ * (switch_to() calls tm_recheckpoint() for the 'new' process). In
+ * this case, the process continues to be the same in the CPU, but
+ * the CPU state just changed.
+ *
+ * This can cause a TM Bad Thing, since the MSR in the stack will
+ * have the MSR[TS]=0, and this is what will be used to RFID.
+ *
+ * Clearing MSR[TS] state here will avoid a recheckpoint if there
+ * is any process reschedule in kernel space. The MSR[TS] state
+ * does not need to be saved also, since it will be replaced with
+ * the MSR[TS] that came from user context later, at
+ * restore_tm_sigcontexts.
+ */
+ regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
- if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
- goto badframe;
- if (MSR_TM_ACTIVE(msr)) {
+ if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
+ goto badframe;
+ }
+
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && MSR_TM_ACTIVE(msr)) {
/* We recheckpoint on return. */
struct ucontext __user *uc_transact;
@@ -769,9 +803,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
&uc_transact->uc_mcontext))
goto badframe;
- } else
-#endif
- {
+ } else {
/*
* Fall through, for non-TM restore
*
@@ -784,17 +816,26 @@ SYSCALL_DEFINE0(rt_sigreturn)
* MSR[TS] set, but without CPU in the proper state,
* causing a TM bad thing.
*/
- current->thread.regs->msr &= ~MSR_TS_MASK;
- if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
+ regs_set_return_msr(current->thread.regs,
+ current->thread.regs->msr & ~MSR_TS_MASK);
+ if (!user_read_access_begin(&uc->uc_mcontext, sizeof(uc->uc_mcontext)))
goto badframe;
+
+ unsafe_restore_sigcontext(current, NULL, 1, &uc->uc_mcontext,
+ badframe_block);
+
+ user_read_access_end();
}
if (restore_altstack(&uc->uc_stack))
goto badframe;
set_thread_flag(TIF_RESTOREALL);
+
return 0;
+badframe_block:
+ user_read_access_end();
badframe:
signal_fault(current, regs, "rt_sigreturn", uc);
@@ -809,45 +850,60 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
unsigned long newsp = 0;
long err = 0;
struct pt_regs *regs = tsk->thread.regs;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Save the thread's msr before get_tm_stackpointer() changes it */
unsigned long msr = regs->msr;
-#endif
frame = get_sigframe(ksig, tsk, sizeof(*frame), 0);
- if (!access_ok(frame, sizeof(*frame)))
- goto badframe;
- err |= __put_user(&frame->info, &frame->pinfo);
- err |= __put_user(&frame->uc, &frame->puc);
- err |= copy_siginfo_to_user(&frame->info, &ksig->info);
- if (err)
+ /*
+ * This only applies when calling unsafe_setup_sigcontext() and must be
+ * called before opening the uaccess window.
+ */
+ if (!MSR_TM_ACTIVE(msr))
+ prepare_setup_sigcontext(tsk);
+
+ if (!user_write_access_begin(frame, sizeof(*frame)))
goto badframe;
+ unsafe_put_user(&frame->info, &frame->pinfo, badframe_block);
+ unsafe_put_user(&frame->uc, &frame->puc, badframe_block);
+
/* Create the ucontext. */
- err |= __put_user(0, &frame->uc.uc_flags);
- err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ unsafe_put_user(0, &frame->uc.uc_flags, badframe_block);
+ unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], badframe_block);
+
if (MSR_TM_ACTIVE(msr)) {
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* The ucontext_t passed to userland points to the second
* ucontext_t (for transactional state) with its uc_link ptr.
*/
- err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);
+ unsafe_put_user(&frame->uc_transact, &frame->uc.uc_link, badframe_block);
+
+ user_write_access_end();
+
err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
&frame->uc_transact.uc_mcontext,
tsk, ksig->sig, NULL,
(unsigned long)ksig->ka.sa.sa_handler,
msr);
- } else
+
+ if (!user_write_access_begin(&frame->uc.uc_sigmask,
+ sizeof(frame->uc.uc_sigmask)))
+ goto badframe;
+
#endif
- {
- err |= __put_user(0, &frame->uc.uc_link);
- err |= setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig,
+ } else {
+ unsafe_put_user(0, &frame->uc.uc_link, badframe_block);
+ unsafe_setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig,
NULL, (unsigned long)ksig->ka.sa.sa_handler,
- 1);
+ 1, badframe_block);
}
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- if (err)
+
+ unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block);
+ user_write_access_end();
+
+ /* Save the siginfo outside of the unsafe block. */
+ if (copy_siginfo_to_user(&frame->info, &ksig->info))
goto badframe;
/* Make sure signal handler doesn't get spurious FP exceptions */
@@ -855,12 +911,12 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
/* Set up to return from userspace. */
if (tsk->mm->context.vdso) {
- regs->nip = VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64);
+ regs_set_return_ip(regs, VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64));
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
if (err)
goto badframe;
- regs->nip = (unsigned long) &frame->tramp[0];
+ regs_set_return_ip(regs, (unsigned long) &frame->tramp[0]);
}
/* Allocate a dummy caller frame for the signal handler. */
@@ -885,14 +941,13 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
}
/* enter the signal handler in native-endian mode */
- regs->msr &= ~MSR_LE;
- regs->msr |= (MSR_KERNEL & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
regs->gpr[1] = newsp;
regs->gpr[3] = ksig->sig;
regs->result = 0;
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
- err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
- err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
+ regs->gpr[4] = (unsigned long)&frame->info;
+ regs->gpr[5] = (unsigned long)&frame->uc;
regs->gpr[6] = (unsigned long) frame;
} else {
regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
@@ -902,6 +957,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
return 0;
+badframe_block:
+ user_write_access_end();
badframe:
signal_fault(current, regs, "handle_rt_signal64", frame);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 5a4d59a1070d..447b78a87c8f 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -83,7 +83,7 @@ DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
-DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
+static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
@@ -122,14 +122,14 @@ static struct thread_groups_list tgl[NR_CPUS] __initdata;
 * On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
 * the set of its siblings that share the L1-cache.
*/
-DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
+static DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
/*
 * On some big-core systems, thread_group_l2_cache_map for each CPU
 * corresponds to the set of its siblings within the core that share the
* L2-cache.
*/
-DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
+static DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
@@ -619,6 +619,8 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
/*
* IRQs are already hard disabled by the smp_handle_nmi_ipi.
*/
+ set_cpu_online(smp_processor_id(), false);
+
spin_begin();
while (1)
spin_cpu_relax();
@@ -634,6 +636,15 @@ void smp_send_stop(void)
static void stop_this_cpu(void *dummy)
{
hard_irq_disable();
+
+ /*
+	 * Offlining CPUs in stop_this_cpu can result in scheduler warnings
+ * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
+ * to know other CPUs are offline before it breaks locks to flush
+ * printk buffers, in case we panic()ed while holding the lock.
+ */
+ set_cpu_online(smp_processor_id(), false);
+
spin_begin();
while (1)
spin_cpu_relax();
@@ -1047,7 +1058,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
/*
* numa_node_id() works after this.
*/
@@ -1057,17 +1068,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
local_memory_node(numa_cpu_lookup_table[cpu]));
}
#endif
- /*
- * cpu_core_map is now more updated and exists only since
- * its been exported for long. It only will have a snapshot
- * of cpu_cpu_mask.
- */
- cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
}
/* Init the cpumasks so the boot CPU is related to itself */
cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
+ cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
if (has_coregroup_support())
cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
@@ -1078,6 +1084,20 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
cpu_smallcore_mask(boot_cpuid));
}
+ if (cpu_to_chip_id(boot_cpuid) != -1) {
+ int idx = num_possible_cpus() / threads_per_core;
+
+ /*
+		 * All threads of a core belong to the same core, so
+		 * chip_id_lookup_table will have one entry per core.
+		 * Assumption: if boot_cpuid doesn't have a chip-id, then no
+		 * other CPU will have one either.
+ */
+ chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL);
+ if (chip_id_lookup_table)
+ memset(chip_id_lookup_table, -1, sizeof(int) * idx);
+ }
+
if (smp_ops && smp_ops->probe)
smp_ops->probe();
}
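/*
 * One subtlety in the chip_id_lookup_table setup above: kcalloc() zero-fills,
 * and the table is then memset() to -1 as the "no chip-id" sentinel. That
 * works because memset() stores a byte pattern, and 0xff in every byte of an
 * int is exactly -1. Self-contained sketch; alloc_minus_one_table() is
 * hypothetical.
 */
#include <linux/slab.h>
#include <linux/string.h>

static int *alloc_minus_one_table(size_t n)
{
	int *tbl = kcalloc(n, sizeof(int), GFP_KERNEL);

	if (tbl)
		memset(tbl, -1, n * sizeof(int));	/* every entry -> -1 */
	return tbl;
}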
@@ -1408,6 +1428,9 @@ static void remove_cpu_from_masks(int cpu)
set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
}
+ for_each_cpu(i, cpu_core_mask(cpu))
+ set_cpus_unrelated(cpu, i, cpu_core_mask);
+
if (has_coregroup_support()) {
for_each_cpu(i, cpu_coregroup_mask(cpu))
set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
@@ -1468,8 +1491,11 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
static void add_cpu_to_masks(int cpu)
{
+ struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
int first_thread = cpu_first_thread_sibling(cpu);
cpumask_var_t mask;
+ int chip_id = -1;
+ bool ret;
int i;
/*
@@ -1485,12 +1511,39 @@ static void add_cpu_to_masks(int cpu)
add_cpu_to_smallcore_masks(cpu);
/* In CPU-hotplug path, hence use GFP_ATOMIC */
- alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
+ ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
update_mask_by_l2(cpu, &mask);
if (has_coregroup_support())
update_coregroup_mask(cpu, &mask);
+ if (chip_id_lookup_table && ret)
+ chip_id = cpu_to_chip_id(cpu);
+
+ if (chip_id == -1) {
+ cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
+ goto out;
+ }
+
+ if (shared_caches)
+ submask_fn = cpu_l2_cache_mask;
+
+ /* Update core_mask with all the CPUs that are part of submask */
+ or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
+
+ /* Skip all CPUs already part of current CPU core mask */
+ cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
+
+ for_each_cpu(i, mask) {
+ if (chip_id == cpu_to_chip_id(i)) {
+ or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
+ cpumask_andnot(mask, mask, submask_fn(i));
+ } else {
+ cpumask_andnot(mask, mask, cpu_core_mask(i));
+ }
+ }
+
+out:
free_cpumask_var(mask);
}
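/*
 * The loop added to add_cpu_to_masks() partitions the remaining online CPUs
 * by chip id: same-chip CPUs are merged into the core mask, and CPUs on a
 * different chip are ruled out a whole core at a time, shrinking the search
 * set. A simplified one-directional sketch with generic cpumask helpers;
 * group_by_chip() is hypothetical and omits the submask merging the patch
 * does via or_cpumasks_related().
 */
#include <linux/cpumask.h>

static void group_by_chip(int cpu, int chip_id, struct cpumask *scratch)
{
	int i;

	/* Start from online CPUs not already in this CPU's core mask. */
	cpumask_andnot(scratch, cpu_online_mask, cpu_core_mask(cpu));
	for_each_cpu(i, scratch) {
		if (cpu_to_chip_id(i) == chip_id)
			cpumask_set_cpu(i, cpu_core_mask(cpu));
		else	/* different chip: skip its whole core at once */
			cpumask_andnot(scratch, scratch, cpu_core_mask(i));
	}
}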
@@ -1499,13 +1552,16 @@ void start_secondary(void *unused)
{
unsigned int cpu = raw_smp_processor_id();
+ /* PPC64 calls setup_kup() in early_setup_secondary() */
+ if (IS_ENABLED(CONFIG_PPC32))
+ setup_kup();
+
mmgrab(&init_mm);
current->active_mm = &init_mm;
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
rcu_cpu_starting(cpu);
- preempt_disable();
cpu_callin_map[cpu] = 1;
if (smp_ops->setup_cpu)
@@ -1521,6 +1577,9 @@ void start_secondary(void *unused)
vdso_getcpu_init();
#endif
+ set_numa_node(numa_cpu_lookup_table[cpu]);
+ set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
+
/* Update topology CPU masks */
add_cpu_to_masks(cpu);
@@ -1539,9 +1598,6 @@ void start_secondary(void *unused)
shared_caches = true;
}
- set_numa_node(numa_cpu_lookup_table[cpu]);
- set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
-
smp_wmb();
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index b6440657ef92..2b0d04a1b7d2 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -23,90 +23,56 @@
#include <asm/paca.h>
-/*
- * Save stack-backtrace addresses into a stack_trace buffer.
- */
-static void save_context_stack(struct stack_trace *trace, unsigned long sp,
- struct task_struct *tsk, int savesched)
+void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
{
+ unsigned long sp;
+
+ if (regs && !consume_entry(cookie, regs->nip))
+ return;
+
+ if (regs)
+ sp = regs->gpr[1];
+ else if (task == current)
+ sp = current_stack_frame();
+ else
+ sp = task->thread.ksp;
+
for (;;) {
unsigned long *stack = (unsigned long *) sp;
unsigned long newsp, ip;
- if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
+ if (!validate_sp(sp, task, STACK_FRAME_OVERHEAD))
return;
newsp = stack[0];
ip = stack[STACK_FRAME_LR_SAVE];
- if (savesched || !in_sched_functions(ip)) {
- if (!trace->skip)
- trace->entries[trace->nr_entries++] = ip;
- else
- trace->skip--;
- }
-
- if (trace->nr_entries >= trace->max_entries)
+ if (!consume_entry(cookie, ip))
return;
sp = newsp;
}
}
-void save_stack_trace(struct stack_trace *trace)
-{
- unsigned long sp;
-
- sp = current_stack_frame();
-
- save_context_stack(trace, sp, current, 1);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
-
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
- unsigned long sp;
-
- if (!try_get_task_stack(tsk))
- return;
-
- if (tsk == current)
- sp = current_stack_frame();
- else
- sp = tsk->thread.ksp;
-
- save_context_stack(trace, sp, tsk, 0);
-
- put_task_stack(tsk);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-
-void
-save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
- save_context_stack(trace, regs->gpr[1], current, 0);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_regs);
-
-#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/*
* This function returns an error if it detects any unreliable features of the
* stack. Otherwise it guarantees that the stack trace is reliable.
*
* If the task is not 'current', the caller *must* ensure the task is inactive.
*/
-static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
- struct stack_trace *trace)
+int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task)
{
unsigned long sp;
unsigned long newsp;
- unsigned long stack_page = (unsigned long)task_stack_page(tsk);
+ unsigned long stack_page = (unsigned long)task_stack_page(task);
unsigned long stack_end;
int graph_idx = 0;
bool firstframe;
stack_end = stack_page + THREAD_SIZE;
- if (!is_idle_task(tsk)) {
+ if (!is_idle_task(task)) {
/*
* For user tasks, this is the SP value loaded on
* kernel entry, see "PACAKSAVE(r13)" in _switch() and
@@ -130,10 +96,10 @@ static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
stack_end -= STACK_FRAME_OVERHEAD;
}
- if (tsk == current)
+ if (task == current)
sp = current_stack_frame();
else
- sp = tsk->thread.ksp;
+ sp = task->thread.ksp;
if (sp < stack_page + sizeof(struct thread_struct) ||
sp > stack_end - STACK_FRAME_MIN_SIZE) {
@@ -182,7 +148,7 @@ static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
* FIXME: IMHO these tests do not belong in
* arch-dependent code, they are generic.
*/
- ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, stack);
+ ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);
#ifdef CONFIG_KPROBES
/*
* Mark stacktraces with kretprobed functions on them
@@ -192,36 +158,12 @@ static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
return -EINVAL;
#endif
- if (trace->nr_entries >= trace->max_entries)
- return -E2BIG;
- if (!trace->skip)
- trace->entries[trace->nr_entries++] = ip;
- else
- trace->skip--;
+ if (!consume_entry(cookie, ip))
+ return -EINVAL;
}
return 0;
}
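/*
 * Both walkers are now written against the generic stacktrace interface:
 * instead of filling a struct stack_trace buffer, the arch code feeds each
 * return address to a consume_entry callback and stops when it returns
 * false. A minimal consumer, sketched; print_entry() and
 * dump_current_stack() are hypothetical.
 */
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

static bool print_entry(void *cookie, unsigned long addr)
{
	pr_info("  %pS\n", (void *)addr);
	return true;		/* returning false ends the walk early */
}

static void dump_current_stack(void)
{
	/* regs == NULL: start from the current stack frame. */
	arch_stack_walk(print_entry, NULL, current, NULL);
}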
-int save_stack_trace_tsk_reliable(struct task_struct *tsk,
- struct stack_trace *trace)
-{
- int ret;
-
- /*
- * If the task doesn't have a stack (e.g., a zombie), the stack is
- * "reliably" empty.
- */
- if (!try_get_task_stack(tsk))
- return 0;
-
- ret = __save_stack_trace_tsk_reliable(tsk, trace);
-
- put_task_stack(tsk);
-
- return ret;
-}
-#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
-
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
@@ -230,17 +172,31 @@ static void handle_backtrace_ipi(struct pt_regs *regs)
static void raise_backtrace_ipi(cpumask_t *mask)
{
+ struct paca_struct *p;
unsigned int cpu;
+ u64 delay_us;
for_each_cpu(cpu, mask) {
- if (cpu == smp_processor_id())
+ if (cpu == smp_processor_id()) {
handle_backtrace_ipi(NULL);
- else
- smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
- }
+ continue;
+ }
- for_each_cpu(cpu, mask) {
- struct paca_struct *p = paca_ptrs[cpu];
+ delay_us = 5 * USEC_PER_SEC;
+
+ if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
+ // Now wait up to 5s for the other CPU to do its backtrace
+ while (cpumask_test_cpu(cpu, mask) && delay_us) {
+ udelay(1);
+ delay_us--;
+ }
+
+ // Other CPU cleared itself from the mask
+ if (delay_us)
+ continue;
+ }
+
+ p = paca_ptrs[cpu];
cpumask_clear_cpu(cpu, mask);
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index 078608ec2e92..bf4ae0f0e36c 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -82,16 +82,8 @@ int
ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct __kernel_old_timeval __user *tvp)
{
if ( (unsigned long)n >= 4096 )
- {
- unsigned long __user *buffer = (unsigned long __user *)n;
- if (!access_ok(buffer, 5*sizeof(unsigned long))
- || __get_user(n, buffer)
- || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
- || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
- || __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
- || __get_user(tvp, ((struct __kernel_old_timeval __user * __user *)(buffer+4))))
- return -EFAULT;
- }
+ return sys_old_select((void __user *)n);
+
return sys_select(n, inp, outp, exp, tvp);
}
#endif
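/*
 * The removed block open-coded what sys_old_select() already does: the
 * historical calling convention passes one user pointer to five packed
 * words. Roughly the layout it decodes; this copy is illustrative, see the
 * real struct sel_arg_struct in fs/select.c.
 */
struct old_sel_arg {
	unsigned long n;			/* highest fd + 1 */
	fd_set __user *inp, *outp, *exp;	/* read/write/except sets */
	struct __kernel_old_timeval __user *tvp;/* timeout, may be NULL */
};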
@@ -122,7 +114,8 @@ SYSCALL_DEFINE0(switch_endian)
{
struct thread_info *ti;
- current->thread.regs->msr ^= MSR_LE;
+ regs_set_return_msr(current->thread.regs,
+ current->thread.regs->msr ^ MSR_LE);
/*
* Set TIF_RESTOREALL so that r3 isn't clobbered on return to
diff --git a/arch/powerpc/kernel/syscalls/Makefile b/arch/powerpc/kernel/syscalls/Makefile
index 9e3be295dbba..5476f62eb80f 100644
--- a/arch/powerpc/kernel/syscalls/Makefile
+++ b/arch/powerpc/kernel/syscalls/Makefile
@@ -6,53 +6,38 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
syscall := $(src)/syscall.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abis_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '$(syshdr_offset_$(basetarget))'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis $(abis) $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
- '$(systbl_abis_$(basetarget))' \
- '$(systbl_abi_$(basetarget))' \
- '$(systbl_offset_$(basetarget))'
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@
-syshdr_abis_unistd_32 := common,nospu,32
+$(uapi)/unistd_32.h: abis := common,nospu,32
$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abis_unistd_64 := common,nospu,64
+$(uapi)/unistd_64.h: abis := common,nospu,64
$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-systbl_abis_syscall_table_32 := common,nospu,32
-systbl_abi_syscall_table_32 := 32
+$(kapi)/syscall_table_32.h: abis := common,nospu,32
$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
-systbl_abis_syscall_table_64 := common,nospu,64
-systbl_abi_syscall_table_64 := 64
+$(kapi)/syscall_table_64.h: abis := common,nospu,64
$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
-systbl_abis_syscall_table_c32 := common,nospu,32
-systbl_abi_syscall_table_c32 := c32
-$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE
- $(call if_changed,systbl)
-
-systbl_abis_syscall_table_spu := common,spu
-systbl_abi_syscall_table_spu := spu
+$(kapi)/syscall_table_spu.h: abis := common,spu
$(kapi)/syscall_table_spu.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_32.h unistd_64.h
kapisyshdr-y += syscall_table_32.h \
syscall_table_64.h \
- syscall_table_c32.h \
syscall_table_spu.h
uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 0b2480cf3e47..aef2a290e71a 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -522,3 +522,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/powerpc/kernel/syscalls/syscallhdr.sh b/arch/powerpc/kernel/syscalls/syscallhdr.sh
deleted file mode 100644
index 02d6751f3be3..000000000000
--- a/arch/powerpc/kernel/syscalls/syscallhdr.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_UAPI_ASM_POWERPC_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- printf "#ifndef %s\n" "${fileguard}"
- printf "#define %s\n" "${fileguard}"
- printf "\n"
-
- nxt=0
- while read nr abi name entry compat ; do
- if [ -z "$offset" ]; then
- printf "#define __NR_%s%s\t%s\n" \
- "${prefix}" "${name}" "${nr}"
- else
- printf "#define __NR_%s%s\t(%s + %s)\n" \
- "${prefix}" "${name}" "${offset}" "${nr}"
- fi
- nxt=$((nr+1))
- done
-
- printf "\n"
- printf "#ifdef __KERNEL__\n"
- printf "#define __NR_syscalls\t%s\n" "${nxt}"
- printf "#endif\n"
- printf "\n"
- printf "#endif /* %s */\n" "${fileguard}"
-) > "$out"
diff --git a/arch/powerpc/kernel/syscalls/syscalltbl.sh b/arch/powerpc/kernel/syscalls/syscalltbl.sh
deleted file mode 100644
index f7393a7b18aa..000000000000
--- a/arch/powerpc/kernel/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-my_abi="$4"
-offset="$5"
-
-emit() {
- t_nxt="$1"
- t_nr="$2"
- t_entry="$3"
-
- while [ $t_nxt -lt $t_nr ]; do
- printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}"
- t_nxt=$((t_nxt+1))
- done
- printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}"
-}
-
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- nxt=0
- if [ -z "$offset" ]; then
- offset=0
- fi
-
- while read nr abi name entry compat ; do
- if [ "$my_abi" = "c32" ] && [ ! -z "$compat" ]; then
- emit $((nxt+offset)) $((nr+offset)) $compat
- else
- emit $((nxt+offset)) $((nr+offset)) $entry
- fi
- nxt=$((nr+1))
- done
-) > "$out"
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 2e08640bb3b4..5ff0e55d0db1 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -843,14 +843,14 @@ static int register_cpu_online(unsigned int cpu)
#ifdef HAS_PPC_PMC_IBM
case PPC_PMC_IBM:
attrs = ibm_common_attrs;
- nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(ibm_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
case PPC_PMC_G4:
attrs = g4_common_attrs;
- nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(g4_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
@@ -858,7 +858,7 @@ static int register_cpu_online(unsigned int cpu)
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
- nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(pa6t_attrs);
pmc_attrs = NULL;
break;
#endif
@@ -940,14 +940,14 @@ static int unregister_cpu_online(unsigned int cpu)
#ifdef HAS_PPC_PMC_IBM
case PPC_PMC_IBM:
attrs = ibm_common_attrs;
- nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(ibm_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
case PPC_PMC_G4:
attrs = g4_common_attrs;
- nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(g4_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
@@ -955,7 +955,7 @@ static int unregister_cpu_online(unsigned int cpu)
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
- nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(pa6t_attrs);
pmc_attrs = NULL;
break;
#endif
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index d34276f3c495..cb3358886203 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -21,6 +21,7 @@
#define __SYSCALL(nr, entry) .long entry
#endif
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
.globl sys_call_table
sys_call_table:
#ifdef CONFIG_PPC64
@@ -30,8 +31,10 @@ sys_call_table:
#endif
#ifdef CONFIG_COMPAT
+#undef __SYSCALL_WITH_COMPAT
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
.globl compat_sys_call_table
compat_sys_call_table:
#define compat_sys_sigsuspend sys_sigsuspend
-#include <asm/syscall_table_c32.h>
+#include <asm/syscall_table_32.h>
#endif
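/*
 * With syscall_table_c32.h gone, the compat table is generated from the same
 * header by redefining __SYSCALL_WITH_COMPAT between two inclusions. The
 * double-expansion idiom, in outline (assembler context, so entries expand
 * to .long directives):
 */
#define __SYSCALL(nr, entry)				.long entry
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, native)
/* #include <asm/syscall_table_32.h>   -> native entries */

#undef __SYSCALL_WITH_COMPAT
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, compat)
/* #include <asm/syscall_table_32.h>   -> compat entries */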
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
index 6c31af7f4fa8..b9a047d92ec0 100644
--- a/arch/powerpc/kernel/tau_6xx.c
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -201,7 +201,7 @@ static int __init TAU_init(void)
tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
!strcmp(cur_cpu_spec->platform, "ppc750");
- tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1, 0);
+ tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1);
if (!tau_workq)
return -ENOMEM;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index b67d93a609a2..e45ce427bffb 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -231,24 +231,13 @@ static u64 scan_dispatch_log(u64 stop_tb)
void notrace accumulate_stolen_time(void)
{
u64 sst, ust;
- unsigned long save_irq_soft_mask = irq_soft_mask_return();
struct cpu_accounting_data *acct = &local_paca->accounting;
- /* We are called early in the exception entry, before
- * soft/hard_enabled are sync'ed to the expected state
- * for the exception. We are hard disabled but the PACA
- * needs to reflect that so various debug stuff doesn't
- * complain
- */
- irq_soft_mask_set(IRQS_DISABLED);
-
sst = scan_dispatch_log(acct->starttime_user);
ust = scan_dispatch_log(acct->starttime);
acct->stime -= sst;
acct->utime -= ust;
acct->steal_time += ust + sst;
-
- irq_soft_mask_set(save_irq_soft_mask);
}
static inline u64 calculate_stolen_time(u64 stop_tb)
@@ -508,16 +497,6 @@ EXPORT_SYMBOL(profile_pc);
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
*/
#ifdef CONFIG_PPC64
-static inline unsigned long test_irq_work_pending(void)
-{
- unsigned long x;
-
- asm volatile("lbz %0,%1(13)"
- : "=r" (x)
- : "i" (offsetof(struct paca_struct, irq_work_pending)));
- return x;
-}
-
static inline void set_irq_work_pending_flag(void)
{
asm volatile("stb %0,%1(13)" : :
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 42761ebec9f7..d89c5df4f206 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -49,7 +49,7 @@ ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
addr = ppc_function_entry((void *)addr);
/* if (link) set op to 'bl' else 'b' */
- create_branch(&op, (struct ppc_inst *)ip, addr, link ? 1 : 0);
+ create_branch(&op, (u32 *)ip, addr, link ? 1 : 0);
return op;
}
@@ -68,7 +68,7 @@ ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new)
*/
/* read the text we want to modify */
- if (probe_kernel_read_inst(&replaced, (void *)ip))
+ if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
return -EFAULT;
/* Make sure it is what we expect it to be */
@@ -79,7 +79,7 @@ ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new)
}
/* replace the text with the new text */
- if (patch_instruction((struct ppc_inst *)ip, new))
+ if (patch_instruction((u32 *)ip, new))
return -EPERM;
return 0;
@@ -94,7 +94,7 @@ static int test_24bit_addr(unsigned long ip, unsigned long addr)
addr = ppc_function_entry((void *)addr);
/* use the create_branch to verify that this offset can be branched */
- return create_branch(&op, (struct ppc_inst *)ip, addr, 0) == 0;
+ return create_branch(&op, (u32 *)ip, addr, 0) == 0;
}
static int is_bl_op(struct ppc_inst op)
@@ -130,7 +130,7 @@ __ftrace_make_nop(struct module *mod,
struct ppc_inst op, pop;
/* read where this goes */
- if (probe_kernel_read_inst(&op, (void *)ip)) {
+ if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n");
return -EFAULT;
}
@@ -162,15 +162,15 @@ __ftrace_make_nop(struct module *mod,
#ifdef CONFIG_MPROFILE_KERNEL
/* When using -mkernel_profile there is no load to jump over */
- pop = ppc_inst(PPC_INST_NOP);
+ pop = ppc_inst(PPC_RAW_NOP());
- if (probe_kernel_read_inst(&op, (void *)(ip - 4))) {
+ if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
pr_err("Fetching instruction at %lx failed.\n", ip - 4);
return -EFAULT;
}
/* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
- if (!ppc_inst_equal(op, ppc_inst(PPC_INST_MFLR)) &&
+ if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
!ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
pr_err("Unexpected instruction %s around bl _mcount\n",
ppc_inst_as_str(op));
@@ -197,18 +197,18 @@ __ftrace_make_nop(struct module *mod,
* Check what is in the next instruction. We can see ld r2,40(r1), but
* on first pass after boot we will see mflr r0.
*/
- if (probe_kernel_read_inst(&op, (void *)(ip + 4))) {
+ if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
pr_err("Fetching op failed.\n");
return -EFAULT;
}
if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
- pr_err("Expected %08x found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
+ pr_err("Expected %08lx found %s\n", PPC_INST_LD_TOC, ppc_inst_as_str(op));
return -EINVAL;
}
#endif /* CONFIG_MPROFILE_KERNEL */
- if (patch_instruction((struct ppc_inst *)ip, pop)) {
+ if (patch_instruction((u32 *)ip, pop)) {
pr_err("Patching NOP failed.\n");
return -EPERM;
}
@@ -278,9 +278,9 @@ __ftrace_make_nop(struct module *mod,
return -EINVAL;
}
- op = ppc_inst(PPC_INST_NOP);
+ op = ppc_inst(PPC_RAW_NOP());
- if (patch_instruction((struct ppc_inst *)ip, op))
+ if (patch_instruction((u32 *)ip, op))
return -EPERM;
return 0;
@@ -349,7 +349,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
return -1;
/* New trampoline -- read where this goes */
- if (probe_kernel_read_inst(&op, (void *)tramp)) {
+ if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
pr_debug("Fetching opcode failed.\n");
return -1;
}
@@ -380,7 +380,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
return -1;
}
- if (patch_branch((struct ppc_inst *)tramp, ptr, 0)) {
+ if (patch_branch((u32 *)tramp, ptr, 0)) {
pr_debug("REL24 out of range!\n");
return -1;
}
@@ -399,7 +399,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
struct ppc_inst op;
/* Read where this goes */
- if (probe_kernel_read_inst(&op, (void *)ip)) {
+ if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n");
return -EFAULT;
}
@@ -424,7 +424,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
}
}
- if (patch_instruction((struct ppc_inst *)ip, ppc_inst(PPC_INST_NOP))) {
+ if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
pr_err("Patching NOP failed.\n");
return -EPERM;
}
@@ -446,7 +446,7 @@ int ftrace_make_nop(struct module *mod,
if (test_24bit_addr(ip, addr)) {
/* within range */
old = ftrace_call_replace(ip, addr, 1);
- new = ppc_inst(PPC_INST_NOP);
+ new = ppc_inst(PPC_RAW_NOP());
return ftrace_modify_code(ip, old, new);
} else if (core_kernel_text(ip))
return __ftrace_make_nop_kernel(rec, addr);
@@ -510,7 +510,7 @@ static int
expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
{
/* look for patched "NOP" on ppc64 with -mprofile-kernel */
- if (!ppc_inst_equal(op0, ppc_inst(PPC_INST_NOP)))
+ if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP())))
return 0;
return 1;
}
@@ -526,10 +526,10 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
struct module *mod = rec->arch.mod;
/* read where this goes */
- if (probe_kernel_read_inst(op, ip))
+ if (copy_inst_from_kernel_nofault(op, ip))
return -EFAULT;
- if (probe_kernel_read_inst(op + 1, ip + 4))
+ if (copy_inst_from_kernel_nofault(op + 1, ip + 4))
return -EFAULT;
if (!expected_nop_sequence(ip, op[0], op[1])) {
@@ -589,14 +589,14 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
int err;
struct ppc_inst op;
- unsigned long ip = rec->ip;
+ u32 *ip = (u32 *)rec->ip;
/* read where this goes */
- if (probe_kernel_read_inst(&op, (void *)ip))
+ if (copy_inst_from_kernel_nofault(&op, ip))
return -EFAULT;
/* It should be pointing to a nop */
- if (!ppc_inst_equal(op, ppc_inst(PPC_INST_NOP))) {
+ if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
pr_err("Expected NOP but have %s\n", ppc_inst_as_str(op));
return -EINVAL;
}
@@ -608,8 +608,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
}
/* create the branch to the trampoline */
- err = create_branch(&op, (struct ppc_inst *)ip,
- rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
+ err = create_branch(&op, ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
if (err) {
pr_err("REL24 out of range!\n");
return -EINVAL;
@@ -617,7 +616,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
pr_devel("write to %lx\n", rec->ip);
- if (patch_instruction((struct ppc_inst *)ip, op))
+ if (patch_instruction(ip, op))
return -EPERM;
return 0;
@@ -648,12 +647,12 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
}
/* Make sure we have a nop */
- if (probe_kernel_read_inst(&op, ip)) {
+ if (copy_inst_from_kernel_nofault(&op, ip)) {
pr_err("Unable to read ftrace location %p\n", ip);
return -EFAULT;
}
- if (!ppc_inst_equal(op, ppc_inst(PPC_INST_NOP))) {
+ if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
pr_err("Unexpected call sequence at %p: %s\n", ip, ppc_inst_as_str(op));
return -EINVAL;
}
@@ -684,7 +683,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
*/
if (test_24bit_addr(ip, addr)) {
/* within range */
- old = ppc_inst(PPC_INST_NOP);
+ old = ppc_inst(PPC_RAW_NOP());
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
} else if (core_kernel_text(ip))
@@ -726,7 +725,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}
/* read where this goes */
- if (probe_kernel_read_inst(&op, (void *)ip)) {
+ if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n");
return -EFAULT;
}
@@ -762,7 +761,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
/* The new target may be within range */
if (test_24bit_addr(ip, addr)) {
/* within range */
- if (patch_branch((struct ppc_inst *)ip, addr, BRANCH_SET_LINK)) {
+ if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
}
@@ -790,12 +789,12 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}
/* Ensure branch is within 24 bits */
- if (create_branch(&op, (struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) {
+ if (create_branch(&op, (u32 *)ip, tramp, BRANCH_SET_LINK)) {
pr_err("Branch out of range\n");
return -EINVAL;
}
- if (patch_branch((struct ppc_inst *)ip, tramp, BRANCH_SET_LINK)) {
+ if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
}
@@ -851,7 +850,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
struct ppc_inst old, new;
int ret;
- old = ppc_inst_read((struct ppc_inst *)&ftrace_call);
+ old = ppc_inst_read((u32 *)&ftrace_call);
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
@@ -859,7 +858,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
/* Also update the regs callback function */
if (!ret) {
ip = (unsigned long)(&ftrace_regs_call);
- old = ppc_inst_read((struct ppc_inst *)&ftrace_regs_call);
+ old = ppc_inst_read((u32 *)&ftrace_regs_call);
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
}
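Across this file the conversion is consistent: fault-tolerant reads of kernel text go through copy_inst_from_kernel_nofault(), and patch sites are addressed as plain u32 * rather than struct ppc_inst *. A condensed sketch of the read-then-patch sequence; nop_out_sketch is a hypothetical helper, but the calls mirror the hunks above:

#include <linux/errno.h>
#include <asm/code-patching.h>
#include <asm/inst.h>
#include <asm/ppc-opcode.h>

static int nop_out_sketch(unsigned long ip)
{
	struct ppc_inst op;

	/* read kernel text without risking an unhandled fault */
	if (copy_inst_from_kernel_nofault(&op, (u32 *)ip))
		return -EFAULT;

	/* patch sites are now plain u32 pointers */
	if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP())))
		return -EPERM;

	return 0;
}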
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1583fd1c6010..dfbce527c98e 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -53,7 +53,6 @@
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
-#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
@@ -68,6 +67,7 @@
#include <asm/kprobes.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
+#include <asm/disassemble.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
@@ -222,7 +222,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
/*
* system_reset_exception handles debugger, crash dump, panic, for 0x100
*/
- if (TRAP(regs) == 0x100)
+ if (TRAP(regs) == INTERRUPT_SYSTEM_RESET)
return;
crash_fadump(regs, "die oops");
@@ -290,7 +290,7 @@ void die(const char *str, struct pt_regs *regs, long err)
/*
* system_reset_exception handles debugger, crash dump, panic, for 0x100
*/
- if (TRAP(regs) != 0x100) {
+ if (TRAP(regs) != INTERRUPT_SYSTEM_RESET) {
if (debugger(regs))
return;
}
@@ -405,7 +405,7 @@ void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
* Now test if the interrupt has hit a range that may be using
* HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The
* problem ranges all run un-relocated. Test real and virt modes
- * at the same time by droping the high bit of the nip (virt mode
+ * at the same time by dropping the high bit of the nip (virt mode
* entry points still have the +0x4000 offset).
*/
nip &= ~0xc000000000000000ULL;
@@ -428,7 +428,7 @@ void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
return;
nonrecoverable:
- regs->msr &= ~MSR_RI;
+ regs_set_return_msr(regs, regs->msr & ~MSR_RI);
#endif
}
DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
@@ -538,11 +538,11 @@ static inline int check_io_access(struct pt_regs *regs)
* For the debug message, we look at the preceding
* load or store.
*/
- if (*nip == PPC_INST_NOP)
+ if (*nip == PPC_RAW_NOP())
nip -= 2;
- else if (*nip == PPC_INST_ISYNC)
+ else if (*nip == PPC_RAW_ISYNC())
--nip;
- if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
+ if (*nip == PPC_RAW_SYNC() || get_op(*nip) == OP_TRAP) {
unsigned int rb;
--nip;
@@ -550,8 +550,8 @@ static inline int check_io_access(struct pt_regs *regs)
printk(KERN_DEBUG "%s bad port %lx at %p\n",
(*nip & 0x100)? "OUT to": "IN from",
regs->gpr[rb] - _IO_BASE, nip);
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
}
@@ -587,8 +587,8 @@ static inline int check_io_access(struct pt_regs *regs)
#define REASON_BOUNDARY SRR1_BOUNDARY
#define single_stepping(regs) ((regs)->msr & MSR_SE)
-#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
-#define clear_br_trace(regs) ((regs)->msr &= ~MSR_BE)
+#define clear_single_step(regs) (regs_set_return_msr((regs), (regs)->msr & ~MSR_SE))
+#define clear_br_trace(regs) (regs_set_return_msr((regs), (regs)->msr & ~MSR_BE))
#endif
#define inst_length(reason) (((reason) & REASON_PREFIXED) ? 8 : 4)
@@ -864,7 +864,7 @@ static void p9_hmi_special_emu(struct pt_regs *regs)
unsigned long ea, msr, msr_mask;
bool swap;
- if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
+ if (__get_user(instr, (unsigned int __user *)regs->nip))
return;
/*
@@ -1032,7 +1032,7 @@ static void p9_hmi_special_emu(struct pt_regs *regs)
#endif /* !__LITTLE_ENDIAN__ */
/* Go to next instruction */
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
}
#endif /* CONFIG_VSX */
@@ -1079,6 +1079,16 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception)
_exception(SIGTRAP, regs, TRAP_UNK, 0);
}
+DEFINE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception)
+{
+ printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
+ regs->nip, regs->msr, regs->trap);
+
+ _exception(SIGTRAP, regs, TRAP_UNK, 0);
+
+ return 0;
+}
+
DEFINE_INTERRUPT_HANDLER(instruction_breakpoint_exception)
{
if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
@@ -1309,7 +1319,6 @@ static int emulate_instruction(struct pt_regs *regs)
if (!user_mode(regs))
return -EINVAL;
- CHECK_FULL_REGS(regs);
if (get_user(instword, (u32 __user *)(regs->nip)))
return -EFAULT;
@@ -1406,7 +1415,6 @@ int is_valid_bugaddr(unsigned long addr)
static int emulate_math(struct pt_regs *regs)
{
int ret;
- extern int do_mathemu(struct pt_regs *regs);
ret = do_mathemu(regs);
if (ret >= 0)
@@ -1469,7 +1477,7 @@ static void do_program_check(struct pt_regs *regs)
if (!(regs->msr & MSR_PR) && /* not user-mode */
report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
return;
}
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
@@ -1531,7 +1539,7 @@ static void do_program_check(struct pt_regs *regs)
if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
switch (emulate_instruction(regs)) {
case 0:
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
emulate_single_step(regs);
return;
case -EFAULT:
@@ -1559,7 +1567,7 @@ DEFINE_INTERRUPT_HANDLER(program_check_exception)
*/
DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt)
{
- regs->msr |= REASON_ILLEGAL;
+ regs_set_return_msr(regs, regs->msr | REASON_ILLEGAL);
do_program_check(regs);
}
@@ -1586,7 +1594,7 @@ DEFINE_INTERRUPT_HANDLER(alignment_exception)
if (fixed == 1) {
/* skip over emulated instruction */
- regs->nip += inst_length(reason);
+ regs_add_return_ip(regs, inst_length(reason));
emulate_single_step(regs);
return;
}
@@ -1606,15 +1614,6 @@ bad:
bad_page_fault(regs, sig);
}
-DEFINE_INTERRUPT_HANDLER(StackOverflow)
-{
- pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n",
- current->comm, task_pid_nr(current), regs->gpr[1]);
- debugger(regs);
- show_regs(regs);
- panic("kernel stack overflow");
-}
-
DEFINE_INTERRUPT_HANDLER(stack_overflow_exception)
{
die("Kernel stack overflow", regs, SIGSEGV);
@@ -1661,7 +1660,7 @@ static void tm_unavailable(struct pt_regs *regs)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (user_mode(regs)) {
current->thread.load_tm++;
- regs->msr |= MSR_TM;
+ regs_set_return_msr(regs, regs->msr | MSR_TM);
tm_enable();
tm_restore_sprs(&current->thread);
return;
@@ -1693,7 +1692,7 @@ DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception)
u8 status;
bool hv;
- hv = (TRAP(regs) == 0xf80);
+ hv = (TRAP(regs) == INTERRUPT_H_FAC_UNAVAIL);
if (hv)
value = mfspr(SPRN_HFSCR);
else
@@ -1753,7 +1752,7 @@ DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception)
pr_err("DSCR based mfspr emulation failed\n");
return;
}
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
emulate_single_step(regs);
}
return;
@@ -1950,7 +1949,7 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
*/
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
current->thread.debug.dbcr1))
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
else
/* Make sure the IDM flag is off */
current->thread.debug.dbcr0 &= ~DBCR0_IDM;
@@ -1971,7 +1970,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
* instead of stopping here when hitting a BT
*/
if (debug_status & DBSR_BT) {
- regs->msr &= ~MSR_DE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
/* Disable BT */
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
@@ -1982,7 +1981,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
if (user_mode(regs)) {
current->thread.debug.dbcr0 &= ~DBCR0_BT;
current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
return;
}
@@ -1996,7 +1995,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
if (debugger_sstep(regs))
return;
} else if (debug_status & DBSR_IC) { /* Instruction complete */
- regs->msr &= ~MSR_DE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
/* Disable instruction completion */
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
@@ -2018,7 +2017,7 @@ DEFINE_INTERRUPT_HANDLER(DebugException)
current->thread.debug.dbcr0 &= ~DBCR0_IC;
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
current->thread.debug.dbcr1))
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
else
/* Make sure the IDM bit is off */
current->thread.debug.dbcr0 &= ~DBCR0_IDM;
@@ -2046,7 +2045,7 @@ DEFINE_INTERRUPT_HANDLER(altivec_assist_exception)
PPC_WARN_EMULATED(altivec, regs);
err = emulate_altivec(regs);
if (err == 0) {
- regs->nip += 4; /* skip emulated instruction */
+ regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
@@ -2111,7 +2110,7 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException)
err = do_spe_mathemu(regs);
if (err == 0) {
- regs->nip += 4; /* skip emulated instruction */
+ regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
@@ -2142,10 +2141,10 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
giveup_spe(current);
preempt_enable();
- regs->nip -= 4;
+ regs_add_return_ip(regs, -4);
err = speround_handler(regs);
if (err == 0) {
- regs->nip += 4; /* skip emulated instruction */
+ regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
@@ -2170,11 +2169,14 @@ DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
* in the MSR is 0. This indicates that SRR0/1 are live, and that
* we therefore lost state by taking this exception.
*/
-DEFINE_INTERRUPT_HANDLER(unrecoverable_exception)
+void __noreturn unrecoverable_exception(struct pt_regs *regs)
{
pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
regs->trap, regs->nip, regs->msr);
die("Unrecoverable exception", regs, SIGABRT);
+ /* die() should not return */
+ for (;;)
+ ;
}
#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
@@ -2189,10 +2191,11 @@ void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
return;
}
-DEFINE_INTERRUPT_HANDLER(WatchdogException) /* XXX NMI? async? */
+DEFINE_INTERRUPT_HANDLER_NMI(WatchdogException)
{
printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
WatchdogHandler(regs);
+ return 0;
}
#endif
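The traps.c conversion replaces every direct write to regs->msr and regs->nip with regs_set_return_msr(), regs_set_return_ip() and regs_add_return_ip(). The point of the accessors is that changing the interrupt return state must also invalidate any cached SRR0/SRR1 contents on Book3S-64. A hedged sketch of the accessor's shape; set_return_regs_changed() is assumed to clear the PACA's srr/hsrr valid flags, matching the PACASRR_VALID handling visible in the vector.S hunk below:

#include <asm/ptrace.h>

static inline void regs_set_return_msr_sketch(struct pt_regs *regs,
					      unsigned long msr)
{
	regs->msr = msr;
	/* the exit path must not reuse stale cached SRR values */
	set_return_regs_changed();
}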
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 9356b60d6030..8513aa49614e 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -296,3 +296,42 @@ void __init udbg_init_40x_realmode(void)
}
#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_MICROWATT
+
+#define UDBG_UART_MW_ADDR ((void __iomem *)0xc0002000)
+
+static u8 udbg_uart_in_isa300_rm(unsigned int reg)
+{
+ uint64_t msr = mfmsr();
+ uint8_t c;
+
+ mtmsr(msr & ~(MSR_EE|MSR_DR));
+ isync();
+ eieio();
+ c = __raw_rm_readb(UDBG_UART_MW_ADDR + (reg << 2));
+ mtmsr(msr);
+ isync();
+ return c;
+}
+
+static void udbg_uart_out_isa300_rm(unsigned int reg, u8 val)
+{
+ uint64_t msr = mfmsr();
+
+ mtmsr(msr & ~(MSR_EE|MSR_DR));
+ isync();
+ eieio();
+ __raw_rm_writeb(val, UDBG_UART_MW_ADDR + (reg << 2));
+ mtmsr(msr);
+ isync();
+}
+
+void __init udbg_init_debug_microwatt(void)
+{
+ udbg_uart_in = udbg_uart_in_isa300_rm;
+ udbg_uart_out = udbg_uart_out_isa300_rm;
+ udbg_use_uart();
+}
+
+#endif /* CONFIG_PPC_EARLY_DEBUG_MICROWATT */
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index e8a63713e655..c6975467d9ff 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -41,6 +41,13 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
if (addr & 0x03)
return -EINVAL;
+ if (cpu_has_feature(CPU_FTR_ARCH_31) &&
+ ppc_inst_prefixed(ppc_inst_read(auprobe->insn)) &&
+ (addr & 0x3f) == 60) {
+ pr_info_ratelimited("Cannot register a uprobe on 64 byte unaligned prefixed instruction\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -55,7 +62,7 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
autask->saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
- regs->nip = current->utask->xol_vaddr;
+ regs_set_return_ip(regs, current->utask->xol_vaddr);
user_enable_single_step(current);
return 0;
@@ -112,7 +119,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
* support doesn't exist and have to fix-up the next instruction
* to be executed.
*/
- regs->nip = (unsigned long)ppc_inst_next((void *)utask->vaddr, &auprobe->insn);
+ regs_set_return_ip(regs, (unsigned long)ppc_inst_next((void *)utask->vaddr, auprobe->insn));
user_disable_single_step(current);
return 0;
@@ -175,7 +182,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
* emulate_step() returns 1 if the insn was successfully emulated.
* For all other cases, we need to single-step in hardware.
*/
- ret = emulate_step(regs, ppc_inst_read(&auprobe->insn));
+ ret = emulate_step(regs, ppc_inst_read(auprobe->insn));
if (ret > 0)
return true;
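The new uprobes check rejects prefixed instructions whose word offset within a 64-byte block is 60: a prefixed instruction is two 4-byte words, and ISA 3.1 forbids the pair from crossing a 64-byte boundary, so offset 60 is the only word-aligned slot whose second word would spill into the next block. As a sketch:

#include <stdbool.h>

static bool prefixed_crosses_64b(unsigned long addr)
{
	/* 60 + 8 > 64: the last word-aligned slot straddles the block */
	return (addr & 0x3f) == 60;
}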
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index e839a906fdf2..717f2c9a7573 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -18,6 +18,7 @@
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
+#include <linux/time_namespace.h>
#include <vdso/datapage.h>
#include <asm/syscall.h>
@@ -50,15 +51,21 @@ static union {
} vdso_data_store __page_aligned_data;
struct vdso_arch_data *vdso_data = &vdso_data_store.data;
+enum vvar_pages {
+ VVAR_DATA_PAGE_OFFSET,
+ VVAR_TIMENS_PAGE_OFFSET,
+ VVAR_NR_PAGES,
+};
+
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma,
unsigned long text_size)
{
unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
- if (new_size != text_size + PAGE_SIZE)
+ if (new_size != text_size)
return -EINVAL;
- current->mm->context.vdso = (void __user *)new_vma->vm_start + PAGE_SIZE;
+ current->mm->context.vdso = (void __user *)new_vma->vm_start;
return 0;
}
@@ -73,6 +80,14 @@ static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_str
return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
}
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf);
+
+static struct vm_special_mapping vvar_spec __ro_after_init = {
+ .name = "[vvar]",
+ .fault = vvar_fault,
+};
+
static struct vm_special_mapping vdso32_spec __ro_after_init = {
.name = "[vdso]",
.mremap = vdso32_mremap,
@@ -83,17 +98,105 @@ static struct vm_special_mapping vdso64_spec __ro_after_init = {
.mremap = vdso64_mremap,
};
+#ifdef CONFIG_TIME_NS
+struct vdso_data *arch_get_vdso_data(void *vvar_page)
+{
+ return ((struct vdso_arch_data *)vvar_page)->data;
+}
+
+/*
+ * The vvar mapping contains data for a specific time namespace, so when a task
+ * changes namespace we must unmap its vvar data for the old namespace.
+ * Subsequent faults will map in data for the new namespace.
+ *
+ * For more details see timens_setup_vdso_data().
+ */
+int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
+{
+ struct mm_struct *mm = task->mm;
+ struct vm_area_struct *vma;
+
+ mmap_read_lock(mm);
+
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ unsigned long size = vma->vm_end - vma->vm_start;
+
+ if (vma_is_special_mapping(vma, &vvar_spec))
+ zap_page_range(vma, vma->vm_start, size);
+ }
+
+ mmap_read_unlock(mm);
+ return 0;
+}
+
+static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+ if (likely(vma->vm_mm == current->mm))
+ return current->nsproxy->time_ns->vvar_page;
+
+ /*
+ * VM_PFNMAP | VM_IO protect .fault() handler from being called
+ * through interfaces like /proc/$pid/mem or
+ * process_vm_{readv,writev}() as long as there's no .access()
+ * in special_mapping_vmops.
+ * For more details see check_vma_flags() and __access_remote_vm().
+ */
+ WARN(1, "vvar_page accessed remotely");
+
+ return NULL;
+}
+#else
+static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+#endif
+
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct page *timens_page = find_timens_vvar_page(vma);
+ unsigned long pfn;
+
+ switch (vmf->pgoff) {
+ case VVAR_DATA_PAGE_OFFSET:
+ if (timens_page)
+ pfn = page_to_pfn(timens_page);
+ else
+ pfn = virt_to_pfn(vdso_data);
+ break;
+#ifdef CONFIG_TIME_NS
+ case VVAR_TIMENS_PAGE_OFFSET:
+ /*
+ * If a task belongs to a time namespace then a namespace
+ * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
+ * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
+ * offset.
+ * See also the comment near timens_setup_vdso_data().
+ */
+ if (!timens_page)
+ return VM_FAULT_SIGBUS;
+ pfn = virt_to_pfn(vdso_data);
+ break;
+#endif /* CONFIG_TIME_NS */
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+
+ return vmf_insert_pfn(vma, vmf->address, pfn);
+}
+
/*
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
*/
static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
- struct mm_struct *mm = current->mm;
+ unsigned long vdso_size, vdso_base, mappings_size;
struct vm_special_mapping *vdso_spec;
+ unsigned long vvar_size = VVAR_NR_PAGES * PAGE_SIZE;
+ struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- unsigned long vdso_size;
- unsigned long vdso_base;
if (is_32bit_task()) {
vdso_spec = &vdso32_spec;
@@ -110,8 +213,8 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
vdso_base = 0;
}
- /* Add a page to the vdso size for the data page */
- vdso_size += PAGE_SIZE;
+ mappings_size = vdso_size + vvar_size;
+ mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
/*
* pick a base address for the vDSO in process space. We try to put it
@@ -119,9 +222,7 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
* and end up putting it elsewhere.
* Add enough to the size so that the result can be aligned.
*/
- vdso_base = get_unmapped_area(NULL, vdso_base,
- vdso_size + ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
- 0, 0);
+ vdso_base = get_unmapped_area(NULL, vdso_base, mappings_size, 0, 0);
if (IS_ERR_VALUE(vdso_base))
return vdso_base;
@@ -133,7 +234,13 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
* install_special_mapping or the perf counter mmap tracking code
* will fail to recognise it as a vDSO.
*/
- mm->context.vdso = (void __user *)vdso_base + PAGE_SIZE;
+ mm->context.vdso = (void __user *)vdso_base + vvar_size;
+
+ vma = _install_special_mapping(mm, vdso_base, vvar_size,
+ VM_READ | VM_MAYREAD | VM_IO |
+ VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
/*
* our vma flags don't have VM_WRITE so by default, the process isn't
@@ -145,9 +252,12 @@ static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_int
* It's fine to use that for setting breakpoints in the vDSO code
* pages though.
*/
- vma = _install_special_mapping(mm, vdso_base, vdso_size,
+ vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
VM_READ | VM_EXEC | VM_MAYREAD |
VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
+ if (IS_ERR(vma))
+ do_munmap(mm, vdso_base, vvar_size, NULL);
+
return PTR_ERR_OR_ZERO(vma);
}
@@ -249,10 +359,8 @@ static struct page ** __init vdso_setup_pages(void *start, void *end)
if (!pagelist)
panic("%s: Cannot allocate page list for VDSO", __func__);
- pagelist[0] = virt_to_page(vdso_data);
-
for (i = 0; i < pages; i++)
- pagelist[i + 1] = virt_to_page(start + i * PAGE_SIZE);
+ pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
return pagelist;
}
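The vdso.c rework splits the old single mapping into a PFN-mapped [vvar] area (VVAR_NR_PAGES pages, demand-faulted by vvar_fault() so a time-namespace page can be substituted) followed by the [vdso] text; _vdso_datapage in the vdso32/vdso64 linker scripts moves back by 2 * PAGE_SIZE for the same reason (see the lds hunks below). A small sketch of the resulting layout arithmetic, assuming the VVAR_NR_PAGES enum above is visible:

#include <asm/page.h>

/*
 *   vdso_base              -> [vvar]  VVAR_NR_PAGES * PAGE_SIZE, VM_PFNMAP
 *   vdso_base + vvar_size  -> [vdso]  vdso_size bytes of text
 */
static unsigned long vdso_text_start(unsigned long vdso_base)
{
	/* mm->context.vdso points here, past the vvar pages */
	return vdso_base + VVAR_NR_PAGES * PAGE_SIZE;
}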
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index a6e29f880e0e..d21d08140a5e 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -65,3 +65,14 @@ V_FUNCTION_END(__kernel_clock_getres)
V_FUNCTION_BEGIN(__kernel_time)
cvdso_call_time __c_kernel_time
V_FUNCTION_END(__kernel_time)
+
+/* Routines for restoring integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer restore area. */
+_GLOBAL(_restgpr_31_x)
+_GLOBAL(_rest32gpr_31_x)
+ lwz r0,4(r11)
+ lwz r31,-4(r11)
+ mtlr r0
+ mr r1,r11
+ blr
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index a4b806b0d618..58e0099f70f4 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -17,7 +17,7 @@ ENTRY(_start)
SECTIONS
{
- PROVIDE(_vdso_datapage = . - PAGE_SIZE);
+ PROVIDE(_vdso_datapage = . - 2 * PAGE_SIZE);
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 2f3c359cacd3..0288cad428b0 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -17,7 +17,7 @@ ENTRY(_start)
SECTIONS
{
- PROVIDE(_vdso_datapage = . - PAGE_SIZE);
+ PROVIDE(_vdso_datapage = . - 2 * PAGE_SIZE);
. = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 801dc28fdcca..fc120fac1910 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -67,14 +67,16 @@ _GLOBAL(load_up_altivec)
#ifdef CONFIG_PPC32
mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
oris r9,r9,MSR_VEC@h
-#ifdef CONFIG_VMAP_STACK
tovirt(r5, r5)
-#endif
#else
ld r4,PACACURRENT(r13)
addi r5,r4,THREAD /* Get THREAD */
oris r12,r12,MSR_VEC@h
std r12,_MSR(r1)
+#ifdef CONFIG_PPC_BOOK3S_64
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+#endif
#endif
li r4,1
stb r4,THREAD_LOAD_VEC(r5)
@@ -133,7 +135,9 @@ _GLOBAL(load_up_vsx)
/* enable use of VSX after return */
oris r12,r12,MSR_VSX@h
std r12,_MSR(r1)
- b fast_interrupt_return
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+ b fast_interrupt_return_srr
#endif /* CONFIG_VSX */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 72fa3c00229a..40bdefe9caa7 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -9,6 +9,22 @@
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN 0
+#define SOFT_MASK_TABLE(align) \
+ . = ALIGN(align); \
+ __soft_mask_table : AT(ADDR(__soft_mask_table) - LOAD_OFFSET) { \
+ __start___soft_mask_table = .; \
+ KEEP(*(__soft_mask_table)) \
+ __stop___soft_mask_table = .; \
+ }
+
+#define RESTART_TABLE(align) \
+ . = ALIGN(align); \
+ __restart_table : AT(ADDR(__restart_table) - LOAD_OFFSET) { \
+ __start___restart_table = .; \
+ KEEP(*(__restart_table)) \
+ __stop___restart_table = .; \
+ }
+
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
@@ -124,6 +140,9 @@ SECTIONS
RO_DATA(PAGE_SIZE)
#ifdef CONFIG_PPC64
+ SOFT_MASK_TABLE(8)
+ RESTART_TABLE(8)
+
. = ALIGN(8);
__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
__start___stf_entry_barrier_fixup = .;
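SOFT_MASK_TABLE and RESTART_TABLE follow the standard linker-table pattern: KEEP()ed input sections bracketed by __start/__stop symbols that C code can walk. A hedged consumption sketch; the two-field entry layout here is an assumption for illustration, since the real tables define their own entry format:

struct restart_entry {
	unsigned long start;
	unsigned long fixup;
};

extern struct restart_entry __start___restart_table[];
extern struct restart_entry __stop___restart_table[];

static unsigned long search_restart_table_sketch(unsigned long addr)
{
	struct restart_entry *re;

	for (re = __start___restart_table; re < __stop___restart_table; re++)
		if (re->start == addr)
			return re->fixup;
	return 0;
}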
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index c9a8f4781a10..a165635fd214 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -24,6 +24,7 @@
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
+#include <linux/processor.h>
#include <linux/smp.h>
#include <asm/interrupt.h>
diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile
index 4aff6846c772..b6c52608cb49 100644
--- a/arch/powerpc/kexec/Makefile
+++ b/arch/powerpc/kexec/Makefile
@@ -9,13 +9,6 @@ obj-$(CONFIG_PPC32) += relocate_32.o
obj-$(CONFIG_KEXEC_FILE) += file_load.o ranges.o file_load_$(BITS).o elf_$(BITS).o
-ifdef CONFIG_HAVE_IMA_KEXEC
-ifdef CONFIG_IMA
-obj-y += ima.o
-endif
-endif
-
-
# Disable GCOV, KCOV & sanitizers in odd or sensitive code
GCOV_PROFILE_core_$(BITS).o := n
KCOV_INSTRUMENT_core_$(BITS).o := n
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 56da5eb2b923..48525e8b5730 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -68,11 +68,11 @@ void machine_kexec_cleanup(struct kimage *image)
void arch_crash_save_vmcoreinfo(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
VMCOREINFO_SYMBOL(node_data);
VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
VMCOREINFO_SYMBOL(contig_page_data);
#endif
#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP)
diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c
index c9a889880214..10f997e6bb95 100644
--- a/arch/powerpc/kexec/crash.c
+++ b/arch/powerpc/kexec/crash.c
@@ -24,6 +24,7 @@
#include <asm/smp.h>
#include <asm/setjmp.h>
#include <asm/debug.h>
+#include <asm/interrupt.h>
/*
* The primary CPU waits a while for all secondary CPUs to enter. This is to
@@ -104,8 +105,8 @@ void crash_ipi_callback(struct pt_regs *regs)
static void crash_kexec_prepare_cpus(int cpu)
{
unsigned int msecs;
- unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
- int tries = 0;
+ volatile unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+ volatile int tries = 0;
int (*old_handler)(struct pt_regs *regs);
printk(KERN_EMERG "Sending IPI to other CPUs\n");
@@ -336,7 +337,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
* If we came in via system reset, wait a while for the secondary
* CPUs to enter.
*/
- if (TRAP(regs) == 0x100)
+ if (TRAP(regs) == INTERRUPT_SYSTEM_RESET)
mdelay(PRIMARY_TIMEOUT);
crash_kexec_prepare_cpus(crashing_cpu);
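crash.c brackets its shutdown handling with setjmp() (note the asm/setjmp.h include in the surrounding context), and locals that are modified after setjmp() and read after longjmp() have indeterminate values unless declared volatile (C11 7.13.2.1), which is the likely reason ncpus and tries gain the qualifier. A userspace illustration:

#include <setjmp.h>

static jmp_buf env;

static int demo(void)
{
	volatile int tries = 0;	/* without volatile: indeterminate */

	if (setjmp(env) == 0) {
		tries++;	/* modified between setjmp and longjmp */
		longjmp(env, 1);
	}
	return tries;		/* well-defined: returns 1 */
}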
diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c
index 9842e33533df..eeb258002d1e 100644
--- a/arch/powerpc/kexec/elf_64.c
+++ b/arch/powerpc/kexec/elf_64.c
@@ -19,6 +19,7 @@
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -29,7 +30,6 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
unsigned long cmdline_len)
{
int ret;
- unsigned int fdt_size;
unsigned long kernel_load_addr;
unsigned long initrd_load_addr = 0, fdt_load_addr;
void *fdt;
@@ -45,7 +45,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info);
if (ret)
- goto out;
+ return ERR_PTR(ret);
if (image->type == KEXEC_TYPE_CRASH) {
/* min & max buffer values for kdump case */
@@ -102,15 +102,10 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
pr_debug("Loaded initrd at 0x%lx\n", initrd_load_addr);
}
- fdt_size = kexec_fdt_totalsize_ppc64(image);
- fdt = kmalloc(fdt_size, GFP_KERNEL);
+ fdt = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
+ initrd_len, cmdline,
+ kexec_extra_fdt_size_ppc64(image));
if (!fdt) {
- pr_err("Not enough memory for the device tree.\n");
- ret = -ENOMEM;
- goto out;
- }
- ret = fdt_open_into(initial_boot_params, fdt, fdt_size);
- if (ret < 0) {
pr_err("Error setting up the new device tree.\n");
ret = -EINVAL;
goto out;
@@ -119,18 +114,22 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
ret = setup_new_fdt_ppc64(image, fdt, initrd_load_addr,
initrd_len, cmdline);
if (ret)
- goto out;
+ goto out_free_fdt;
fdt_pack(fdt);
kbuf.buffer = fdt;
- kbuf.bufsz = kbuf.memsz = fdt_size;
+ kbuf.bufsz = kbuf.memsz = fdt_totalsize(fdt);
kbuf.buf_align = PAGE_SIZE;
kbuf.top_down = true;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret)
- goto out;
+ goto out_free_fdt;
+
+ /* FDT will be freed in arch_kimage_file_post_load_cleanup */
+ image->arch.fdt = fdt;
+
fdt_load_addr = kbuf.mem;
pr_debug("Loaded device tree at 0x%lx\n", fdt_load_addr);
@@ -141,12 +140,15 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
if (ret)
pr_err("Error setting up the purgatory.\n");
+ goto out;
+
+out_free_fdt:
+ kvfree(fdt);
out:
kfree(modified_cmdline);
kexec_free_elf_info(&elf_info);
- /* Make kimage_file_post_load_cleanup free the fdt buffer for us. */
- return ret ? ERR_PTR(ret) : fdt;
+ return ret ? ERR_PTR(ret) : NULL;
}
const struct kexec_file_ops kexec_elf64_ops = {
diff --git a/arch/powerpc/kexec/file_load.c b/arch/powerpc/kexec/file_load.c
index 9a232bc36c8f..4284f76cbef5 100644
--- a/arch/powerpc/kexec/file_load.c
+++ b/arch/powerpc/kexec/file_load.c
@@ -19,7 +19,6 @@
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <asm/setup.h>
-#include <asm/ima.h>
#define SLAVE_CODE_SIZE 256 /* First 0x100 bytes */
@@ -45,7 +44,7 @@ char *setup_kdump_cmdline(struct kimage *image, char *cmdline,
return NULL;
elfcorehdr_strlen = sprintf(cmdline_ptr, "elfcorehdr=0x%lx ",
- image->arch.elfcorehdr_addr);
+ image->elf_load_addr);
if (elfcorehdr_strlen + cmdline_len > COMMAND_LINE_SIZE) {
pr_err("Appending elfcorehdr=<addr> exceeds cmdline size\n");
@@ -108,183 +107,3 @@ int setup_purgatory(struct kimage *image, const void *slave_code,
return 0;
}
-
-/**
- * delete_fdt_mem_rsv - delete memory reservation with given address and size
- *
- * Return: 0 on success, or negative errno on error.
- */
-int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size)
-{
- int i, ret, num_rsvs = fdt_num_mem_rsv(fdt);
-
- for (i = 0; i < num_rsvs; i++) {
- uint64_t rsv_start, rsv_size;
-
- ret = fdt_get_mem_rsv(fdt, i, &rsv_start, &rsv_size);
- if (ret) {
- pr_err("Malformed device tree.\n");
- return -EINVAL;
- }
-
- if (rsv_start == start && rsv_size == size) {
- ret = fdt_del_mem_rsv(fdt, i);
- if (ret) {
- pr_err("Error deleting device tree reservation.\n");
- return -EINVAL;
- }
-
- return 0;
- }
- }
-
- return -ENOENT;
-}
-
-/*
- * setup_new_fdt - modify /chosen and memory reservation for the next kernel
- * @image: kexec image being loaded.
- * @fdt: Flattened device tree for the next kernel.
- * @initrd_load_addr: Address where the next initrd will be loaded.
- * @initrd_len: Size of the next initrd, or 0 if there will be none.
- * @cmdline: Command line for the next kernel, or NULL if there will
- * be none.
- *
- * Return: 0 on success, or negative errno on error.
- */
-int setup_new_fdt(const struct kimage *image, void *fdt,
- unsigned long initrd_load_addr, unsigned long initrd_len,
- const char *cmdline)
-{
- int ret, chosen_node;
- const void *prop;
-
- /* Remove memory reservation for the current device tree. */
- ret = delete_fdt_mem_rsv(fdt, __pa(initial_boot_params),
- fdt_totalsize(initial_boot_params));
- if (ret == 0)
- pr_debug("Removed old device tree reservation.\n");
- else if (ret != -ENOENT)
- return ret;
-
- chosen_node = fdt_path_offset(fdt, "/chosen");
- if (chosen_node == -FDT_ERR_NOTFOUND) {
- chosen_node = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"),
- "chosen");
- if (chosen_node < 0) {
- pr_err("Error creating /chosen.\n");
- return -EINVAL;
- }
- } else if (chosen_node < 0) {
- pr_err("Malformed device tree: error reading /chosen.\n");
- return -EINVAL;
- }
-
- /* Did we boot using an initrd? */
- prop = fdt_getprop(fdt, chosen_node, "linux,initrd-start", NULL);
- if (prop) {
- uint64_t tmp_start, tmp_end, tmp_size;
-
- tmp_start = fdt64_to_cpu(*((const fdt64_t *) prop));
-
- prop = fdt_getprop(fdt, chosen_node, "linux,initrd-end", NULL);
- if (!prop) {
- pr_err("Malformed device tree.\n");
- return -EINVAL;
- }
- tmp_end = fdt64_to_cpu(*((const fdt64_t *) prop));
-
- /*
- * kexec reserves exact initrd size, while firmware may
- * reserve a multiple of PAGE_SIZE, so check for both.
- */
- tmp_size = tmp_end - tmp_start;
- ret = delete_fdt_mem_rsv(fdt, tmp_start, tmp_size);
- if (ret == -ENOENT)
- ret = delete_fdt_mem_rsv(fdt, tmp_start,
- round_up(tmp_size, PAGE_SIZE));
- if (ret == 0)
- pr_debug("Removed old initrd reservation.\n");
- else if (ret != -ENOENT)
- return ret;
-
- /* If there's no new initrd, delete the old initrd's info. */
- if (initrd_len == 0) {
- ret = fdt_delprop(fdt, chosen_node,
- "linux,initrd-start");
- if (ret) {
- pr_err("Error deleting linux,initrd-start.\n");
- return -EINVAL;
- }
-
- ret = fdt_delprop(fdt, chosen_node, "linux,initrd-end");
- if (ret) {
- pr_err("Error deleting linux,initrd-end.\n");
- return -EINVAL;
- }
- }
- }
-
- if (initrd_len) {
- ret = fdt_setprop_u64(fdt, chosen_node,
- "linux,initrd-start",
- initrd_load_addr);
- if (ret < 0)
- goto err;
-
- /* initrd-end is the first address after the initrd image. */
- ret = fdt_setprop_u64(fdt, chosen_node, "linux,initrd-end",
- initrd_load_addr + initrd_len);
- if (ret < 0)
- goto err;
-
- ret = fdt_add_mem_rsv(fdt, initrd_load_addr, initrd_len);
- if (ret) {
- pr_err("Error reserving initrd memory: %s\n",
- fdt_strerror(ret));
- return -EINVAL;
- }
- }
-
- if (cmdline != NULL) {
- ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline);
- if (ret < 0)
- goto err;
- } else {
- ret = fdt_delprop(fdt, chosen_node, "bootargs");
- if (ret && ret != -FDT_ERR_NOTFOUND) {
- pr_err("Error deleting bootargs.\n");
- return -EINVAL;
- }
- }
-
- if (image->type == KEXEC_TYPE_CRASH) {
- /*
- * Avoid elfcorehdr from being stomped on in kdump kernel by
- * setting up memory reserve map.
- */
- ret = fdt_add_mem_rsv(fdt, image->arch.elfcorehdr_addr,
- image->arch.elf_headers_sz);
- if (ret) {
- pr_err("Error reserving elfcorehdr memory: %s\n",
- fdt_strerror(ret));
- goto err;
- }
- }
-
- ret = setup_ima_buffer(image, fdt, chosen_node);
- if (ret) {
- pr_err("Error setting up the new device tree.\n");
- return ret;
- }
-
- ret = fdt_setprop(fdt, chosen_node, "linux,booted-from-kexec", NULL, 0);
- if (ret)
- goto err;
-
- return 0;
-
-err:
- pr_err("Error setting up the new device tree.\n");
- return -EINVAL;
-}
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index 02b9e4d0dc40..5056e175ca2c 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -816,9 +816,9 @@ static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
goto out;
}
- image->arch.elfcorehdr_addr = kbuf->mem;
- image->arch.elf_headers_sz = headers_sz;
- image->arch.elf_headers = headers;
+ image->elf_load_addr = kbuf->mem;
+ image->elf_headers_sz = headers_sz;
+ image->elf_headers = headers;
out:
kfree(cmem);
return ret;
@@ -852,7 +852,7 @@ int load_crashdump_segments_ppc64(struct kimage *image,
return ret;
}
pr_debug("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
- image->arch.elfcorehdr_addr, kbuf->bufsz, kbuf->memsz);
+ image->elf_load_addr, kbuf->bufsz, kbuf->memsz);
return 0;
}
@@ -927,37 +927,114 @@ out:
}
/**
- * kexec_fdt_totalsize_ppc64 - Return the estimated size needed to setup FDT
- * for kexec/kdump kernel.
- * @image: kexec image being loaded.
+ * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
+ * set up the FDT for the kexec/kdump kernel.
+ * @image: kexec image being loaded.
*
- * Returns the estimated size needed for kexec/kdump kernel FDT.
+ * Returns the estimated extra size needed for kexec/kdump kernel FDT.
*/
-unsigned int kexec_fdt_totalsize_ppc64(struct kimage *image)
+unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
{
- unsigned int fdt_size;
u64 usm_entries;
- /*
- * The below estimate more than accounts for a typical kexec case where
- * the additional space is to accommodate things like kexec cmdline,
- * chosen node with properties for initrd start & end addresses and
- * a property to indicate kexec boot..
- */
- fdt_size = fdt_totalsize(initial_boot_params) + (2 * COMMAND_LINE_SIZE);
if (image->type != KEXEC_TYPE_CRASH)
- return fdt_size;
+ return 0;
/*
- * For kdump kernel, also account for linux,usable-memory and
+ * For kdump kernel, account for linux,usable-memory and
* linux,drconf-usable-memory properties. Get an approximate on the
* number of usable memory entries and use for FDT size estimation.
*/
usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
(2 * (resource_size(&crashk_res) / drmem_lmb_size())));
- fdt_size += (unsigned int)(usm_entries * sizeof(u64));
+ return (unsigned int)(usm_entries * sizeof(u64));
+}
+
+/**
+ * add_node_props - Read node properties from a device node structure and add
+ * them to the fdt.
+ * @fdt: Flattened device tree of the kernel
+ * @node_offset: offset of the node to add a property at
+ * @dn: device node pointer
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
+{
+ int ret = 0;
+ struct property *pp;
- return fdt_size;
+ if (!dn)
+ return -EINVAL;
+
+ for_each_property_of_node(dn, pp) {
+ ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
+ if (ret < 0) {
+ pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/**
+ * update_cpus_node - Update cpus node of flattened device tree using of_root
+ * device node.
+ * @fdt: Flattened device tree of the kernel.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int update_cpus_node(void *fdt)
+{
+ struct device_node *cpus_node, *dn;
+ int cpus_offset, cpus_subnode_offset, ret = 0;
+
+ cpus_offset = fdt_path_offset(fdt, "/cpus");
+ if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
+ pr_err("Malformed device tree: error reading /cpus node: %s\n",
+ fdt_strerror(cpus_offset));
+ return cpus_offset;
+ }
+
+ if (cpus_offset > 0) {
+ ret = fdt_del_node(fdt, cpus_offset);
+ if (ret < 0) {
+ pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
+ return -EINVAL;
+ }
+ }
+
+ /* Add cpus node to fdt */
+ cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
+ if (cpus_offset < 0) {
+ pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
+ return -EINVAL;
+ }
+
+ /* Add cpus node properties */
+ cpus_node = of_find_node_by_path("/cpus");
+ ret = add_node_props(fdt, cpus_offset, cpus_node);
+ of_node_put(cpus_node);
+ if (ret < 0)
+ return ret;
+
+ /* Loop through all subnodes of cpus and add them to fdt */
+ for_each_node_by_type(dn, "cpu") {
+ cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
+ if (cpus_subnode_offset < 0) {
+ pr_err("Unable to add %s subnode: %s\n", dn->full_name,
+ fdt_strerror(cpus_subnode_offset));
+ ret = cpus_subnode_offset;
+ goto out;
+ }
+
+ ret = add_node_props(fdt, cpus_subnode_offset, dn);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ of_node_put(dn);
+ return ret;
}
/**
@@ -979,10 +1056,6 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
struct crash_mem *umem = NULL, *rmem = NULL;
int i, nr_ranges, ret;
- ret = setup_new_fdt(image, fdt, initrd_load_addr, initrd_len, cmdline);
- if (ret)
- goto out;
-
/*
* Restrict memory usage for kdump kernel by setting up
* usable memory ranges and memory reserve map.
@@ -1020,6 +1093,11 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
}
}
+ /* Update cpus nodes information to account hotplug CPUs. */
+ ret = update_cpus_node(fdt);
+ if (ret < 0)
+ goto out;
+
/* Update memory reserve map */
ret = get_reserved_memory_ranges(&rmem);
if (ret)
@@ -1142,9 +1220,12 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image)
vfree(image->arch.backup_buf);
image->arch.backup_buf = NULL;
- vfree(image->arch.elf_headers);
- image->arch.elf_headers = NULL;
- image->arch.elf_headers_sz = 0;
+ vfree(image->elf_headers);
+ image->elf_headers = NULL;
+ image->elf_headers_sz = 0;
+
+ kvfree(image->arch.fdt);
+ image->arch.fdt = NULL;
return kexec_image_post_load_cleanup_default(image);
}
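update_cpus_node() leans on the of_node refcounting contract: for_each_node_by_type() drops the previous node's reference on each step, so only a node still held after an early goto needs the final of_node_put(dn) at the out label (a no-op when the loop ran to completion and dn is NULL). A small sketch of the same contract:

#include <linux/errno.h>
#include <linux/of.h>

static int count_cpu_nodes_sketch(void)
{
	struct device_node *dn;
	int n = 0;

	for_each_node_by_type(dn, "cpu") {
		if (!of_device_is_available(dn)) {
			of_node_put(dn);	/* early exit: drop held ref */
			return -ENODEV;
		}
		n++;	/* the iterator puts dn before advancing */
	}
	return n;
}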
diff --git a/arch/powerpc/kexec/ima.c b/arch/powerpc/kexec/ima.c
deleted file mode 100644
index 720e50e490b6..000000000000
--- a/arch/powerpc/kexec/ima.c
+++ /dev/null
@@ -1,219 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2016 IBM Corporation
- *
- * Authors:
- * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
- */
-
-#include <linux/slab.h>
-#include <linux/kexec.h>
-#include <linux/of.h>
-#include <linux/memblock.h>
-#include <linux/libfdt.h>
-
-static int get_addr_size_cells(int *addr_cells, int *size_cells)
-{
- struct device_node *root;
-
- root = of_find_node_by_path("/");
- if (!root)
- return -EINVAL;
-
- *addr_cells = of_n_addr_cells(root);
- *size_cells = of_n_size_cells(root);
-
- of_node_put(root);
-
- return 0;
-}
-
-static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr,
- size_t *size)
-{
- int ret, addr_cells, size_cells;
-
- ret = get_addr_size_cells(&addr_cells, &size_cells);
- if (ret)
- return ret;
-
- if (len < 4 * (addr_cells + size_cells))
- return -ENOENT;
-
- *addr = of_read_number(prop, addr_cells);
- *size = of_read_number(prop + 4 * addr_cells, size_cells);
-
- return 0;
-}
-
-/**
- * ima_get_kexec_buffer - get IMA buffer from the previous kernel
- * @addr: On successful return, set to point to the buffer contents.
- * @size: On successful return, set to the buffer size.
- *
- * Return: 0 on success, negative errno on error.
- */
-int ima_get_kexec_buffer(void **addr, size_t *size)
-{
- int ret, len;
- unsigned long tmp_addr;
- size_t tmp_size;
- const void *prop;
-
- prop = of_get_property(of_chosen, "linux,ima-kexec-buffer", &len);
- if (!prop)
- return -ENOENT;
-
- ret = do_get_kexec_buffer(prop, len, &tmp_addr, &tmp_size);
- if (ret)
- return ret;
-
- *addr = __va(tmp_addr);
- *size = tmp_size;
-
- return 0;
-}
-
-/**
- * ima_free_kexec_buffer - free memory used by the IMA buffer
- */
-int ima_free_kexec_buffer(void)
-{
- int ret;
- unsigned long addr;
- size_t size;
- struct property *prop;
-
- prop = of_find_property(of_chosen, "linux,ima-kexec-buffer", NULL);
- if (!prop)
- return -ENOENT;
-
- ret = do_get_kexec_buffer(prop->value, prop->length, &addr, &size);
- if (ret)
- return ret;
-
- ret = of_remove_property(of_chosen, prop);
- if (ret)
- return ret;
-
- return memblock_free(addr, size);
-
-}
-
-/**
- * remove_ima_buffer - remove the IMA buffer property and reservation from @fdt
- *
- * The IMA measurement buffer is of no use to a subsequent kernel, so we always
- * remove it from the device tree.
- */
-void remove_ima_buffer(void *fdt, int chosen_node)
-{
- int ret, len;
- unsigned long addr;
- size_t size;
- const void *prop;
-
- prop = fdt_getprop(fdt, chosen_node, "linux,ima-kexec-buffer", &len);
- if (!prop)
- return;
-
- ret = do_get_kexec_buffer(prop, len, &addr, &size);
- fdt_delprop(fdt, chosen_node, "linux,ima-kexec-buffer");
- if (ret)
- return;
-
- ret = delete_fdt_mem_rsv(fdt, addr, size);
- if (!ret)
- pr_debug("Removed old IMA buffer reservation.\n");
-}
-
-#ifdef CONFIG_IMA_KEXEC
-/**
- * arch_ima_add_kexec_buffer - do arch-specific steps to add the IMA buffer
- *
- * Architectures should use this function to pass on the IMA buffer
- * information to the next kernel.
- *
- * Return: 0 on success, negative errno on error.
- */
-int arch_ima_add_kexec_buffer(struct kimage *image, unsigned long load_addr,
- size_t size)
-{
- image->arch.ima_buffer_addr = load_addr;
- image->arch.ima_buffer_size = size;
-
- return 0;
-}
-
-static int write_number(void *p, u64 value, int cells)
-{
- if (cells == 1) {
- u32 tmp;
-
- if (value > U32_MAX)
- return -EINVAL;
-
- tmp = cpu_to_be32(value);
- memcpy(p, &tmp, sizeof(tmp));
- } else if (cells == 2) {
- u64 tmp;
-
- tmp = cpu_to_be64(value);
- memcpy(p, &tmp, sizeof(tmp));
- } else
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * setup_ima_buffer - add IMA buffer information to the fdt
- * @image: kexec image being loaded.
- * @fdt: Flattened device tree for the next kernel.
- * @chosen_node: Offset to the chosen node.
- *
- * Return: 0 on success, or negative errno on error.
- */
-int setup_ima_buffer(const struct kimage *image, void *fdt, int chosen_node)
-{
- int ret, addr_cells, size_cells, entry_size;
- u8 value[16];
-
- remove_ima_buffer(fdt, chosen_node);
- if (!image->arch.ima_buffer_size)
- return 0;
-
- ret = get_addr_size_cells(&addr_cells, &size_cells);
- if (ret)
- return ret;
-
- entry_size = 4 * (addr_cells + size_cells);
-
- if (entry_size > sizeof(value))
- return -EINVAL;
-
- ret = write_number(value, image->arch.ima_buffer_addr, addr_cells);
- if (ret)
- return ret;
-
- ret = write_number(value + 4 * addr_cells, image->arch.ima_buffer_size,
- size_cells);
- if (ret)
- return ret;
-
- ret = fdt_setprop(fdt, chosen_node, "linux,ima-kexec-buffer", value,
- entry_size);
- if (ret < 0)
- return -EINVAL;
-
- ret = fdt_add_mem_rsv(fdt, image->arch.ima_buffer_addr,
- image->arch.ima_buffer_size);
- if (ret)
- return -EINVAL;
-
- pr_debug("IMA buffer at 0x%llx, size = 0x%zx\n",
- image->arch.ima_buffer_addr, image->arch.ima_buffer_size);
-
- return 0;
-}
-#endif /* CONFIG_IMA_KEXEC */
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 2bfeaa13befb..583c14ef596e 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -6,7 +6,7 @@
ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
KVM := ../../../virt/kvm
-common-objs-y = $(KVM)/kvm_main.o $(KVM)/eventfd.o
+common-objs-y = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/binary_stats.o
common-objs-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o
common-objs-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o
@@ -57,6 +57,7 @@ kvm-pr-y := \
book3s_32_mmu.o
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+ book3s_64_entry.o \
tm.o
ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -86,6 +87,7 @@ kvm-book3s_64-builtin-tm-objs-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
book3s_hv_hmi.o \
+ book3s_hv_p9_entry.o \
book3s_hv_rmhandlers.o \
book3s_hv_rm_mmu.o \
book3s_hv_ras.o \
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 44bf567b6589..79833f78d1da 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -38,37 +38,66 @@
/* #define EXIT_DEBUG */
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("exits", sum_exits),
- VCPU_STAT("mmio", mmio_exits),
- VCPU_STAT("sig", signal_exits),
- VCPU_STAT("sysc", syscall_exits),
- VCPU_STAT("inst_emu", emulated_inst_exits),
- VCPU_STAT("dec", dec_exits),
- VCPU_STAT("ext_intr", ext_intr_exits),
- VCPU_STAT("queue_intr", queue_intr),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- VCPU_STAT("halt_wait_ns", halt_wait_ns),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_successful_wait", halt_successful_wait),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("pf_storage", pf_storage),
- VCPU_STAT("sp_storage", sp_storage),
- VCPU_STAT("pf_instruc", pf_instruc),
- VCPU_STAT("sp_instruc", sp_instruc),
- VCPU_STAT("ld", ld),
- VCPU_STAT("ld_slow", ld_slow),
- VCPU_STAT("st", st),
- VCPU_STAT("st_slow", st_slow),
- VCPU_STAT("pthru_all", pthru_all),
- VCPU_STAT("pthru_host", pthru_host),
- VCPU_STAT("pthru_bad_aff", pthru_bad_aff),
- VM_STAT("largepages_2M", num_2M_pages, .mode = 0444),
- VM_STAT("largepages_1G", num_1G_pages, .mode = 0444),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_ICOUNTER(VM, num_2M_pages),
+ STATS_DESC_ICOUNTER(VM, num_1G_pages)
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, sum_exits),
+ STATS_DESC_COUNTER(VCPU, mmio_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, light_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, syscall_exits),
+ STATS_DESC_COUNTER(VCPU, isi_exits),
+ STATS_DESC_COUNTER(VCPU, dsi_exits),
+ STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
+ STATS_DESC_COUNTER(VCPU, dec_exits),
+ STATS_DESC_COUNTER(VCPU, ext_intr_exits),
+ STATS_DESC_TIME_NSEC(VCPU, halt_wait_ns),
+ STATS_DESC_COUNTER(VCPU, halt_successful_wait),
+ STATS_DESC_COUNTER(VCPU, dbell_exits),
+ STATS_DESC_COUNTER(VCPU, gdbell_exits),
+ STATS_DESC_COUNTER(VCPU, ld),
+ STATS_DESC_COUNTER(VCPU, st),
+ STATS_DESC_COUNTER(VCPU, pf_storage),
+ STATS_DESC_COUNTER(VCPU, pf_instruc),
+ STATS_DESC_COUNTER(VCPU, sp_storage),
+ STATS_DESC_COUNTER(VCPU, sp_instruc),
+ STATS_DESC_COUNTER(VCPU, queue_intr),
+ STATS_DESC_COUNTER(VCPU, ld_slow),
+ STATS_DESC_COUNTER(VCPU, st_slow),
+ STATS_DESC_COUNTER(VCPU, pthru_all),
+ STATS_DESC_COUNTER(VCPU, pthru_host),
+ STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
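The two headers above make the stats interface self-describing: the id string, the descriptor table, and the data block each live at an offset recorded in the header. A hedged userspace sketch of walking that layout (assumes a stats fd obtained via the KVM_GET_STATS_FD ioctl served by the new binary_stats.o code; error handling elided):

    struct kvm_stats_header hdr;
    uint64_t *data;

    pread(fd, &hdr, sizeof(hdr), 0);
    /* hdr.num_desc descriptors start at hdr.desc_offset; each one is
     * sizeof(struct kvm_stats_desc) + hdr.name_size bytes long. */
    data = calloc(hdr.num_desc, sizeof(*data));
    /* one u64 per descriptor, starting at hdr.data_offset */
    pread(fd, data, hdr.num_desc * sizeof(*data), hdr.data_offset);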
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
@@ -171,6 +200,12 @@ void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
+void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
+{
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_SYSCALL, 0);
+}
+EXPORT_SYMBOL(kvmppc_core_queue_syscall);
+
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
/* might as well deliver this straight away */
@@ -834,26 +869,24 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
- unsigned flags)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
- return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
+ return kvm->arch.kvm_ops->unmap_gfn_range(kvm, range);
}
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- return kvm->arch.kvm_ops->age_hva(kvm, start, end);
+ return kvm->arch.kvm_ops->age_gfn(kvm, range);
}
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
+ return kvm->arch.kvm_ops->test_age_gfn(kvm, range);
}
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
- return 0;
+ return kvm->arch.kvm_ops->set_spte_gfn(kvm, range);
}
int kvmppc_core_init_vm(struct kvm *kvm)
@@ -1046,13 +1079,10 @@ static int kvmppc_book3s_init(void)
#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
if (xics_on_xive()) {
- kvmppc_xive_init_module();
kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
- if (kvmppc_xive_native_supported()) {
- kvmppc_xive_native_init_module();
+ if (kvmppc_xive_native_supported())
kvm_register_device_ops(&kvm_xive_native_ops,
KVM_DEV_TYPE_XIVE);
- }
} else
#endif
kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
@@ -1062,12 +1092,6 @@ static int kvmppc_book3s_init(void)
static void kvmppc_book3s_exit(void)
{
-#ifdef CONFIG_KVM_XICS
- if (xics_on_xive()) {
- kvmppc_xive_exit_module();
- kvmppc_xive_native_exit_module();
- }
-#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
kvmppc_book3s_exit_pr();
#endif
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
index 9b6323ec8e60..740e51def5a5 100644
--- a/arch/powerpc/kvm/book3s.h
+++ b/arch/powerpc/kvm/book3s.h
@@ -9,12 +9,10 @@
extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
struct kvm_memory_slot *memslot);
-extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start,
- unsigned long end);
-extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long start,
- unsigned long end);
-extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
-extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
+extern bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range);
+extern bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
+extern bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
+extern bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index e8e7b2c530d1..4b3a8d80cfa3 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -353,9 +353,6 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
preempt_enable();
}
-/* From mm/mmu_context_hash32.c */
-#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
-
int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S
new file mode 100644
index 000000000000..983b8c18bc31
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_entry.S
@@ -0,0 +1,416 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <asm/asm-offsets.h>
+#include <asm/cache.h>
+#include <asm/code-patching-asm.h>
+#include <asm/exception-64s.h>
+#include <asm/export.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_book3s_asm.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+#include <asm/ultravisor-api.h>
+
+/*
+ * These are branched to from interrupt handlers in exception-64s.S which set
+ * IKVM_REAL or IKVM_VIRT, if HSTATE_IN_GUEST was found to be non-zero.
+ */
+
+/*
+ * This is an hcall, so the register convention is as documented in
+ * Documentation/powerpc/papr_hcalls.rst.
+ *
+ * This may also be a syscall from PR-KVM userspace that is to be
+ * reflected to the PR guest kernel, so registers may be set up for
+ * a system call rather than hcall. We don't currently clobber
+ * anything here, but the 0xc00 handler has already clobbered CTR
+ * and CR0, so PR-KVM cannot support a guest kernel that preserves
+ * those registers across its system calls.
+ *
+ * The state of registers is as for kvmppc_interrupt, except CFAR is not
+ * saved, R13 is not in SCRATCH0, and R10 does not contain the trap.
+ */
+.global kvmppc_hcall
+.balign IFETCH_ALIGN_BYTES
+kvmppc_hcall:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ lbz r10,HSTATE_IN_GUEST(r13)
+ cmpwi r10,KVM_GUEST_MODE_HV_P9
+ beq kvmppc_p9_exit_hcall
+#endif
+ ld r10,PACA_EXGEN+EX_R13(r13)
+ SET_SCRATCH0(r10)
+ li r10,0xc00
+ /* Now we look like kvmppc_interrupt */
+ li r11,PACA_EXGEN
+ b .Lgot_save_area
+
+/*
+ * KVM interrupt entry occurs after GEN_INT_ENTRY runs, and follows that
+ * call convention:
+ *
+ * guest R9-R13, CTR, CFAR, PPR saved in PACA EX_xxx save area
+ * guest (H)DAR, (H)DSISR are also in the save area for relevant interrupts
+ * guest R13 also saved in SCRATCH0
+ * R13 = PACA
+ * R11 = (H)SRR0
+ * R12 = (H)SRR1
+ * R9 = guest CR
+ * PPR is set to medium
+ *
+ * With the addition for KVM:
+ * R10 = trap vector
+ */
+.global kvmppc_interrupt
+.balign IFETCH_ALIGN_BYTES
+kvmppc_interrupt:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ std r10,HSTATE_SCRATCH0(r13)
+ lbz r10,HSTATE_IN_GUEST(r13)
+ cmpwi r10,KVM_GUEST_MODE_HV_P9
+ beq kvmppc_p9_exit_interrupt
+ ld r10,HSTATE_SCRATCH0(r13)
+#endif
+ li r11,PACA_EXGEN
+ cmpdi r10,0x200
+ bgt+ .Lgot_save_area
+ li r11,PACA_EXMC
+ beq .Lgot_save_area
+ li r11,PACA_EXNMI
+.Lgot_save_area:
+ add r11,r11,r13
+BEGIN_FTR_SECTION
+ ld r12,EX_CFAR(r11)
+ std r12,HSTATE_CFAR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ ld r12,EX_CTR(r11)
+ mtctr r12
+BEGIN_FTR_SECTION
+ ld r12,EX_PPR(r11)
+ std r12,HSTATE_PPR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ ld r12,EX_R12(r11)
+ std r12,HSTATE_SCRATCH0(r13)
+ sldi r12,r9,32
+ or r12,r12,r10
+ ld r9,EX_R9(r11)
+ ld r10,EX_R10(r11)
+ ld r11,EX_R11(r11)
+
+ /*
+ * Hcalls and other interrupts come here after normalising register
+ * contents and save locations:
+ *
+ * R12 = (guest CR << 32) | interrupt vector
+ * R13 = PACA
+ * guest R12 saved in shadow HSTATE_SCRATCH0
+ * guest R13 saved in SPRN_SCRATCH0
+ */
+ std r9,HSTATE_SCRATCH2(r13)
+ lbz r9,HSTATE_IN_GUEST(r13)
+ cmpwi r9,KVM_GUEST_MODE_SKIP
+ beq- .Lmaybe_skip
+.Lno_skip:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ cmpwi r9,KVM_GUEST_MODE_GUEST
+ beq kvmppc_interrupt_pr
+#endif
+ b kvmppc_interrupt_hv
+#else
+ b kvmppc_interrupt_pr
+#endif
+
+/*
+ * "Skip" interrupts are part of a trick KVM uses a with hash guests to load
+ * the faulting instruction in guest memory from the the hypervisor without
+ * walking page tables.
+ *
+ * When the guest takes a fault that requires the hypervisor to load the
+ * instruction (e.g., MMIO emulation), KVM is running in real-mode with HV=1
+ * and the guest MMU context loaded. It sets KVM_GUEST_MODE_SKIP, and sets
+ * MSR[DR]=1 while leaving MSR[IR]=0, so it continues to fetch HV instructions
+ * but loads and stores will access the guest context. This is used to load
+ * the faulting instruction using the faulting guest effective address.
+ *
+ * However the guest context may not be able to translate, or it may cause a
+ * machine check or other issue, which results in a fault in the host
+ * (even with KVM-HV).
+ *
+ * These faults come here because KVM_GUEST_MODE_SKIP was set, so if they
+ * are (or are likely) caused by that load, the instruction is skipped by
+ * simply returning with the PC advanced by 4; the caller then notices that
+ * the load did not execute and falls back to the slow path, which walks the
+ * page tables to read guest memory.
+ */
+.Lmaybe_skip:
+ cmpwi r12,BOOK3S_INTERRUPT_MACHINE_CHECK
+ beq 1f
+ cmpwi r12,BOOK3S_INTERRUPT_DATA_STORAGE
+ beq 1f
+ cmpwi r12,BOOK3S_INTERRUPT_DATA_SEGMENT
+ beq 1f
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* HSRR interrupts get 2 added to interrupt number */
+ cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE | 0x2
+ beq 2f
+#endif
+ b .Lno_skip
+1: mfspr r9,SPRN_SRR0
+ addi r9,r9,4
+ mtspr SPRN_SRR0,r9
+ ld r12,HSTATE_SCRATCH0(r13)
+ ld r9,HSTATE_SCRATCH2(r13)
+ GET_SCRATCH0(r13)
+ RFI_TO_KERNEL
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+2: mfspr r9,SPRN_HSRR0
+ addi r9,r9,4
+ mtspr SPRN_HSRR0,r9
+ ld r12,HSTATE_SCRATCH0(r13)
+ ld r9,HSTATE_SCRATCH2(r13)
+ GET_SCRATCH0(r13)
+ HRFI_TO_KERNEL
+#endif
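In C-like pseudocode, the hypervisor-side sequence that opens the skip window described above looks roughly like this (a sketch under stated assumptions: read_guest_slow() is hypothetical, and the real code lives in the instruction-fetch helpers, not here):

    u32 instr = KVM_INST_FETCH_FAILED;

    local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_SKIP;
    mtmsr(mfmsr() | MSR_DR);        /* loads now use the guest context */
    instr = *(u32 *)guest_ea;       /* a fault lands in the code above */
    mtmsr(mfmsr() & ~MSR_DR);
    local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HOST_HV;

    /* If the load faulted, it was skipped (PC += 4), so the register
     * still holds the poison value and we take the slow path. */
    if (instr == KVM_INST_FETCH_FAILED)
            instr = read_guest_slow(vcpu, guest_ea);        /* hypothetical */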
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+
+/* Stack frame offsets for kvmppc_p9_enter_guest */
+#define SFS (144 + STACK_FRAME_MIN_SIZE)
+#define STACK_SLOT_NVGPRS (SFS - 144) /* 18 gprs */
+
+/*
+ * void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);
+ *
+ * Enter the guest on an ISAv3.0 or later system.
+ */
+.balign IFETCH_ALIGN_BYTES
+_GLOBAL(kvmppc_p9_enter_guest)
+EXPORT_SYMBOL_GPL(kvmppc_p9_enter_guest)
+ mflr r0
+ std r0,PPC_LR_STKOFF(r1)
+ stdu r1,-SFS(r1)
+
+ std r1,HSTATE_HOST_R1(r13)
+
+ mfcr r4
+ stw r4,SFS+8(r1)
+
+ reg = 14
+ .rept 18
+ std reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
+ reg = reg + 1
+ .endr
+
+ ld r4,VCPU_LR(r3)
+ mtlr r4
+ ld r4,VCPU_CTR(r3)
+ mtctr r4
+ ld r4,VCPU_XER(r3)
+ mtspr SPRN_XER,r4
+
+ ld r1,VCPU_CR(r3)
+
+BEGIN_FTR_SECTION
+ ld r4,VCPU_CFAR(r3)
+ mtspr SPRN_CFAR,r4
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+BEGIN_FTR_SECTION
+ ld r4,VCPU_PPR(r3)
+ mtspr SPRN_PPR,r4
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ reg = 4
+ .rept 28
+ ld reg,__VCPU_GPR(reg)(r3)
+ reg = reg + 1
+ .endr
+
+ ld r4,VCPU_KVM(r3)
+ lbz r4,KVM_SECURE_GUEST(r4)
+ cmpdi r4,0
+ ld r4,VCPU_GPR(R4)(r3)
+ bne .Lret_to_ultra
+
+ mtcr r1
+
+ ld r0,VCPU_GPR(R0)(r3)
+ ld r1,VCPU_GPR(R1)(r3)
+ ld r2,VCPU_GPR(R2)(r3)
+ ld r3,VCPU_GPR(R3)(r3)
+
+ HRFI_TO_GUEST
+ b .
+
+ /*
+ * Use UV_RETURN ultracall to return control back to the Ultravisor
+ * after processing a hypercall or interrupt that was forwarded
+ * (a.k.a. reflected) to the Hypervisor.
+ *
+ * All registers have already been reloaded except those the ucall requires:
+ * R0 = hcall result
+ * R2 = SRR1, so UV can detect a synthesized interrupt (if any)
+ * R3 = UV_RETURN
+ */
+.Lret_to_ultra:
+ mtcr r1
+ ld r1,VCPU_GPR(R1)(r3)
+
+ ld r0,VCPU_GPR(R3)(r3)
+ mfspr r2,SPRN_SRR1
+ LOAD_REG_IMMEDIATE(r3, UV_RETURN)
+ sc 2
+
+/*
+ * kvmppc_p9_exit_hcall and kvmppc_p9_exit_interrupt are branched to from
+ * above if the interrupt was taken for a guest that was entered via
+ * kvmppc_p9_enter_guest().
+ *
+ * The exit code recovers the host stack and vcpu pointer, saves all guest GPRs
+ * and CR, LR, XER as well as guest MSR and NIA into the VCPU, then re-
+ * establishes the host stack and registers to return from the
+ * kvmppc_p9_enter_guest() function, which saves CTR and other guest registers
+ * (SPRs and FP, VEC, etc).
+ */
+.balign IFETCH_ALIGN_BYTES
+kvmppc_p9_exit_hcall:
+ mfspr r11,SPRN_SRR0
+ mfspr r12,SPRN_SRR1
+ li r10,0xc00
+ std r10,HSTATE_SCRATCH0(r13)
+
+.balign IFETCH_ALIGN_BYTES
+kvmppc_p9_exit_interrupt:
+ /*
+ * If set to KVM_GUEST_MODE_HV_P9 but we're still in the
+ * hypervisor, that means we can't return from the entry stack.
+ */
+ rldicl. r10,r12,64-MSR_HV_LG,63
+ bne- kvmppc_p9_bad_interrupt
+
+ std r1,HSTATE_SCRATCH1(r13)
+ std r3,HSTATE_SCRATCH2(r13)
+ ld r1,HSTATE_HOST_R1(r13)
+ ld r3,HSTATE_KVM_VCPU(r13)
+
+ std r9,VCPU_CR(r3)
+
+1:
+ std r11,VCPU_PC(r3)
+ std r12,VCPU_MSR(r3)
+
+ reg = 14
+ .rept 18
+ std reg,__VCPU_GPR(reg)(r3)
+ reg = reg + 1
+ .endr
+
+ /* r1, r3, r9-r13 are saved to vcpu by C code */
+ std r0,VCPU_GPR(R0)(r3)
+ std r2,VCPU_GPR(R2)(r3)
+ reg = 4
+ .rept 5
+ std reg,__VCPU_GPR(reg)(r3)
+ reg = reg + 1
+ .endr
+
+ ld r2,PACATOC(r13)
+
+ mflr r4
+ std r4,VCPU_LR(r3)
+ mfspr r4,SPRN_XER
+ std r4,VCPU_XER(r3)
+
+ reg = 14
+ .rept 18
+ ld reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
+ reg = reg + 1
+ .endr
+
+ lwz r4,SFS+8(r1)
+ mtcr r4
+
+ /*
+ * Flush the link stack here, before executing the first blr on the
+ * way out of the guest.
+ *
+ * The link stack won't match coming out of the guest anyway so the
+ * only cost is the flush itself. The call clobbers r0.
+ */
+1: nop
+ patch_site 1b patch__call_kvm_flush_link_stack_p9
+
+ addi r1,r1,SFS
+ ld r0,PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
+
+/*
+ * Took an interrupt somewhere right before HRFID to guest, so registers are
+ * in a bad way. Restore hopefully enough state to run host virtual code and
+ * run the Linux interrupt handler (SRESET or MCE) to print something useful.
+ *
+ * We could be really clever and save all host registers in known locations
+ * before setting HSTATE_IN_GUEST, then restoring them all here, and setting
+ * return address to a fixup that sets them up again. But that's a lot of
+ * effort for a small bit of code. Lots of other things to do first.
+ */
+kvmppc_p9_bad_interrupt:
+BEGIN_MMU_FTR_SECTION
+ /*
+ * Hash host doesn't try to recover MMU (requires host SLB reload)
+ */
+ b .
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
+ /*
+ * Clean up guest registers to give host a chance to run.
+ */
+ li r10,0
+ mtspr SPRN_AMR,r10
+ mtspr SPRN_IAMR,r10
+ mtspr SPRN_CIABR,r10
+ mtspr SPRN_DAWRX0,r10
+BEGIN_FTR_SECTION
+ mtspr SPRN_DAWRX1,r10
+END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
+ mtspr SPRN_PID,r10
+
+ /*
+ * Switch to host MMU mode
+ */
+ ld r10, HSTATE_KVM_VCPU(r13)
+ ld r10, VCPU_KVM(r10)
+ lwz r10, KVM_HOST_LPID(r10)
+ mtspr SPRN_LPID,r10
+
+ ld r10, HSTATE_KVM_VCPU(r13)
+ ld r10, VCPU_KVM(r10)
+ ld r10, KVM_HOST_LPCR(r10)
+ mtspr SPRN_LPCR,r10
+
+ /*
+ * Set GUEST_MODE_NONE so the handler won't branch to KVM, and clear
+ * MSR_RI in r12 ([H]SRR1) so the handler won't try to return.
+ */
+ li r10,KVM_GUEST_MODE_NONE
+ stb r10,HSTATE_IN_GUEST(r13)
+ li r10,MSR_RI
+ andc r12,r12,r10
+
+ /*
+ * Go back to interrupt handler. MCE and SRESET have their specific
+ * PACA save area so they should be used directly. They set up their
+ * own stack. The other handlers all use EXGEN. They will use the
+ * guest r1 if it looks like a kernel stack, so just load the
+ * emergency stack and go to program check for all other interrupts.
+ */
+ ld r10,HSTATE_SCRATCH0(r13)
+ cmpwi r10,BOOK3S_INTERRUPT_MACHINE_CHECK
+ beq machine_check_common
+
+ cmpwi r10,BOOK3S_INTERRUPT_SYSTEM_RESET
+ beq system_reset_common
+
+ b .
+#endif
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index e452158a18d7..c3e31fef0be1 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -8,6 +8,7 @@
*/
#include <linux/kvm_host.h>
+#include <linux/pkeys.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -133,6 +134,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
else
kvmppc_mmu_flush_icache(pfn);
+ rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY);
rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;
/*
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index bb6773594cf8..c63e263312a4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -752,51 +752,6 @@ void kvmppc_rmap_reset(struct kvm *kvm)
srcu_read_unlock(&kvm->srcu, srcu_idx);
}
-typedef int (*hva_handler_fn)(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn);
-
-static int kvm_handle_hva_range(struct kvm *kvm,
- unsigned long start,
- unsigned long end,
- hva_handler_fn handler)
-{
- int ret;
- int retval = 0;
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
-
- slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots) {
- unsigned long hva_start, hva_end;
- gfn_t gfn, gfn_end;
-
- hva_start = max(start, memslot->userspace_addr);
- hva_end = min(end, memslot->userspace_addr +
- (memslot->npages << PAGE_SHIFT));
- if (hva_start >= hva_end)
- continue;
- /*
- * {gfn(page) | page intersects with [hva_start, hva_end)} =
- * {gfn, gfn+1, ..., gfn_end-1}.
- */
- gfn = hva_to_gfn_memslot(hva_start, memslot);
- gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
- for (; gfn < gfn_end; ++gfn) {
- ret = handler(kvm, memslot, gfn);
- retval |= ret;
- }
- }
-
- return retval;
-}
-
-static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
- hva_handler_fn handler)
-{
- return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
-}
-
/* Must be called with both HPTE and rmap locked */
static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
struct kvm_memory_slot *memslot,
@@ -840,8 +795,8 @@ static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
}
}
-static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn)
+static void kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn)
{
unsigned long i;
__be64 *hptep;
@@ -874,16 +829,21 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
unlock_rmap(rmapp);
__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
}
- return 0;
}
-int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
+bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
- hva_handler_fn handler;
+ gfn_t gfn;
- handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
- kvm_handle_hva_range(kvm, start, end, handler);
- return 0;
+ if (kvm_is_radix(kvm)) {
+ for (gfn = range->start; gfn < range->end; gfn++)
+ kvm_unmap_radix(kvm, range->slot, gfn);
+ } else {
+ for (gfn = range->start; gfn < range->end; gfn++)
+ kvm_unmap_rmapp(kvm, range->slot, gfn);
+ }
+
+ return false;
}
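This mirrors the hva walk deleted above, except that generic KVM code now performs the memslot iteration and hands each arch hook a ready-made gfn range, roughly built like this (a simplified sketch of the common-code caller, not code in this file):

    struct kvm_gfn_range range = {
            .slot  = memslot,
            .start = hva_to_gfn_memslot(hva_start, memslot),
            .end   = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot),
    };

    flush |= kvm_unmap_gfn_range(kvm, &range);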
void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
@@ -913,8 +873,8 @@ void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
}
}
-static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn)
+static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn)
{
struct revmap_entry *rev = kvm->arch.hpt.rev;
unsigned long head, i, j;
@@ -968,26 +928,34 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
return ret;
}
-int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
+bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
- hva_handler_fn handler;
+ gfn_t gfn;
+ bool ret = false;
- handler = kvm_is_radix(kvm) ? kvm_age_radix : kvm_age_rmapp;
- return kvm_handle_hva_range(kvm, start, end, handler);
+ if (kvm_is_radix(kvm)) {
+ for (gfn = range->start; gfn < range->end; gfn++)
+ ret |= kvm_age_radix(kvm, range->slot, gfn);
+ } else {
+ for (gfn = range->start; gfn < range->end; gfn++)
+ ret |= kvm_age_rmapp(kvm, range->slot, gfn);
+ }
+
+ return ret;
}
-static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn)
+static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn)
{
struct revmap_entry *rev = kvm->arch.hpt.rev;
unsigned long head, i, j;
unsigned long *hp;
- int ret = 1;
+ bool ret = true;
unsigned long *rmapp;
rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
if (*rmapp & KVMPPC_RMAP_REFERENCED)
- return 1;
+ return true;
lock_rmap(rmapp);
if (*rmapp & KVMPPC_RMAP_REFERENCED)
@@ -1002,27 +970,33 @@ static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
goto out;
} while ((i = j) != head);
}
- ret = 0;
+ ret = false;
out:
unlock_rmap(rmapp);
return ret;
}
-int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
+bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
- hva_handler_fn handler;
+ WARN_ON(range->start + 1 != range->end);
- handler = kvm_is_radix(kvm) ? kvm_test_age_radix : kvm_test_age_rmapp;
- return kvm_handle_hva(kvm, hva, handler);
+ if (kvm_is_radix(kvm))
+ return kvm_test_age_radix(kvm, range->slot, range->start);
+ else
+ return kvm_test_age_rmapp(kvm, range->slot, range->start);
}
-void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
- hva_handler_fn handler;
+ WARN_ON(range->start + 1 != range->end);
+
+ if (kvm_is_radix(kvm))
+ kvm_unmap_radix(kvm, range->slot, range->start);
+ else
+ kvm_unmap_rmapp(kvm, range->slot, range->start);
- handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
- kvm_handle_hva(kvm, hva, handler);
+ return false;
}
static int vcpus_running(struct kvm *kvm)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index e603de7ade52..b5905ae4377c 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -21,6 +21,7 @@
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
#include <asm/kvm_book3s_uvmem.h>
+#include <asm/plpar_wrappers.h>
/*
* Supported radix tree geometry.
@@ -318,9 +319,19 @@ void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
}
psi = shift_to_mmu_psize(pshift);
- rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
- rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
- lpid, rb);
+
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) {
+ rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
+ lpid, rb);
+ } else {
+ rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+ H_RPTI_TYPE_NESTED |
+ H_RPTI_TYPE_TLB,
+ psize_to_rpti_pgsize(psi),
+ addr, addr + psize);
+ }
+
if (rc)
pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
@@ -334,8 +345,14 @@ static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
return;
}
- rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
- lpid, TLBIEL_INVAL_SET_LPID);
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ else
+ rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+ H_RPTI_TYPE_NESTED |
+ H_RPTI_TYPE_PWC, H_RPTI_PAGE_ALL,
+ 0, -1UL);
if (rc)
pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}
@@ -993,8 +1010,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
}
/* Called with kvm->mmu_lock held */
-int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn)
+void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn)
{
pte_t *ptep;
unsigned long gpa = gfn << PAGE_SHIFT;
@@ -1002,24 +1019,23 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
- return 0;
+ return;
}
ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
if (ptep && pte_present(*ptep))
kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
kvm->arch.lpid);
- return 0;
}
/* Called with kvm->mmu_lock held */
-int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn)
+bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn)
{
pte_t *ptep;
unsigned long gpa = gfn << PAGE_SHIFT;
unsigned int shift;
- int ref = 0;
+ bool ref = false;
unsigned long old, *rmapp;
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
@@ -1035,26 +1051,27 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
old & PTE_RPN_MASK,
1UL << shift);
- ref = 1;
+ ref = true;
}
return ref;
}
/* Called with kvm->mmu_lock held */
-int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn)
+bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn)
{
pte_t *ptep;
unsigned long gpa = gfn << PAGE_SHIFT;
unsigned int shift;
- int ref = 0;
+ bool ref = false;
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return ref;
ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep))
- ref = 1;
+ ref = true;
return ref;
}
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 083a4e037718..dc6591548f0c 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -391,10 +391,6 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
/* liobn, ioba, tce); */
- /* For radix, we might be in virtual mode, so punt */
- if (kvm_is_radix(vcpu->kvm))
- return H_TOO_HARD;
-
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
@@ -489,10 +485,6 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
bool prereg = false;
struct kvmppc_spapr_tce_iommu_table *stit;
- /* For radix, we might be in virtual mode, so punt */
- if (kvm_is_radix(vcpu->kvm))
- return H_TOO_HARD;
-
/*
* used to check for invalidations in progress
*/
@@ -602,10 +594,6 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
long i, ret;
struct kvmppc_spapr_tce_iommu_table *stit;
- /* For radix, we might be in virtual mode, so punt */
- if (kvm_is_radix(vcpu->kvm))
- return H_TOO_HARD;
-
stt = kvmppc_find_table(vcpu->kvm, liobn);
if (!stt)
return H_TOO_HARD;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 13bad6bf4c95..1d1fcc290fca 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -76,6 +76,7 @@
#include <asm/kvm_book3s_uvmem.h>
#include <asm/ultravisor.h>
#include <asm/dtl.h>
+#include <asm/plpar_wrappers.h>
#include "book3s.h"
@@ -103,13 +104,9 @@ static int target_smt_mode;
module_param(target_smt_mode, int, 0644);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
-static bool indep_threads_mode = true;
-module_param(indep_threads_mode, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(indep_threads_mode, "Independent-threads mode (only on POWER9)");
-
static bool one_vm_per_core;
module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires indep_threads_mode=N)");
+MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires POWER8 or older)");
#ifdef CONFIG_KVM_XICS
static const struct kernel_param_ops module_param_ops = {
@@ -134,9 +131,6 @@ static inline bool nesting_enabled(struct kvm *kvm)
return kvm->arch.nested_enable && kvm_is_radix(kvm);
}
-/* If set, the threads on each CPU core have to be in the same MMU mode */
-static bool no_mixing_hpt_and_radix __read_mostly;
-
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
/*
@@ -236,7 +230,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
waitp = kvm_arch_vcpu_get_wait(vcpu);
if (rcuwait_wake_up(waitp))
- ++vcpu->stat.halt_wakeup;
+ ++vcpu->stat.generic.halt_wakeup;
cpu = READ_ONCE(vcpu->arch.thread_cpu);
if (cpu >= 0 && kvmppc_ipi_thread(cpu))
@@ -803,8 +797,12 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
vcpu->arch.dawrx1 = value2;
return H_SUCCESS;
case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
- /* KVM does not support mflags=2 (AIL=2) */
- if (mflags != 0 && mflags != 3)
+ /*
+ * KVM does not support mflags=2 (AIL=2) and AIL=1 is reserved.
+ * Keep this in synch with kvmppc_filter_guest_lpcr_hv.
+ */
+ if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) &&
+ kvmhv_vcpu_is_radix(vcpu) && mflags == 3)
return H_UNSUPPORTED_FLAG_START;
return H_TOO_HARD;
default:
@@ -896,6 +894,10 @@ static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
* H_SUCCESS if the source vcore wasn't idle (e.g. if it may
* have useful work to do and should not confer) so we don't
* recheck that here.
+ *
+ * In the P9 single-vcpu-per-vcore case, the real
+ * mode handler is not called but no other threads are in the
+ * source vcore.
*/
spin_lock(&vcore->lock);
@@ -921,8 +923,71 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
return yield_count;
}
+/*
+ * H_RPT_INVALIDATE hcall handler for nested guests.
+ *
+ * Handles only nested process-scoped invalidation requests in L0.
+ */
+static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu)
+{
+ unsigned long type = kvmppc_get_gpr(vcpu, 6);
+ unsigned long pid, pg_sizes, start, end;
+
+ /*
+ * The partition-scoped invalidations aren't handled here in L0.
+ */
+ if (type & H_RPTI_TYPE_NESTED)
+ return RESUME_HOST;
+
+ pid = kvmppc_get_gpr(vcpu, 4);
+ pg_sizes = kvmppc_get_gpr(vcpu, 7);
+ start = kvmppc_get_gpr(vcpu, 8);
+ end = kvmppc_get_gpr(vcpu, 9);
+
+ do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid,
+ type, pg_sizes, start, end);
+
+ kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+ return RESUME_GUEST;
+}
+
+static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
+ unsigned long id, unsigned long target,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end)
+{
+ if (!kvm_is_radix(vcpu->kvm))
+ return H_UNSUPPORTED;
+
+ if (end < start)
+ return H_P5;
+
+ /*
+ * Partition-scoped invalidation for nested guests.
+ */
+ if (type & H_RPTI_TYPE_NESTED) {
+ if (!nesting_enabled(vcpu->kvm))
+ return H_FUNCTION;
+
+ /* Support only cores as target */
+ if (target != H_RPTI_TARGET_CMMU)
+ return H_P2;
+
+ return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes,
+ start, end);
+ }
+
+ /*
+ * Process-scoped invalidation for L1 guests.
+ */
+ do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid,
+ type, pg_sizes, start, end);
+ return H_SUCCESS;
+}
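The handler above reads its parameters from GPRs 4-9, so an L1 caller passes them in that order; a hedged sketch of the matching call site (illustrative only, assuming the same convention as the pseries_rpt_invalidate() wrapper used earlier in this diff):

    rc = plpar_hcall_norets(H_RPT_INVALIDATE,
                            pid,                    /* r4: PID or LPID */
                            H_RPTI_TARGET_CMMU,     /* r5: target */
                            H_RPTI_TYPE_TLB,        /* r6: type */
                            pg_sizes,               /* r7: page sizes */
                            start, end);            /* r8, r9: EA range */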
+
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
unsigned long req = kvmppc_get_gpr(vcpu, 3);
unsigned long target, ret = H_SUCCESS;
int yield_count;
@@ -934,11 +999,57 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_HOST;
switch (req) {
+ case H_REMOVE:
+ ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_ENTER:
+ ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6),
+ kvmppc_get_gpr(vcpu, 7));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_READ:
+ ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_CLEAR_MOD:
+ ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_CLEAR_REF:
+ ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_PROTECT:
+ ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_BULK_REMOVE:
+ ret = kvmppc_h_bulk_remove(vcpu);
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+
case H_CEDE:
break;
case H_PROD:
target = kvmppc_get_gpr(vcpu, 4);
- tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+ tvcpu = kvmppc_find_vcpu(kvm, target);
if (!tvcpu) {
ret = H_PARAMETER;
break;
@@ -952,7 +1063,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
target = kvmppc_get_gpr(vcpu, 4);
if (target == -1)
break;
- tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+ tvcpu = kvmppc_find_vcpu(kvm, target);
if (!tvcpu) {
ret = H_PARAMETER;
break;
@@ -968,12 +1079,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
kvmppc_get_gpr(vcpu, 6));
break;
case H_RTAS:
- if (list_empty(&vcpu->kvm->arch.rtas_tokens))
+ if (list_empty(&kvm->arch.rtas_tokens))
return RESUME_HOST;
- idx = srcu_read_lock(&vcpu->kvm->srcu);
+ idx = srcu_read_lock(&kvm->srcu);
rc = kvmppc_rtas_hcall(vcpu);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ srcu_read_unlock(&kvm->srcu, idx);
if (rc == -ENOENT)
return RESUME_HOST;
@@ -1057,15 +1168,23 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
ret = H_HARDWARE;
break;
+ case H_RPT_INVALIDATE:
+ ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6),
+ kvmppc_get_gpr(vcpu, 7),
+ kvmppc_get_gpr(vcpu, 8),
+ kvmppc_get_gpr(vcpu, 9));
+ break;
case H_SET_PARTITION_TABLE:
ret = H_FUNCTION;
- if (nesting_enabled(vcpu->kvm))
+ if (nesting_enabled(kvm))
ret = kvmhv_set_partition_table(vcpu);
break;
case H_ENTER_NESTED:
ret = H_FUNCTION;
- if (!nesting_enabled(vcpu->kvm))
+ if (!nesting_enabled(kvm))
break;
ret = kvmhv_enter_nested_guest(vcpu);
if (ret == H_INTERRUPT) {
@@ -1080,12 +1199,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
break;
case H_TLB_INVALIDATE:
ret = H_FUNCTION;
- if (nesting_enabled(vcpu->kvm))
+ if (nesting_enabled(kvm))
ret = kvmhv_do_nested_tlbie(vcpu);
break;
case H_COPY_TOFROM_GUEST:
ret = H_FUNCTION;
- if (nesting_enabled(vcpu->kvm))
+ if (nesting_enabled(kvm))
ret = kvmhv_copy_tofrom_guest_nested(vcpu);
break;
case H_PAGE_INIT:
@@ -1096,7 +1215,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_SVM_PAGE_IN:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
- ret = kvmppc_h_svm_page_in(vcpu->kvm,
+ ret = kvmppc_h_svm_page_in(kvm,
kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
@@ -1104,7 +1223,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_SVM_PAGE_OUT:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
- ret = kvmppc_h_svm_page_out(vcpu->kvm,
+ ret = kvmppc_h_svm_page_out(kvm,
kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
@@ -1112,12 +1231,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
case H_SVM_INIT_START:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
- ret = kvmppc_h_svm_init_start(vcpu->kvm);
+ ret = kvmppc_h_svm_init_start(kvm);
break;
case H_SVM_INIT_DONE:
ret = H_UNSUPPORTED;
if (kvmppc_get_srr1(vcpu) & MSR_S)
- ret = kvmppc_h_svm_init_done(vcpu->kvm);
+ ret = kvmppc_h_svm_init_done(kvm);
break;
case H_SVM_INIT_ABORT:
/*
@@ -1127,24 +1246,26 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
* Instead the kvm->arch.secure_guest flag is checked inside
* kvmppc_h_svm_init_abort().
*/
- ret = kvmppc_h_svm_init_abort(vcpu->kvm);
+ ret = kvmppc_h_svm_init_abort(kvm);
break;
default:
return RESUME_HOST;
}
+ WARN_ON_ONCE(ret == H_TOO_HARD);
kvmppc_set_gpr(vcpu, 3, ret);
vcpu->arch.hcall_needed = 0;
return RESUME_GUEST;
}
/*
- * Handle H_CEDE in the nested virtualization case where we haven't
- * called the real-mode hcall handlers in book3s_hv_rmhandlers.S.
+ * Handle H_CEDE in the P9 path where we don't call the real-mode hcall
+ * handlers in book3s_hv_rmhandlers.S.
+ *
* This has to be done early, not in kvmppc_pseries_do_hcall(), so
* that the cede logic in kvmppc_run_single_vcpu() works properly.
*/
-static void kvmppc_nested_cede(struct kvm_vcpu *vcpu)
+static void kvmppc_cede(struct kvm_vcpu *vcpu)
{
vcpu->arch.shregs.msr |= MSR_EE;
vcpu->arch.ceded = 1;
@@ -1175,6 +1296,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
case H_XIRR_X:
#endif
case H_PAGE_INIT:
+ case H_RPT_INVALIDATE:
return 1;
}
@@ -1397,13 +1519,39 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
}
case BOOK3S_INTERRUPT_SYSCALL:
{
- /* hcall - punt to userspace */
int i;
- /* hypercall with MSR_PR has already been handled in rmode,
- * and never reaches here.
- */
+ if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
+ /*
+ * Guest userspace executed sc 1. This can only be
+ * reached by the P9 path because the old path
+ * handles this case in realmode hcall handlers.
+ */
+ if (!kvmhv_vcpu_is_radix(vcpu)) {
+ /*
+ * A guest could be running PR KVM, so this
+ * may be a PR KVM hcall. It must be reflected
+ * to the guest kernel as a sc interrupt.
+ */
+ kvmppc_core_queue_syscall(vcpu);
+ } else {
+ /*
+ * Radix guests cannot run PR KVM or nested HV
+ * hash guests which might run PR KVM, so this
+ * is always a privilege fault. Send a program
+ * check to guest kernel.
+ */
+ kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
+ }
+ r = RESUME_GUEST;
+ break;
+ }
+ /*
+ * hcall - gather args and set exit_reason. This will next be
+ * handled by kvmppc_pseries_do_hcall which may be able to deal
+ * with it and resume guest, or may punt to userspace.
+ */
run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
for (i = 0; i < 9; ++i)
run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
@@ -1416,20 +1564,102 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* We get these next two if the guest accesses a page which it thinks
* it has mapped but which is not actually present, either because
 * it is for an emulated I/O device or because the corresponding
- * host page has been paged out. Any other HDSI/HISI interrupts
- * have been handled already.
+ * host page has been paged out.
+ *
+ * Any other HDSI/HISI interrupts have been handled already for P7/8
+ * guests. For POWER9 hash guests not using rmhandlers, basic hash
+ * fault handling is done here.
*/
- case BOOK3S_INTERRUPT_H_DATA_STORAGE:
- r = RESUME_PAGE_FAULT;
+ case BOOK3S_INTERRUPT_H_DATA_STORAGE: {
+ unsigned long vsid;
+ long err;
+
+ if (vcpu->arch.fault_dsisr == HDSISR_CANARY) {
+ r = RESUME_GUEST; /* Just retry if it's the canary */
+ break;
+ }
+
+ if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * Radix doesn't require anything, and pre-ISAv3.0 hash
+ * already attempted to handle this in rmhandlers. The
+ * hash fault handling below is v3 only (it uses ASDR
+ * via fault_gpa).
+ */
+ r = RESUME_PAGE_FAULT;
+ break;
+ }
+
+ if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
+ kvmppc_core_queue_data_storage(vcpu,
+ vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+ r = RESUME_GUEST;
+ break;
+ }
+
+ if (!(vcpu->arch.shregs.msr & MSR_DR))
+ vsid = vcpu->kvm->arch.vrma_slb_v;
+ else
+ vsid = vcpu->arch.fault_gpa;
+
+ err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
+ vsid, vcpu->arch.fault_dsisr, true);
+ if (err == 0) {
+ r = RESUME_GUEST;
+ } else if (err == -1 || err == -2) {
+ r = RESUME_PAGE_FAULT;
+ } else {
+ kvmppc_core_queue_data_storage(vcpu,
+ vcpu->arch.fault_dar, err);
+ r = RESUME_GUEST;
+ }
break;
- case BOOK3S_INTERRUPT_H_INST_STORAGE:
+ }
+ case BOOK3S_INTERRUPT_H_INST_STORAGE: {
+ unsigned long vsid;
+ long err;
+
vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
DSISR_SRR1_MATCH_64S;
- if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
- vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
- r = RESUME_PAGE_FAULT;
+ if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * Radix doesn't require anything, and pre-ISAv3.0 hash
+ * already attempted to handle this in rmhandlers. The
+ * hash fault handling below is v3 only (it uses ASDR
+ * via fault_gpa).
+ */
+ if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+ vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
+ r = RESUME_PAGE_FAULT;
+ break;
+ }
+
+ if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
+ kvmppc_core_queue_inst_storage(vcpu,
+ vcpu->arch.fault_dsisr);
+ r = RESUME_GUEST;
+ break;
+ }
+
+ if (!(vcpu->arch.shregs.msr & MSR_IR))
+ vsid = vcpu->kvm->arch.vrma_slb_v;
+ else
+ vsid = vcpu->arch.fault_gpa;
+
+ err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
+ vsid, vcpu->arch.fault_dsisr, false);
+ if (err == 0) {
+ r = RESUME_GUEST;
+ } else if (err == -1) {
+ r = RESUME_PAGE_FAULT;
+ } else {
+ kvmppc_core_queue_inst_storage(vcpu, err);
+ r = RESUME_GUEST;
+ }
break;
+ }
+
/*
* This occurs if the guest executes an illegal instruction.
* If the guest debug is disabled, generate a program interrupt
@@ -1590,6 +1820,23 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
if (!xics_on_xive())
kvmppc_xics_rm_complete(vcpu, 0);
break;
+ case BOOK3S_INTERRUPT_SYSCALL:
+ {
+ unsigned long req = kvmppc_get_gpr(vcpu, 3);
+
+ /*
+ * The H_RPT_INVALIDATE hcalls issued by nested
+ * guests for process-scoped invalidations when
+ * GTSE=0 are handled here in L0.
+ */
+ if (req == H_RPT_INVALIDATE) {
+ r = kvmppc_nested_h_rpt_invalidate(vcpu);
+ break;
+ }
+
+ r = RESUME_HOST;
+ break;
+ }
default:
r = RESUME_HOST;
break;
@@ -1635,6 +1882,49 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
return 0;
}
+/*
+ * Enforce limits on guest LPCR values based on hardware availability,
+ * guest configuration, and possibly hypervisor support and security
+ * concerns.
+ */
+unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr)
+{
+ /* LPCR_TC only applies to HPT guests */
+ if (kvm_is_radix(kvm))
+ lpcr &= ~LPCR_TC;
+
+ /* On POWER8 and above, userspace can modify AIL */
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ lpcr &= ~LPCR_AIL;
+ if ((lpcr & LPCR_AIL) != LPCR_AIL_3)
+ lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */
+ /*
+ * On some POWER9s we force AIL off for radix guests to prevent
+ * executing in MSR[HV]=1 mode with the MMU enabled and PIDR set to the
+ * guest value, which can result in Q0 translations with LPID=0, PID=PIDR
+ * being cached, which the host TLB management does not expect.
+ */
+ if (kvm_is_radix(kvm) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ lpcr &= ~LPCR_AIL;
+
+ /*
+ * On POWER9, allow userspace to enable large decrementer for the
+ * guest, whether or not the host has it enabled.
+ */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ lpcr &= ~LPCR_LD;
+
+ return lpcr;
+}
+
+static void verify_lpcr(struct kvm *kvm, unsigned long lpcr)
+{
+ if (lpcr != kvmppc_filter_lpcr_hv(kvm, lpcr)) {
+ WARN_ONCE(1, "lpcr 0x%lx differs from filtered 0x%lx\n",
+ lpcr, kvmppc_filter_lpcr_hv(kvm, lpcr));
+ }
+}
+
static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
bool preserve_top32)
{
@@ -1643,6 +1933,23 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
u64 mask;
spin_lock(&vc->lock);
+
+ /*
+ * Userspace can only modify
+ * DPFD (default prefetch depth), ILE (interrupt little-endian),
+ * TC (translation control), AIL (alternate interrupt location),
+ * LD (large decrementer).
+ * These are subject to restrictions from kvmppc_filter_lpcr_hv().
+ */
+ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD;
+
+ /* Broken 32-bit version of LPCR must not clear top bits */
+ if (preserve_top32)
+ mask &= 0xFFFFFFFF;
+
+ new_lpcr = kvmppc_filter_lpcr_hv(kvm,
+ (vc->lpcr & ~mask) | (new_lpcr & mask));
+
/*
* If ILE (interrupt little-endian) has changed, update the
* MSR_LE bit in the intr_msr for each vcpu in this vcore.
@@ -1661,25 +1968,8 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
}
}
- /*
- * Userspace can only modify DPFD (default prefetch depth),
- * ILE (interrupt little-endian) and TC (translation control).
- * On POWER8 and POWER9 userspace can also modify AIL (alt. interrupt loc.).
- */
- mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
- if (cpu_has_feature(CPU_FTR_ARCH_207S))
- mask |= LPCR_AIL;
- /*
- * On POWER9, allow userspace to enable large decrementer for the
- * guest, whether or not the host has it enabled.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- mask |= LPCR_LD;
+ vc->lpcr = new_lpcr;
- /* Broken 32-bit version of LPCR must not clear top bits */
- if (preserve_top32)
- mask &= 0xFFFFFFFF;
- vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
spin_unlock(&vc->lock);
}
@@ -2195,7 +2485,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*/
static int threads_per_vcore(struct kvm *kvm)
{
- if (kvm->arch.threads_indep)
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
return 1;
return threads_per_subcore;
}
@@ -2619,7 +2909,7 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
cpumask_t *cpu_in_guest;
int i;
- cpu = cpu_first_thread_sibling(cpu);
+ cpu = cpu_first_tlb_thread_sibling(cpu);
if (nested) {
cpumask_set_cpu(cpu, &nested->need_tlb_flush);
cpu_in_guest = &nested->cpu_in_guest;
@@ -2633,9 +2923,10 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
* the other side is the first smp_mb() in kvmppc_run_core().
*/
smp_mb();
- for (i = 0; i < threads_per_core; ++i)
- if (cpumask_test_cpu(cpu + i, cpu_in_guest))
- smp_call_function_single(cpu + i, do_nothing, NULL, 1);
+ for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
+ i += cpu_tlb_thread_sibling_step())
+ if (cpumask_test_cpu(i, cpu_in_guest))
+ smp_call_function_single(i, do_nothing, NULL, 1);
}
static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
@@ -2666,8 +2957,8 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
*/
if (prev_cpu != pcpu) {
if (prev_cpu >= 0 &&
- cpu_first_thread_sibling(prev_cpu) !=
- cpu_first_thread_sibling(pcpu))
+ cpu_first_tlb_thread_sibling(prev_cpu) !=
+ cpu_first_tlb_thread_sibling(pcpu))
radix_flush_cpu(kvm, prev_cpu, vcpu);
if (nested)
nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
@@ -2929,9 +3220,6 @@ static void prepare_threads(struct kvmppc_vcore *vc)
for_each_runnable_thread(i, vcpu, vc) {
if (signal_pending(vcpu->arch.run_task))
vcpu->arch.ret = -EINTR;
- else if (no_mixing_hpt_and_radix &&
- kvm_is_radix(vc->kvm) != radix_enabled())
- vcpu->arch.ret = -EINVAL;
else if (vcpu->arch.vpa.update_pending ||
vcpu->arch.slb_shadow.update_pending ||
vcpu->arch.dtl.update_pending)
@@ -3138,6 +3426,9 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
int trap;
bool is_power8;
+ if (WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)))
+ return;
+
/*
* Remove from the list any threads that have a signal pending
* or need a VPA update done
@@ -3165,9 +3456,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
* Make sure we are running on primary threads, and that secondary
* threads are offline. Also check if the number of threads in this
* guest are greater than the current system threads per guest.
- * On POWER9, we need to be not in independent-threads mode if
- * this is a HPT guest on a radix host machine where the
- * CPU threads may not be in different MMU modes.
*/
if ((controlled_threads > 1) &&
((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
@@ -3192,18 +3480,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
collect_piggybacks(&core_info, target_threads);
/*
- * On radix, arrange for TLB flushing if necessary.
- * This has to be done before disabling interrupts since
- * it uses smp_call_function().
- */
- pcpu = smp_processor_id();
- if (kvm_is_radix(vc->kvm)) {
- for (sub = 0; sub < core_info.n_subcores; ++sub)
- for_each_runnable_thread(i, vcpu, core_info.vc[sub])
- kvmppc_prepare_radix_vcpu(vcpu, pcpu);
- }
-
- /*
* Hard-disable interrupts, and check resched flag and signals.
* If we need to reschedule or deliver a signal, clean up
* and return without going into the guest(s).
@@ -3235,8 +3511,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
cmd_bit = stat_bit = 0;
split = core_info.n_subcores;
sip = NULL;
- is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S)
- && !cpu_has_feature(CPU_FTR_ARCH_300);
+ is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S);
if (split > 1) {
sip = &split_info;
@@ -3440,184 +3715,113 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
trace_kvmppc_run_core(vc, 1);
}
-/*
- * Load up hypervisor-mode registers on P9.
- */
-static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
- unsigned long lpcr)
+static void load_spr_state(struct kvm_vcpu *vcpu)
{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
- s64 hdec;
- u64 tb, purr, spurr;
- int trap;
- unsigned long host_hfscr = mfspr(SPRN_HFSCR);
- unsigned long host_ciabr = mfspr(SPRN_CIABR);
- unsigned long host_dawr0 = mfspr(SPRN_DAWR0);
- unsigned long host_dawrx0 = mfspr(SPRN_DAWRX0);
- unsigned long host_psscr = mfspr(SPRN_PSSCR);
- unsigned long host_pidr = mfspr(SPRN_PID);
- unsigned long host_dawr1 = 0;
- unsigned long host_dawrx1 = 0;
-
- if (cpu_has_feature(CPU_FTR_DAWR1)) {
- host_dawr1 = mfspr(SPRN_DAWR1);
- host_dawrx1 = mfspr(SPRN_DAWRX1);
- }
+ mtspr(SPRN_DSCR, vcpu->arch.dscr);
+ mtspr(SPRN_IAMR, vcpu->arch.iamr);
+ mtspr(SPRN_PSPB, vcpu->arch.pspb);
+ mtspr(SPRN_FSCR, vcpu->arch.fscr);
+ mtspr(SPRN_TAR, vcpu->arch.tar);
+ mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
+ mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
+ mtspr(SPRN_BESCR, vcpu->arch.bescr);
+ mtspr(SPRN_WORT, vcpu->arch.wort);
+ mtspr(SPRN_TIDR, vcpu->arch.tid);
+ mtspr(SPRN_AMR, vcpu->arch.amr);
+ mtspr(SPRN_UAMOR, vcpu->arch.uamor);
/*
- * P8 and P9 suppress the HDEC exception when LPCR[HDICE] = 0,
- * so set HDICE before writing HDEC.
+ * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI]
+ * clear (or hstate set appropriately to catch those registers
+ * being clobbered if we take an MCE or SRESET), so those are done
+ * later.
*/
- mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr | LPCR_HDICE);
- isync();
- hdec = time_limit - mftb();
- if (hdec < 0) {
- mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
- isync();
- return BOOK3S_INTERRUPT_HV_DECREMENTER;
- }
- mtspr(SPRN_HDEC, hdec);
-
- if (vc->tb_offset) {
- u64 new_tb = mftb() + vc->tb_offset;
- mtspr(SPRN_TBU40, new_tb);
- tb = mftb();
- if ((tb & 0xffffff) < (new_tb & 0xffffff))
- mtspr(SPRN_TBU40, new_tb + 0x1000000);
- vc->tb_offset_applied = vc->tb_offset;
- }
-
- if (vc->pcr)
- mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
- mtspr(SPRN_DPDES, vc->dpdes);
- mtspr(SPRN_VTB, vc->vtb);
-
- local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
- local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
- mtspr(SPRN_PURR, vcpu->arch.purr);
- mtspr(SPRN_SPURR, vcpu->arch.spurr);
-
- if (dawr_enabled()) {
- mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
- mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
- if (cpu_has_feature(CPU_FTR_DAWR1)) {
- mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
- mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
- }
- }
- mtspr(SPRN_CIABR, vcpu->arch.ciabr);
- mtspr(SPRN_IC, vcpu->arch.ic);
- mtspr(SPRN_PID, vcpu->arch.pid);
-
- mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
- (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
-
- mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
-
- mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
- mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
- mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
- mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
-
- mtspr(SPRN_AMOR, ~0UL);
-
- mtspr(SPRN_LPCR, lpcr);
- isync();
-
- kvmppc_xive_push_vcpu(vcpu);
-
- mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
- mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
-
- trap = __kvmhv_vcpu_entry_p9(vcpu);
-
- /* Advance host PURR/SPURR by the amount used by guest */
- purr = mfspr(SPRN_PURR);
- spurr = mfspr(SPRN_SPURR);
- mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
- purr - vcpu->arch.purr);
- mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
- spurr - vcpu->arch.spurr);
- vcpu->arch.purr = purr;
- vcpu->arch.spurr = spurr;
+ if (!(vcpu->arch.ctrl & 1))
+ mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
+}
- vcpu->arch.ic = mfspr(SPRN_IC);
- vcpu->arch.pid = mfspr(SPRN_PID);
- vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
+static void store_spr_state(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
- vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
- vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
- vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
- vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
+ vcpu->arch.iamr = mfspr(SPRN_IAMR);
+ vcpu->arch.pspb = mfspr(SPRN_PSPB);
+ vcpu->arch.fscr = mfspr(SPRN_FSCR);
+ vcpu->arch.tar = mfspr(SPRN_TAR);
+ vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
+ vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
+ vcpu->arch.bescr = mfspr(SPRN_BESCR);
+ vcpu->arch.wort = mfspr(SPRN_WORT);
+ vcpu->arch.tid = mfspr(SPRN_TIDR);
+ vcpu->arch.amr = mfspr(SPRN_AMR);
+ vcpu->arch.uamor = mfspr(SPRN_UAMOR);
+ vcpu->arch.dscr = mfspr(SPRN_DSCR);
+}
- /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
- mtspr(SPRN_PSSCR, host_psscr |
- (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
- mtspr(SPRN_HFSCR, host_hfscr);
- mtspr(SPRN_CIABR, host_ciabr);
- mtspr(SPRN_DAWR0, host_dawr0);
- mtspr(SPRN_DAWRX0, host_dawrx0);
- if (cpu_has_feature(CPU_FTR_DAWR1)) {
- mtspr(SPRN_DAWR1, host_dawr1);
- mtspr(SPRN_DAWRX1, host_dawrx1);
- }
- mtspr(SPRN_PID, host_pidr);
+/*
+ * Privileged (non-hypervisor) host registers to save.
+ */
+struct p9_host_os_sprs {
+ unsigned long dscr;
+ unsigned long tidr;
+ unsigned long iamr;
+ unsigned long amr;
+ unsigned long fscr;
+};
- /*
- * Since this is radix, do a eieio; tlbsync; ptesync sequence in
- * case we interrupted the guest between a tlbie and a ptesync.
- */
- asm volatile("eieio; tlbsync; ptesync");
+static void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
+{
+ host_os_sprs->dscr = mfspr(SPRN_DSCR);
+ host_os_sprs->tidr = mfspr(SPRN_TIDR);
+ host_os_sprs->iamr = mfspr(SPRN_IAMR);
+ host_os_sprs->amr = mfspr(SPRN_AMR);
+ host_os_sprs->fscr = mfspr(SPRN_FSCR);
+}
- /*
- * cp_abort is required if the processor supports local copy-paste
- * to clear the copy buffer that was under control of the guest.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_31))
- asm volatile(PPC_CP_ABORT);
+/* vcpu guest regs must already be saved */
+static void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ mtspr(SPRN_PSPB, 0);
+ mtspr(SPRN_WORT, 0);
+ mtspr(SPRN_UAMOR, 0);
- mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */
- isync();
+ mtspr(SPRN_DSCR, host_os_sprs->dscr);
+ mtspr(SPRN_TIDR, host_os_sprs->tidr);
+ mtspr(SPRN_IAMR, host_os_sprs->iamr);
- vc->dpdes = mfspr(SPRN_DPDES);
- vc->vtb = mfspr(SPRN_VTB);
- mtspr(SPRN_DPDES, 0);
- if (vc->pcr)
- mtspr(SPRN_PCR, PCR_MASK);
+ if (host_os_sprs->amr != vcpu->arch.amr)
+ mtspr(SPRN_AMR, host_os_sprs->amr);
- if (vc->tb_offset_applied) {
- u64 new_tb = mftb() - vc->tb_offset_applied;
- mtspr(SPRN_TBU40, new_tb);
- tb = mftb();
- if ((tb & 0xffffff) < (new_tb & 0xffffff))
- mtspr(SPRN_TBU40, new_tb + 0x1000000);
- vc->tb_offset_applied = 0;
- }
+ if (host_os_sprs->fscr != vcpu->arch.fscr)
+ mtspr(SPRN_FSCR, host_os_sprs->fscr);
- mtspr(SPRN_HDEC, 0x7fffffff);
- mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
+ /* Set host runlatch back to 1 (guest CTRL was already saved by store_spr_state()) */
+ if (!(vcpu->arch.ctrl & 1))
+ mtspr(SPRN_CTRLT, 1);
+}
- return trap;
+static inline bool hcall_is_xics(unsigned long req)
+{
+ return req == H_EOI || req == H_CPPR || req == H_IPI ||
+ req == H_IPOLL || req == H_XIRR || req == H_XIRR_X;
}
/*
- * Virtual-mode guest entry for POWER9 and later when the host and
- * guest are both using the radix MMU. The LPIDR has already been set.
+ * Guest entry for POWER9 and later CPUs.
*/
static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
- unsigned long host_dscr = mfspr(SPRN_DSCR);
- unsigned long host_tidr = mfspr(SPRN_TIDR);
- unsigned long host_iamr = mfspr(SPRN_IAMR);
- unsigned long host_amr = mfspr(SPRN_AMR);
- unsigned long host_fscr = mfspr(SPRN_FSCR);
+ struct p9_host_os_sprs host_os_sprs;
s64 dec;
u64 tb;
int trap, save_pmu;
+ WARN_ON_ONCE(vcpu->arch.ceded);
+
dec = mfspr(SPRN_DEC);
tb = mftb();
if (dec < 0)
@@ -3626,7 +3830,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
if (local_paca->kvm_hstate.dec_expires < time_limit)
time_limit = local_paca->kvm_hstate.dec_expires;
- vcpu->arch.ceded = 0;
+ save_p9_host_os_sprs(&host_os_sprs);
kvmhv_save_host_pmu(); /* saves it to PACA kvm_hstate */
@@ -3655,24 +3859,20 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
#endif
mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
- mtspr(SPRN_DSCR, vcpu->arch.dscr);
- mtspr(SPRN_IAMR, vcpu->arch.iamr);
- mtspr(SPRN_PSPB, vcpu->arch.pspb);
- mtspr(SPRN_FSCR, vcpu->arch.fscr);
- mtspr(SPRN_TAR, vcpu->arch.tar);
- mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
- mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
- mtspr(SPRN_BESCR, vcpu->arch.bescr);
- mtspr(SPRN_WORT, vcpu->arch.wort);
- mtspr(SPRN_TIDR, vcpu->arch.tid);
- mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
- mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
- mtspr(SPRN_AMR, vcpu->arch.amr);
- mtspr(SPRN_UAMOR, vcpu->arch.uamor);
-
- if (!(vcpu->arch.ctrl & 1))
- mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
+ load_spr_state(vcpu);
+ /*
+ * When setting DEC we must always race against irq_work_raise
+ * running in an NMI: if an NMI hits right as we switch into guest
+ * mode, sets pending work and sets DEC, that DEC value will apply
+ * to the guest and will not bring us back to the host.
+ *
+ * irq_work_raise could check a flag (or possibly LPCR[HDICE] for
+ * example) and set HDEC to 1? That wouldn't solve the nested HV
+ * case, which needs to abort the hcall or zero the time limit.
+ *
+ * XXX: Another day's problem.
+ */
mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
if (kvmhv_on_pseries()) {
@@ -3680,7 +3880,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
* We need to save and restore the guest visible part of the
* psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
* doesn't do this for us. Note only required if pseries since
- * this is done in kvmhv_load_hv_regs_and_go() below otherwise.
+ * this is done in kvmhv_vcpu_entry_p9() below otherwise.
*/
unsigned long host_psscr;
/* call our hypervisor to load up HV regs and go */
@@ -3700,6 +3900,8 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
hvregs.vcpu_token = vcpu->vcpu_id;
}
hvregs.hdec_expiry = time_limit;
+ mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
+ mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
__pa(&vcpu->arch.regs));
kvmhv_restore_hv_return_state(vcpu, &hvregs);
@@ -3712,15 +3914,41 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
/* H_CEDE has to be handled now, not later */
if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
- kvmppc_nested_cede(vcpu);
+ kvmppc_cede(vcpu);
kvmppc_set_gpr(vcpu, 3, 0);
trap = 0;
}
} else {
- trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
+ kvmppc_xive_push_vcpu(vcpu);
+ trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr);
+ if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
+ !(vcpu->arch.shregs.msr & MSR_PR)) {
+ unsigned long req = kvmppc_get_gpr(vcpu, 3);
+
+ /* H_CEDE has to be handled now, not later */
+ if (req == H_CEDE) {
+ kvmppc_cede(vcpu);
+ kvmppc_xive_rearm_escalation(vcpu); /* may un-cede */
+ kvmppc_set_gpr(vcpu, 3, 0);
+ trap = 0;
+
+ /* XICS hcalls must be handled before xive is pulled */
+ } else if (hcall_is_xics(req)) {
+ int ret;
+
+ ret = kvmppc_xive_xics_hcall(vcpu, req);
+ if (ret != H_TOO_HARD) {
+ kvmppc_set_gpr(vcpu, 3, ret);
+ trap = 0;
+ }
+ }
+ }
+ kvmppc_xive_pull_vcpu(vcpu);
+
+ if (kvm_is_radix(vcpu->kvm))
+ vcpu->arch.slb_max = 0;
}
- vcpu->arch.slb_max = 0;
dec = mfspr(SPRN_DEC);
if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
dec = (s32) dec;
@@ -3728,34 +3956,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.dec_expires = dec + tb;
vcpu->cpu = -1;
vcpu->arch.thread_cpu = -1;
- vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
-
- vcpu->arch.iamr = mfspr(SPRN_IAMR);
- vcpu->arch.pspb = mfspr(SPRN_PSPB);
- vcpu->arch.fscr = mfspr(SPRN_FSCR);
- vcpu->arch.tar = mfspr(SPRN_TAR);
- vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
- vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
- vcpu->arch.bescr = mfspr(SPRN_BESCR);
- vcpu->arch.wort = mfspr(SPRN_WORT);
- vcpu->arch.tid = mfspr(SPRN_TIDR);
- vcpu->arch.amr = mfspr(SPRN_AMR);
- vcpu->arch.uamor = mfspr(SPRN_UAMOR);
- vcpu->arch.dscr = mfspr(SPRN_DSCR);
-
- mtspr(SPRN_PSPB, 0);
- mtspr(SPRN_WORT, 0);
- mtspr(SPRN_UAMOR, 0);
- mtspr(SPRN_DSCR, host_dscr);
- mtspr(SPRN_TIDR, host_tidr);
- mtspr(SPRN_IAMR, host_iamr);
- mtspr(SPRN_PSPB, 0);
- if (host_amr != vcpu->arch.amr)
- mtspr(SPRN_AMR, host_amr);
+ store_spr_state(vcpu);
- if (host_fscr != vcpu->arch.fscr)
- mtspr(SPRN_FSCR, host_fscr);
+ restore_p9_host_os_sprs(vcpu, &host_os_sprs);
msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
store_fp_state(&vcpu->arch.fp);
@@ -3785,6 +3989,9 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vc->in_guest = 0;
mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
+ /* We may have raced with new irq work */
+ if (test_irq_work_pending())
+ set_dec(1);
mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
kvmhv_load_host_pmu();
@@ -3885,7 +4092,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
cur = start_poll = ktime_get();
if (vc->halt_poll_ns) {
ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
- ++vc->runner->stat.halt_attempted_poll;
+ ++vc->runner->stat.generic.halt_attempted_poll;
vc->vcore_state = VCORE_POLLING;
spin_unlock(&vc->lock);
@@ -3896,13 +4103,13 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
break;
}
cur = ktime_get();
- } while (single_task_running() && ktime_before(cur, stop));
+ } while (kvm_vcpu_can_poll(cur, stop));
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
if (!do_sleep) {
- ++vc->runner->stat.halt_successful_poll;
+ ++vc->runner->stat.generic.halt_successful_poll;
goto out;
}
}
@@ -3914,7 +4121,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
do_sleep = 0;
/* If we polled, count this as a successful poll */
if (vc->halt_poll_ns)
- ++vc->runner->stat.halt_successful_poll;
+ ++vc->runner->stat.generic.halt_successful_poll;
goto out;
}
@@ -3941,13 +4148,13 @@ out:
ktime_to_ns(cur) - ktime_to_ns(start_wait);
/* Attribute failed poll time */
if (vc->halt_poll_ns)
- vc->runner->stat.halt_poll_fail_ns +=
+ vc->runner->stat.generic.halt_poll_fail_ns +=
ktime_to_ns(start_wait) -
ktime_to_ns(start_poll);
} else {
/* Attribute successful poll time */
if (vc->halt_poll_ns)
- vc->runner->stat.halt_poll_success_ns +=
+ vc->runner->stat.generic.halt_poll_success_ns +=
ktime_to_ns(cur) -
ktime_to_ns(start_poll);
}
@@ -3974,7 +4181,6 @@ out:
/*
* This never fails for a radix guest, as none of the operations it does
* for a radix guest can fail or have a way to report failure.
- * kvmhv_run_single_vcpu() relies on this fact.
*/
static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
{
@@ -4130,7 +4336,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
{
struct kvm_run *run = vcpu->run;
int trap, r, pcpu;
- int srcu_idx, lpid;
+ int srcu_idx;
struct kvmppc_vcore *vc;
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *nested = vcpu->arch.nested;
@@ -4153,8 +4359,15 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
vc->runner = vcpu;
/* See if the MMU is ready to go */
- if (!kvm->arch.mmu_ready)
- kvmhv_setup_mmu(vcpu);
+ if (!kvm->arch.mmu_ready) {
+ r = kvmhv_setup_mmu(vcpu);
+ if (r) {
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ run->fail_entry.hardware_entry_failure_reason = 0;
+ vcpu->arch.ret = r;
+ return r;
+ }
+ }
if (need_resched())
cond_resched();
@@ -4167,7 +4380,8 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
preempt_disable();
pcpu = smp_processor_id();
vc->pcpu = pcpu;
- kvmppc_prepare_radix_vcpu(vcpu, pcpu);
+ if (kvm_is_radix(kvm))
+ kvmppc_prepare_radix_vcpu(vcpu, pcpu);
local_irq_disable();
hard_irq_disable();
@@ -4204,13 +4418,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
vc->vcore_state = VCORE_RUNNING;
trace_kvmppc_run_core(vc, 0);
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
- mtspr(SPRN_LPID, lpid);
- isync();
- kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
- }
-
guest_enter_irqoff();
srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -4229,11 +4436,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
srcu_read_unlock(&kvm->srcu, srcu_idx);
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- mtspr(SPRN_LPID, kvm->arch.host_lpid);
- isync();
- }
-
set_irq_happened(trap);
kvmppc_set_host_core(pcpu);
@@ -4379,19 +4581,23 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
do {
- /*
- * The TLB prefetch bug fixup is only in the kvmppc_run_vcpu
- * path, which also handles hash and dependent threads mode.
- */
- if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
- !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
vcpu->arch.vcore->lpcr);
else
r = kvmppc_run_vcpu(vcpu);
- if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
- !(vcpu->arch.shregs.msr & MSR_PR)) {
+ if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
+ if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
+ /*
+ * These should have been caught and reflected
+ * into the guest by now. Final sanity check:
+ * don't allow userspace to execute hcalls in
+ * the hypervisor.
+ */
+ r = RESUME_GUEST;
+ continue;
+ }
trace_kvm_hcall_enter(vcpu);
r = kvmppc_pseries_do_hcall(vcpu);
trace_kvm_hcall_exit(vcpu, r);
@@ -4415,12 +4621,14 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
mtspr(SPRN_EBBRR, ebb_regs[1]);
mtspr(SPRN_BESCR, ebb_regs[2]);
mtspr(SPRN_TAR, user_tar);
- mtspr(SPRN_FSCR, current->thread.fscr);
}
mtspr(SPRN_VRSAVE, user_vrsave);
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&kvm->arch.vcpus_running);
+
+ srr_regs_clobbered();
+
return r;
}
@@ -4641,8 +4849,10 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
struct kvmppc_vcore *vc = kvm->arch.vcores[i];
if (!vc)
continue;
+
spin_lock(&vc->lock);
vc->lpcr = (vc->lpcr & ~mask) | lpcr;
+ verify_lpcr(kvm, vc->lpcr);
spin_unlock(&vc->lock);
if (++cores_done >= kvm->arch.online_vcores)
break;
@@ -4717,8 +4927,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
/* Look up the VMA for the start of this memory slot */
hva = memslot->userspace_addr;
mmap_read_lock(kvm->mm);
- vma = find_vma(kvm->mm, hva);
- if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
+ vma = vma_lookup(kvm->mm, hva);
+ if (!vma || (vma->vm_flags & VM_IO))
goto up_out;
psize = vma_kernel_pagesize(vma);
@@ -4770,7 +4980,7 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
kvmhv_release_all_nested(kvm);
kvmppc_rmap_reset(kvm);
kvm->arch.process_table = 0;
- /* Mutual exclusion with kvm_unmap_hva_range etc. */
+ /* Mutual exclusion with kvm_unmap_gfn_range etc. */
spin_lock(&kvm->mmu_lock);
kvm->arch.radix = 0;
spin_unlock(&kvm->mmu_lock);
@@ -4792,7 +5002,7 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
if (err)
return err;
kvmppc_rmap_reset(kvm);
- /* Mutual exclusion with kvm_unmap_hva_range etc. */
+ /* Mutual exclusion with kvm_unmap_gfn_range etc. */
spin_lock(&kvm->mmu_lock);
kvm->arch.radix = 1;
spin_unlock(&kvm->mmu_lock);
@@ -4970,6 +5180,7 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
kvmppc_setup_partition_table(kvm);
}
+ verify_lpcr(kvm, lpcr);
kvm->arch.lpcr = lpcr;
/* Initialization for future HPT resizes */
@@ -4996,18 +5207,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
/*
* Track that we now have a HV mode VM active. This blocks secondary
* CPU threads from coming online.
- * On POWER9, we only need to do this if the "indep_threads_mode"
- * module parameter has been set to N.
*/
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- if (!indep_threads_mode && !cpu_has_feature(CPU_FTR_HVMODE)) {
- pr_warn("KVM: Ignoring indep_threads_mode=N in nested hypervisor\n");
- kvm->arch.threads_indep = true;
- } else {
- kvm->arch.threads_indep = indep_threads_mode;
- }
- }
- if (!kvm->arch.threads_indep)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
kvm_hv_vm_activated();
/*
@@ -5048,7 +5249,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
{
debugfs_remove_recursive(kvm->arch.debugfs_dir);
- if (!kvm->arch.threads_indep)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
kvm_hv_vm_deactivated();
kvmppc_free_vcores(kvm);
@@ -5369,8 +5570,10 @@ static unsigned int default_hcall_list[] = {
H_READ,
H_PROTECT,
H_BULK_REMOVE,
+#ifdef CONFIG_SPAPR_TCE_IOMMU
H_GET_TCE,
H_PUT_TCE,
+#endif
H_SET_DABR,
H_SET_XDABR,
H_CEDE,
@@ -5467,7 +5670,9 @@ static int kvmhv_enable_nested(struct kvm *kvm)
{
if (!nested)
return -EPERM;
- if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return -ENODEV;
+ if (!radix_enabled())
return -ENODEV;
/* kvm == NULL means the caller is testing if the capability exists */
@@ -5630,11 +5835,25 @@ static int kvmhv_enable_dawr1(struct kvm *kvm)
static bool kvmppc_hash_v3_possible(void)
{
- if (radix_enabled() && no_mixing_hpt_and_radix)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
return false;
- return cpu_has_feature(CPU_FTR_ARCH_300) &&
- cpu_has_feature(CPU_FTR_HVMODE);
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return false;
+
+ /*
+ * POWER9 chips before version 2.02 can't have some threads in
+ * HPT mode and some in radix mode on the same core.
+ */
+ if (radix_enabled()) {
+ unsigned int pvr = mfspr(SPRN_PVR);
+ if ((pvr >> 16) == PVR_POWER9 &&
+ (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
+ ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
+ return false;
+ }
+
+ return true;
}
static struct kvmppc_ops kvm_ops_hv = {
@@ -5654,10 +5873,10 @@ static struct kvmppc_ops kvm_ops_hv = {
.flush_memslot = kvmppc_core_flush_memslot_hv,
.prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
.commit_memory_region = kvmppc_core_commit_memory_region_hv,
- .unmap_hva_range = kvm_unmap_hva_range_hv,
- .age_hva = kvm_age_hva_hv,
- .test_age_hva = kvm_test_age_hva_hv,
- .set_spte_hva = kvm_set_spte_hva_hv,
+ .unmap_gfn_range = kvm_unmap_gfn_range_hv,
+ .age_gfn = kvm_age_gfn_hv,
+ .test_age_gfn = kvm_test_age_gfn_hv,
+ .set_spte_gfn = kvm_set_spte_gfn_hv,
.free_memslot = kvmppc_core_free_memslot_hv,
.init_vm = kvmppc_core_init_vm_hv,
.destroy_vm = kvmppc_core_destroy_vm_hv,
@@ -5778,18 +5997,6 @@ static int kvmppc_book3s_init_hv(void)
if (kvmppc_radix_possible())
r = kvmppc_radix_init();
- /*
- * POWER9 chips before version 2.02 can't have some threads in
- * HPT mode and some in radix mode on the same core.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- unsigned int pvr = mfspr(SPRN_PVR);
- if ((pvr >> 16) == PVR_POWER9 &&
- (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
- ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
- no_mixing_hpt_and_radix = true;
- }
-
r = kvmppc_uvmem_init();
if (r < 0)
pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r);
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 158d309b42a3..be8ef1c5b1bf 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -35,21 +35,6 @@
#include "book3s_xive.h"
/*
- * The XIVE module will populate these when it loads
- */
-unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
-unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
-int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr);
-int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
-int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
-EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
-EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
-
-/*
* Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
* should be a power of 2.
*/
@@ -196,16 +181,9 @@ int kvmppc_hwrng_present(void)
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
-long kvmppc_h_random(struct kvm_vcpu *vcpu)
+long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
{
- int r;
-
- /* Only need to do the expensive mfmsr() on radix */
- if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
- r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
- else
- r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
- if (r)
+ if (powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]))
return H_SUCCESS;
return H_HARDWARE;
@@ -221,15 +199,6 @@ void kvmhv_rm_send_ipi(int cpu)
void __iomem *xics_phys;
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
- /* For a nested hypervisor, use the XICS via hcall */
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
- IPI_PRIORITY);
- return;
- }
-
/* On POWER9 we can use msgsnd for any destination cpu. */
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
msg |= get_hard_smp_processor_id(cpu);
@@ -442,19 +411,12 @@ static long kvmppc_read_one_intr(bool *again)
return 1;
/* Now read the interrupt from the ICP */
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
- xirr = cpu_to_be32(retbuf[0]);
- } else {
- xics_phys = local_paca->kvm_hstate.xics_phys;
- rc = 0;
- if (!xics_phys)
- rc = opal_int_get_xirr(&xirr, false);
- else
- xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
- }
+ xics_phys = local_paca->kvm_hstate.xics_phys;
+ rc = 0;
+ if (!xics_phys)
+ rc = opal_int_get_xirr(&xirr, false);
+ else
+ xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
if (rc < 0)
return 1;
@@ -483,13 +445,7 @@ static long kvmppc_read_one_intr(bool *again)
*/
if (xisr == XICS_IPI) {
rc = 0;
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- plpar_hcall_raw(H_IPI, retbuf,
- hard_smp_processor_id(), 0xff);
- plpar_hcall_raw(H_EOI, retbuf, h_xirr);
- } else if (xics_phys) {
+ if (xics_phys) {
__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
} else {
@@ -515,13 +471,7 @@ static long kvmppc_read_one_intr(bool *again)
/* We raced with the host,
* we need to resend that IPI, bummer
*/
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- plpar_hcall_raw(H_IPI, retbuf,
- hard_smp_processor_id(),
- IPI_PRIORITY);
- } else if (xics_phys)
+ if (xics_phys)
__raw_rm_writeb(IPI_PRIORITY,
xics_phys + XICS_MFRR);
else
@@ -541,22 +491,13 @@ static long kvmppc_read_one_intr(bool *again)
}
#ifdef CONFIG_KVM_XICS
-static inline bool is_rm(void)
-{
- return !(mfmsr() & MSR_DR);
-}
-
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_xirr(vcpu);
- if (unlikely(!__xive_vm_h_xirr))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_xirr(vcpu);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_xirr(vcpu);
+ else
return xics_rm_h_xirr(vcpu);
}
@@ -565,13 +506,9 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
vcpu->arch.regs.gpr[5] = get_tb();
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_xirr(vcpu);
- if (unlikely(!__xive_vm_h_xirr))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_xirr(vcpu);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_xirr(vcpu);
+ else
return xics_rm_h_xirr(vcpu);
}
@@ -579,13 +516,9 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_ipoll(vcpu, server);
- if (unlikely(!__xive_vm_h_ipoll))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_ipoll(vcpu, server);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_ipoll(vcpu, server);
+ else
return H_TOO_HARD;
}
@@ -594,13 +527,9 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_ipi(vcpu, server, mfrr);
- if (unlikely(!__xive_vm_h_ipi))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_ipi(vcpu, server, mfrr);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_ipi(vcpu, server, mfrr);
+ else
return xics_rm_h_ipi(vcpu, server, mfrr);
}
@@ -608,13 +537,9 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_cppr(vcpu, cppr);
- if (unlikely(!__xive_vm_h_cppr))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_cppr(vcpu, cppr);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_cppr(vcpu, cppr);
+ else
return xics_rm_h_cppr(vcpu, cppr);
}
@@ -622,13 +547,9 @@ int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_eoi(vcpu, xirr);
- if (unlikely(!__xive_vm_h_eoi))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_eoi(vcpu, xirr);
- } else
+ if (xics_on_xive())
+ return xive_rm_h_eoi(vcpu, xirr);
+ else
return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */
@@ -662,6 +583,9 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
+ /* Guest must always run with ME enabled, HV disabled. */
+ msr = (msr | MSR_ME) & ~MSR_HV;
+
/*
* Check for illegal transactional state bit combination
* and if we find it, force the TS field to a safe state.
@@ -797,7 +721,7 @@ void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
* Thus we make all 4 threads use the same bit.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300))
- pcpu = cpu_first_thread_sibling(pcpu);
+ pcpu = cpu_first_tlb_thread_sibling(pcpu);
if (nested)
need_tlb_flush = &nested->need_tlb_flush;
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 327417d79eac..4444f83cb133 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -58,7 +58,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
* Put whatever is in the decrementer into the
* hypervisor decrementer.
- * Because of a hardware deviation in P8 and P9,
+ * Because of a hardware deviation in P8,
* we need to set LPCR[HDICE] before writing HDEC.
*/
ld r5, HSTATE_KVM_VCORE(r13)
@@ -67,15 +67,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
ori r8, r9, LPCR_HDICE
mtspr SPRN_LPCR, r8
isync
- andis. r0, r9, LPCR_LD@h
mfspr r8,SPRN_DEC
mftb r7
-BEGIN_FTR_SECTION
- /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
- bne 32f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r8,r8
-32: mtspr SPRN_HDEC,r8
+ mtspr SPRN_HDEC,r8
add r8,r8,r7
std r8,HSTATE_DECEXP(r13)
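The EXTEND_HDEC removal above works because POWER9 is no longer handled by this asm; on the C side the same sign-extension rule survives in kvmhv_p9_guest_entry(). A minimal sketch of that rule (the helper itself is illustrative):

/*
 * Without the large decrementer (LPCR[LD] clear), DEC/HDEC are
 * 32-bit signed quantities and must be sign-extended before doing
 * 64-bit arithmetic on them, as the C exit path does.
 */
static inline s64 read_dec_signed(unsigned long lpcr)
{
	s64 dec = mfspr(SPRN_DEC);

	if (!(lpcr & LPCR_LD))
		dec = (s32)dec;		/* sign extend from 32 bits */
	return dec;
}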
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 0cd0e7aad588..8543ad538b0c 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -19,6 +19,7 @@
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>
+#include <asm/plpar_wrappers.h>
static struct patb_entry *pseries_partition_tb;
@@ -53,7 +54,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
hr->dawrx1 = vcpu->arch.dawrx1;
}
-static void byteswap_pt_regs(struct pt_regs *regs)
+/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
+static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
{
unsigned long *addr = (unsigned long *) regs;
@@ -132,8 +134,33 @@ static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
}
}
+/*
+ * This can result in some L0 HV register state being leaked to an L1
+ * hypervisor when the hv_guest_state is copied back to the guest after
+ * being modified here.
+ *
+ * There is no known problem with such a leak, and in many cases these
+ * register settings could be derived by the guest by observing behaviour
+ * and timing, interrupts, etc., but it is an issue to consider.
+ */
static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ u64 mask;
+
+ /*
+ * Don't let L1 change LPCR bits for the L2 except these:
+ */
+ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
+ LPCR_LPES | LPCR_MER;
+
+ /*
+ * Additional filtering is required depending on hardware
+ * and configuration.
+ */
+ hr->lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm,
+ (vc->lpcr & ~mask) | (hr->lpcr & mask));
+
/*
* Don't let L1 enable features for L2 which we've disabled for L1,
* but preserve the interrupt cause field.
@@ -271,8 +298,6 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
u64 hv_ptr, regs_ptr;
u64 hdec_exp;
s64 delta_purr, delta_spurr, delta_ic, delta_vtb;
- u64 mask;
- unsigned long lpcr;
if (vcpu->kvm->arch.l1_ptcr == 0)
return H_NOT_AVAILABLE;
@@ -320,10 +345,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
vcpu->arch.nested = l2;
vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
vcpu->arch.regs = l2_regs;
- vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
- mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
- LPCR_LPES | LPCR_MER;
- lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask);
+
+ /* Guest must always run with ME enabled, HV disabled. */
+ vcpu->arch.shregs.msr = (vcpu->arch.regs.msr | MSR_ME) & ~MSR_HV;
+
sanitise_hv_regs(vcpu, &l2_hv);
restore_hv_regs(vcpu, &l2_hv);
@@ -335,7 +360,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
r = RESUME_HOST;
break;
}
- r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
+ r = kvmhv_run_single_vcpu(vcpu, hdec_exp, l2_hv.lpcr);
} while (is_kvmppc_resume_guest(r));
/* save L2 state for return */
@@ -444,8 +469,15 @@ static void kvmhv_flush_lpid(unsigned int lpid)
return;
}
- rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
- lpid, TLBIEL_INVAL_SET_LPID);
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ else
+ rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+ H_RPTI_TYPE_NESTED |
+ H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
+ H_RPTI_TYPE_PAT,
+ H_RPTI_PAGE_ALL, 0, -1UL);
if (rc)
pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}
@@ -1191,6 +1223,113 @@ long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
return H_SUCCESS;
}
+static long do_tlb_invalidate_nested_all(struct kvm_vcpu *vcpu,
+ unsigned long lpid, unsigned long ric)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *gp;
+
+ gp = kvmhv_get_nested(kvm, lpid, false);
+ if (gp) {
+ kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
+ kvmhv_put_nested(gp);
+ }
+ return H_SUCCESS;
+}
+
+/*
+ * Number of pages above which we invalidate the entire LPID rather than
+ * flush individual pages.
+ */
+static unsigned long tlb_range_flush_page_ceiling __read_mostly = 33;
+
+static long do_tlb_invalidate_nested_tlb(struct kvm_vcpu *vcpu,
+ unsigned long lpid,
+ unsigned long pg_sizes,
+ unsigned long start,
+ unsigned long end)
+{
+ int ret = H_P4;
+ unsigned long addr, nr_pages;
+ struct mmu_psize_def *def;
+ unsigned long psize, ap, page_size;
+ bool flush_lpid;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+ def = &mmu_psize_defs[psize];
+ if (!(pg_sizes & def->h_rpt_pgsize))
+ continue;
+
+ nr_pages = (end - start) >> def->shift;
+ flush_lpid = nr_pages > tlb_range_flush_page_ceiling;
+ if (flush_lpid)
+ return do_tlb_invalidate_nested_all(vcpu, lpid,
+ RIC_FLUSH_TLB);
+ addr = start;
+ ap = mmu_get_ap(psize);
+ page_size = 1UL << def->shift;
+ do {
+ ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap,
+ get_epn(addr));
+ if (ret)
+ return H_P4;
+ addr += page_size;
+ } while (addr < end);
+ }
+ return ret;
+}
+
+/*
+ * Performs partition-scoped invalidations for nested guests
+ * as part of H_RPT_INVALIDATE hcall.
+ */
+long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end)
+{
+ /*
+ * If the L2 lpid isn't valid, we need to return H_PARAMETER.
+ *
+ * However, nested KVM issues an L2 lpid flush call when creating
+ * partition table entries for L2. This happens even before the
+ * corresponding shadow lpid is created in the HV, which happens in
+ * the H_ENTER_NESTED call. Since we can't differentiate this case
+ * from the invalid case, we ignore such flush requests and return
+ * success.
+ */
+ if (!kvmhv_find_nested(vcpu->kvm, lpid))
+ return H_SUCCESS;
+
+ /*
+ * A flush all request can be handled by a full lpid flush only.
+ */
+ if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL)
+ return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);
+
+ /*
+ * We don't need to handle a PWC flush like the process-scoped table
+ * here, because the intermediate partition-scoped table in a nested
+ * guest doesn't really have a PWC. The only level with a PWC is L0,
+ * and for a nested invalidate at L0 we always do kvmhv_flush_lpid(),
+ * which does radix__flush_all_lpid(). A range invalidate at any
+ * level does not remove the higher-level page tables, hence no PWC
+ * invalidate is needed.
+ *
+ * if (type & H_RPTI_TYPE_PWC) {
+ * ret = do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_PWC);
+ * if (ret)
+ * return H_P4;
+ * }
+ */
+
+ if (start == 0 && end == -1)
+ return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB);
+
+ if (type & H_RPTI_TYPE_TLB)
+ return do_tlb_invalidate_nested_tlb(vcpu, lpid, pg_sizes,
+ start, end);
+ return H_SUCCESS;
+}
+
/* Used to convert a nested guest real address to an L1 guest real address */
static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
struct kvm_nested_guest *gp,
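The page-count ceiling in do_tlb_invalidate_nested_tlb() above is easy to check by hand: with 64K pages (shift 16) a range wider than 33 pages, i.e. a little over 2MiB, is cheaper to handle as a whole-LPID flush. A minimal sketch of the decision (the helper is illustrative):

/*
 * Sketch of the heuristic above: flush the whole LPID once the
 * range would need more than tlb_range_flush_page_ceiling (33)
 * individual tlbie operations for this page size.
 */
static bool range_wants_lpid_flush(unsigned long start, unsigned long end,
				   unsigned int shift)
{
	unsigned long nr_pages = (end - start) >> shift;

	return nr_pages > 33;	/* tlb_range_flush_page_ceiling */
}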
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
new file mode 100644
index 000000000000..83f592eadcd2
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <asm/asm-prototypes.h>
+#include <asm/dbell.h>
+#include <asm/kvm_ppc.h>
+#include <asm/ppc-opcode.h>
+
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+static void __start_timing(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ u64 tb = mftb() - vc->tb_offset_applied;
+
+ vcpu->arch.cur_activity = next;
+ vcpu->arch.cur_tb_start = tb;
+}
+
+static void __accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ struct kvmhv_tb_accumulator *curr;
+ u64 tb = mftb() - vc->tb_offset_applied;
+ u64 prev_tb;
+ u64 delta;
+ u64 seq;
+
+ curr = vcpu->arch.cur_activity;
+ vcpu->arch.cur_activity = next;
+ prev_tb = vcpu->arch.cur_tb_start;
+ vcpu->arch.cur_tb_start = tb;
+
+ if (!curr)
+ return;
+
+ delta = tb - prev_tb;
+
+ seq = curr->seqcount;
+ curr->seqcount = seq + 1;
+ smp_wmb();
+ curr->tb_total += delta;
+ if (seq == 0 || delta < curr->tb_min)
+ curr->tb_min = delta;
+ if (delta > curr->tb_max)
+ curr->tb_max = delta;
+ smp_wmb();
+ curr->seqcount = seq + 2;
+}
+
+#define start_timing(vcpu, next) __start_timing(vcpu, next)
+#define end_timing(vcpu) __start_timing(vcpu, NULL)
+#define accumulate_time(vcpu, next) __accumulate_time(vcpu, next)
+#else
+#define start_timing(vcpu, next) do {} while (0)
+#define end_timing(vcpu) do {} while (0)
+#define accumulate_time(vcpu, next) do {} while (0)
+#endif
+
+static inline void mfslb(unsigned int idx, u64 *slbee, u64 *slbev)
+{
+ asm volatile("slbmfev %0,%1" : "=r" (*slbev) : "r" (idx));
+ asm volatile("slbmfee %0,%1" : "=r" (*slbee) : "r" (idx));
+}
+
+static inline void mtslb(u64 slbee, u64 slbev)
+{
+ asm volatile("slbmte %0,%1" :: "r" (slbev), "r" (slbee));
+}
+
+static inline void clear_slb_entry(unsigned int idx)
+{
+ mtslb(idx, 0);
+}
+
+static inline void slb_clear_invalidate_partition(void)
+{
+ clear_slb_entry(0);
+ asm volatile(PPC_SLBIA(6));
+}
+
+/*
+ * Malicious or buggy radix guests may have inserted SLB entries
+ * (only 0..3 because radix always runs with UPRT=1), so these must
+ * be cleared here to avoid side-channels. slbmte is used rather
+ * than slbia, as it won't clear cached translations.
+ */
+static void radix_clear_slb(void)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ clear_slb_entry(i);
+}
+
+static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
+{
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
+ u32 lpid;
+
+ lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
+
+ /*
+ * All the isync()s are overkill but trivially follow the ISA
+ * requirements. Some can likely be replaced with justification
+ * comment for why they are not needed.
+ */
+ isync();
+ mtspr(SPRN_LPID, lpid);
+ isync();
+ mtspr(SPRN_LPCR, lpcr);
+ isync();
+ mtspr(SPRN_PID, vcpu->arch.pid);
+ isync();
+}
+
+static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
+{
+ u32 lpid;
+ int i;
+
+ lpid = kvm->arch.lpid;
+
+ mtspr(SPRN_LPID, lpid);
+ mtspr(SPRN_LPCR, lpcr);
+ mtspr(SPRN_PID, vcpu->arch.pid);
+
+ for (i = 0; i < vcpu->arch.slb_max; i++)
+ mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
+
+ isync();
+}
+
+static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
+{
+ isync();
+ mtspr(SPRN_PID, pid);
+ isync();
+ mtspr(SPRN_LPID, kvm->arch.host_lpid);
+ isync();
+ mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
+ isync();
+
+ if (!radix_enabled())
+ slb_restore_bolted_realmode();
+}
+
+static void save_clear_host_mmu(struct kvm *kvm)
+{
+ if (!radix_enabled()) {
+ /*
+ * A hash host could save and restore host SLB entries to
+ * reduce the SLB fault overhead of VM exits, but for now the
+ * existing code clears all entries and restores just the
+ * bolted ones when switching back to the host.
+ */
+ slb_clear_invalidate_partition();
+ }
+}
+
+static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
+{
+ if (kvm_is_radix(kvm)) {
+ radix_clear_slb();
+ } else {
+ int i;
+ int nr = 0;
+
+ /*
+ * This must run before switching to host (radix host can't
+ * access all SLBs).
+ */
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
+ u64 slbee, slbev;
+ mfslb(i, &slbee, &slbev);
+ if (slbee & SLB_ESID_V) {
+ vcpu->arch.slb[nr].orige = slbee | i;
+ vcpu->arch.slb[nr].origv = slbev;
+ nr++;
+ }
+ }
+ vcpu->arch.slb_max = nr;
+ slb_clear_invalidate_partition();
+ }
+}
+
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ s64 hdec;
+ u64 tb, purr, spurr;
+ u64 *exsave;
+ bool ri_set;
+ int trap;
+ unsigned long msr;
+ unsigned long host_hfscr;
+ unsigned long host_ciabr;
+ unsigned long host_dawr0;
+ unsigned long host_dawrx0;
+ unsigned long host_psscr;
+ unsigned long host_pidr;
+ unsigned long host_dawr1;
+ unsigned long host_dawrx1;
+
+ hdec = time_limit - mftb();
+ if (hdec < 0)
+ return BOOK3S_INTERRUPT_HV_DECREMENTER;
+
+ WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
+ WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));
+
+ start_timing(vcpu, &vcpu->arch.rm_entry);
+
+ vcpu->arch.ceded = 0;
+
+ if (vc->tb_offset) {
+ u64 new_tb = mftb() + vc->tb_offset;
+ mtspr(SPRN_TBU40, new_tb);
+ tb = mftb();
+ if ((tb & 0xffffff) < (new_tb & 0xffffff))
+ mtspr(SPRN_TBU40, new_tb + 0x1000000);
+ vc->tb_offset_applied = vc->tb_offset;
+ }
+
+ msr = mfmsr();
+
+ host_hfscr = mfspr(SPRN_HFSCR);
+ host_ciabr = mfspr(SPRN_CIABR);
+ host_dawr0 = mfspr(SPRN_DAWR0);
+ host_dawrx0 = mfspr(SPRN_DAWRX0);
+ host_psscr = mfspr(SPRN_PSSCR);
+ host_pidr = mfspr(SPRN_PID);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ host_dawr1 = mfspr(SPRN_DAWR1);
+ host_dawrx1 = mfspr(SPRN_DAWRX1);
+ }
+
+ if (vc->pcr)
+ mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
+ mtspr(SPRN_DPDES, vc->dpdes);
+ mtspr(SPRN_VTB, vc->vtb);
+
+ local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
+ local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
+ mtspr(SPRN_PURR, vcpu->arch.purr);
+ mtspr(SPRN_SPURR, vcpu->arch.spurr);
+
+ if (dawr_enabled()) {
+ mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
+ mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
+ mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
+ }
+ }
+ mtspr(SPRN_CIABR, vcpu->arch.ciabr);
+ mtspr(SPRN_IC, vcpu->arch.ic);
+
+ mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
+ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+
+ mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
+
+ mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
+ mtspr(SPRN_HSRR1, (vcpu->arch.shregs.msr & ~MSR_HV) | MSR_ME);
+
+ /*
+ * On POWER9 DD2.1 and below, sometimes on a Hypervisor Data Storage
+ * Interrupt (HDSI) the HDSISR is not updated at all.
+ *
+ * To work around this we put a canary value into the HDSISR before
+ * returning to a guest and then check for this canary when we take
+ * an HDSI. If we find the canary on an HDSI, we know the hardware
+ * didn't update the HDSISR. In this case we return to the guest to
+ * retake the HDSI, which should correctly update the HDSISR the
+ * second time we take it.
+ *
+ * Just do this on all p9 processors for now.
+ */
+ mtspr(SPRN_HDSISR, HDSISR_CANARY);
+
+ mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
+ mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
+ mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
+ mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
+
+ mtspr(SPRN_AMOR, ~0UL);
+
+ local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_P9;
+
+ /*
+ * Hash host, hash guest, or radix guest with prefetch bug, all have
+ * to disable the MMU before switching to guest MMU state.
+ */
+ if (!radix_enabled() || !kvm_is_radix(kvm) ||
+ cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ __mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
+
+ save_clear_host_mmu(kvm);
+
+ if (kvm_is_radix(kvm)) {
+ switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
+ if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ __mtmsrd(0, 1); /* clear RI */
+
+ } else {
+ switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);
+ }
+
+ /* TLBIEL uses LPID=LPIDR, so run this after setting guest LPID */
+ kvmppc_check_need_tlb_flush(kvm, vc->pcpu, nested);
+
+ /*
+ * P9 suppresses the HDEC exception when LPCR[HDICE] = 0,
+ * so set guest LPCR (with HDICE) before writing HDEC.
+ */
+ mtspr(SPRN_HDEC, hdec);
+
+ mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
+ mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
+ mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
+ mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
+
+ accumulate_time(vcpu, &vcpu->arch.guest_time);
+
+ kvmppc_p9_enter_guest(vcpu);
+
+ accumulate_time(vcpu, &vcpu->arch.rm_intr);
+
+ /* XXX: Could get these from r11/12 and paca exsave instead */
+ vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
+ vcpu->arch.shregs.srr1 = mfspr(SPRN_SRR1);
+ vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
+ vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
+
+ /* 0x2 bit for HSRR is only used by PR and P7/8 HV paths, clear it */
+ trap = local_paca->kvm_hstate.scratch0 & ~0x2;
+
+ /* HSRR interrupts leave MSR[RI] unchanged, SRR interrupts clear it. */
+ ri_set = false;
+ if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK)) {
+ if (trap != BOOK3S_INTERRUPT_SYSCALL &&
+ (vcpu->arch.shregs.msr & MSR_RI))
+ ri_set = true;
+ exsave = local_paca->exgen;
+ } else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET) {
+ exsave = local_paca->exnmi;
+ } else { /* trap == 0x200 */
+ exsave = local_paca->exmc;
+ }
+
+ vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1;
+ vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;
+
+ /*
+ * Only set RI after reading machine check regs (DAR, DSISR, SRR0/1)
+ * and the hstate scratch registers (which we need to move into
+ * exsave to be re-entrant vs SRESET/MCE).
+ */
+ if (ri_set) {
+ if (unlikely(!(mfmsr() & MSR_RI))) {
+ __mtmsrd(MSR_RI, 1);
+ WARN_ON_ONCE(1);
+ }
+ } else {
+ WARN_ON_ONCE(mfmsr() & MSR_RI);
+ __mtmsrd(MSR_RI, 1);
+ }
+
+ vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
+ vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
+ vcpu->arch.regs.gpr[11] = exsave[EX_R11/sizeof(u64)];
+ vcpu->arch.regs.gpr[12] = exsave[EX_R12/sizeof(u64)];
+ vcpu->arch.regs.gpr[13] = exsave[EX_R13/sizeof(u64)];
+ vcpu->arch.ppr = exsave[EX_PPR/sizeof(u64)];
+ vcpu->arch.cfar = exsave[EX_CFAR/sizeof(u64)];
+ vcpu->arch.regs.ctr = exsave[EX_CTR/sizeof(u64)];
+
+ vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
+
+ if (unlikely(trap == BOOK3S_INTERRUPT_MACHINE_CHECK)) {
+ vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
+ vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
+ kvmppc_realmode_machine_check(vcpu);
+
+ } else if (unlikely(trap == BOOK3S_INTERRUPT_HMI)) {
+ kvmppc_realmode_hmi_handler();
+
+ } else if (trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST) {
+ vcpu->arch.emul_inst = mfspr(SPRN_HEIR);
+
+ } else if (trap == BOOK3S_INTERRUPT_H_DATA_STORAGE) {
+ vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
+ vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
+ vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);
+
+ } else if (trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
+ vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);
+
+ } else if (trap == BOOK3S_INTERRUPT_H_FAC_UNAVAIL) {
+ vcpu->arch.hfscr = mfspr(SPRN_HFSCR);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /*
+ * Softpatch interrupt for transactional memory emulation cases
+ * on POWER9 DD2.2. This is early in the guest exit path - we
+ * haven't saved registers or done a treclaim yet.
+ */
+ } else if (trap == BOOK3S_INTERRUPT_HV_SOFTPATCH) {
+ vcpu->arch.emul_inst = mfspr(SPRN_HEIR);
+
+ /*
+ * The cases we want to handle here are those where the guest
+ * is in real suspend mode and is trying to transition to
+ * transactional mode.
+ */
+ if (local_paca->kvm_hstate.fake_suspend &&
+ (vcpu->arch.shregs.msr & MSR_TS_S)) {
+ if (kvmhv_p9_tm_emulation_early(vcpu)) {
+ /* Prevent it being handled again. */
+ trap = 0;
+ }
+ }
+#endif
+ }
+
+ accumulate_time(vcpu, &vcpu->arch.rm_exit);
+
+ /* Advance host PURR/SPURR by the amount used by guest */
+ purr = mfspr(SPRN_PURR);
+ spurr = mfspr(SPRN_SPURR);
+ mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
+ purr - vcpu->arch.purr);
+ mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
+ spurr - vcpu->arch.spurr);
+ vcpu->arch.purr = purr;
+ vcpu->arch.spurr = spurr;
+
+ vcpu->arch.ic = mfspr(SPRN_IC);
+ vcpu->arch.pid = mfspr(SPRN_PID);
+ vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
+
+ vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
+ vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
+ vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
+ vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
+
+ /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
+ mtspr(SPRN_PSSCR, host_psscr |
+ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+ mtspr(SPRN_HFSCR, host_hfscr);
+ mtspr(SPRN_CIABR, host_ciabr);
+ mtspr(SPRN_DAWR0, host_dawr0);
+ mtspr(SPRN_DAWRX0, host_dawrx0);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ mtspr(SPRN_DAWR1, host_dawr1);
+ mtspr(SPRN_DAWRX1, host_dawrx1);
+ }
+
+ if (kvm_is_radix(kvm)) {
+ /*
+ * Since this is radix, do a eieio; tlbsync; ptesync sequence
+ * in case we interrupted the guest between a tlbie and a
+ * ptesync.
+ */
+ asm volatile("eieio; tlbsync; ptesync");
+ }
+
+ /*
+ * cp_abort is required if the processor supports local copy-paste
+ * to clear the copy buffer that was under control of the guest.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ asm volatile(PPC_CP_ABORT);
+
+ vc->dpdes = mfspr(SPRN_DPDES);
+ vc->vtb = mfspr(SPRN_VTB);
+ mtspr(SPRN_DPDES, 0);
+ if (vc->pcr)
+ mtspr(SPRN_PCR, PCR_MASK);
+
+ if (vc->tb_offset_applied) {
+ u64 new_tb = mftb() - vc->tb_offset_applied;
+ mtspr(SPRN_TBU40, new_tb);
+ tb = mftb();
+ if ((tb & 0xffffff) < (new_tb & 0xffffff))
+ mtspr(SPRN_TBU40, new_tb + 0x1000000);
+ vc->tb_offset_applied = 0;
+ }
+
+ mtspr(SPRN_HDEC, 0x7fffffff);
+
+ save_clear_guest_mmu(kvm, vcpu);
+ switch_mmu_to_host(kvm, host_pidr);
+ local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
+
+ /*
+ * If we entered with translation off (real mode), only turn the
+ * MMU back on after it has been switched to the host context, to
+ * avoid the P9_RADIX_PREFETCH_BUG.
+ */
+ __mtmsrd(msr, 0);
+
+ end_timing(vcpu);
+
+ return trap;
+}
+EXPORT_SYMBOL_GPL(kvmhv_vcpu_entry_p9);
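__accumulate_time() above publishes tb_total/tb_min/tb_max with a hand-rolled seqcount: odd while an update is in flight, even when stable, with smp_wmb() around the stores. No reader appears in this file; a consistent consumer would follow the usual retry loop, sketched below (the function is illustrative, the field names are the patch's own):

/*
 * Illustrative seqcount reader pairing with __accumulate_time():
 * retry while the count is odd (writer in progress) or changed
 * across the reads.
 */
static void read_tb_accumulator(struct kvmhv_tb_accumulator *acc,
				u64 *total, u64 *min, u64 *max)
{
	u64 seq;

	do {
		seq = READ_ONCE(acc->seqcount);
		smp_rmb();		/* pairs with the writer's smp_wmb() */
		*total = acc->tb_total;
		*min = acc->tb_min;
		*max = acc->tb_max;
		smp_rmb();
	} while ((seq & 1) || READ_ONCE(acc->seqcount) != seq);
}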
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 88da2764c1bb..632b2545072b 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -23,20 +23,9 @@
#include <asm/pte-walk.h>
/* Translate address of a vmalloc'd thing to a linear map address */
-static void *real_vmalloc_addr(void *x)
+static void *real_vmalloc_addr(void *addr)
{
- unsigned long addr = (unsigned long) x;
- pte_t *p;
- /*
- * assume we don't have huge pages in vmalloc space...
- * So don't worry about THP collapse/split. Called
- * Only in realmode with MSR_EE = 0, hence won't need irq_save/restore.
- */
- p = find_init_mm_pte(addr, NULL);
- if (!p || !pte_present(*p))
- return NULL;
- addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
- return __va(addr);
+ return __va(ppc_find_vmap_phys((unsigned long)addr));
}
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
@@ -57,6 +46,10 @@ static int global_invalidates(struct kvm *kvm)
else
global = 1;
+ /* LPID has been switched to host if in virt mode so can't do local */
+ if (!global && (mfmsr() & (MSR_IR|MSR_DR)))
+ global = 1;
+
if (!global) {
/* any other core might now have stale TLB entries... */
smp_wmb();
@@ -67,7 +60,7 @@ static int global_invalidates(struct kvm *kvm)
* so use the bit for the first thread to represent the core.
*/
if (cpu_has_feature(CPU_FTR_ARCH_300))
- cpu = cpu_first_thread_sibling(cpu);
+ cpu = cpu_first_tlb_thread_sibling(cpu);
cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
}
@@ -409,6 +402,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu->arch.pgdir, true,
&vcpu->arch.regs.gpr[4]);
}
+EXPORT_SYMBOL_GPL(kvmppc_h_enter);
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
@@ -553,6 +547,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
&vcpu->arch.regs.gpr[4]);
}
+EXPORT_SYMBOL_GPL(kvmppc_h_remove);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
@@ -671,10 +666,10 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_bulk_remove);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
- unsigned long pte_index, unsigned long avpn,
- unsigned long va)
+ unsigned long pte_index, unsigned long avpn)
{
struct kvm *kvm = vcpu->kvm;
__be64 *hpte;
@@ -742,6 +737,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
return H_SUCCESS;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_protect);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -782,6 +778,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
}
return H_SUCCESS;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_read);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -830,6 +827,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_clear_ref);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -877,6 +875,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_clear_mod);
static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
unsigned long gpa, int writing, unsigned long *hpa,
@@ -1295,3 +1294,4 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
return -1; /* send fault up to host kernel mode */
}
+EXPORT_SYMBOL_GPL(kvmppc_hpte_hv_fault);
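The new check in global_invalidates() above follows from the P9 path now switching LPIDR back to the host before reaching virtual mode: tlbiel only targets the current LPID, so once MSR[IR|DR] is on, a local flush can no longer hit the guest's translations. Reduced to a predicate (the helper is illustrative):

/*
 * tlbiel operates on LPIDR, which the P9 exit path has already
 * switched back to the host LPID by the time we run with
 * translation on, so a global tlbie is required then.
 */
static inline bool must_use_global_tlbie(void)
{
	return !!(mfmsr() & (MSR_IR | MSR_DR));
}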
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index c2c9c733f359..0a11ec88a0ae 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -141,13 +141,6 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
return;
}
- if (xive_enabled() && kvmhv_on_pseries()) {
- /* No XICS access or hypercalls available, too hard */
- this_icp->rm_action |= XICS_RM_KICK_VCPU;
- this_icp->rm_kick_target = vcpu;
- return;
- }
-
/*
* Check if the core is loaded,
* if not, find an available host core to post to wake the VCPU,
@@ -771,14 +764,6 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
void __iomem *xics_phys;
int64_t rc;
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- iosync();
- plpar_hcall_raw(H_EOI, retbuf, hwirq);
- return;
- }
-
rc = pnv_opal_pci_msi_eoi(c, hwirq);
if (rc)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 5e634db4809b..8dd437d7a2c6 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -25,18 +25,10 @@
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
-#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>
-#include <asm/ultravisor-api.h>
-
-/* Sign-extend HDEC if not on POWER9 */
-#define EXTEND_HDEC(reg) \
-BEGIN_FTR_SECTION; \
- extsw reg, reg; \
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE 1
@@ -44,9 +36,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define NAPPING_UNSPLIT 3
/* Stack frame offsets for kvmppc_hv_entry */
-#define SFS 208
+#define SFS 160
#define STACK_SLOT_TRAP (SFS-4)
-#define STACK_SLOT_SHORT_PATH (SFS-8)
#define STACK_SLOT_TID (SFS-16)
#define STACK_SLOT_PSSCR (SFS-24)
#define STACK_SLOT_PID (SFS-32)
@@ -57,10 +48,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define STACK_SLOT_HFSCR (SFS-72)
#define STACK_SLOT_AMR (SFS-80)
#define STACK_SLOT_UAMOR (SFS-88)
-#define STACK_SLOT_DAWR1 (SFS-96)
-#define STACK_SLOT_DAWRX1 (SFS-104)
-/* the following is used by the P9 short path */
-#define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */
+#define STACK_SLOT_FSCR (SFS-96)
/*
* Call kvmppc_hv_entry in real mode.
@@ -136,15 +124,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/* Return the trap number on this thread as the return value */
mr r3, r12
- /*
- * If we came back from the guest via a relocation-on interrupt,
- * we will be in virtual mode at this point, which makes it a
- * little easier to get back to the caller.
- */
- mfmsr r0
- andi. r0, r0, MSR_IR /* in real mode? */
- bne .Lvirt_return
-
/* RFI into the highmem handler */
mfmsr r6
li r0, MSR_RI
@@ -154,11 +133,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtsrr1 r7
RFI_TO_KERNEL
- /* Virtual-mode return */
-.Lvirt_return:
- mtlr r8
- blr
-
kvmppc_primary_no_guest:
/* We handle this much like a ceded vcpu */
/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
@@ -245,7 +219,7 @@ kvm_novcpu_wakeup:
/* See if our timeslice has expired (HDEC is negative) */
mfspr r0, SPRN_HDEC
- EXTEND_HDEC(r0)
+ extsw r0, r0
li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
cmpdi r0, 0
blt kvm_novcpu_exit
@@ -347,10 +321,8 @@ kvm_secondary_got_guest:
lbz r4, HSTATE_PTID(r13)
cmpwi r4, 0
bne 63f
- LOAD_REG_ADDR(r6, decrementer_max)
- ld r6, 0(r6)
+ lis r6,0x7fff /* MAX_INT@h */
mtspr SPRN_HDEC, r6
-BEGIN_FTR_SECTION
/* and set per-LPAR registers, if doing dynamic micro-threading */
ld r6, HSTATE_SPLIT_MODE(r13)
cmpdi r6, 0
@@ -362,7 +334,6 @@ BEGIN_FTR_SECTION
ld r0, KVM_SPLIT_LDBAR(r6)
mtspr SPRN_LDBAR, r0
isync
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
63:
/* Order load of vcpu after load of vcore */
lwsync
@@ -433,7 +404,6 @@ kvm_no_guest:
blr
53:
-BEGIN_FTR_SECTION
HMT_LOW
ld r5, HSTATE_KVM_VCORE(r13)
cmpdi r5, 0
@@ -448,14 +418,6 @@ BEGIN_FTR_SECTION
b kvm_unsplit_nap
60: HMT_MEDIUM
b kvm_secondary_got_guest
-FTR_SECTION_ELSE
- HMT_LOW
- ld r5, HSTATE_KVM_VCORE(r13)
- cmpdi r5, 0
- beq kvm_no_guest
- HMT_MEDIUM
- b kvm_secondary_got_guest
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
54: li r0, KVM_HWTHREAD_IN_KVM
stb r0, HSTATE_HWTHREAD_STATE(r13)
@@ -581,13 +543,11 @@ kvmppc_hv_entry:
bne 10f
lwz r7,KVM_LPID(r9)
-BEGIN_FTR_SECTION
ld r6,KVM_SDR1(r9)
li r0,LPID_RSVD /* switch to reserved LPID */
mtspr SPRN_LPID,r0
ptesync
mtspr SPRN_SDR1,r6 /* switch to partition page table */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPID,r7
isync
@@ -668,16 +628,6 @@ kvmppc_got_guest:
/* Save host values of some registers */
BEGIN_FTR_SECTION
- mfspr r5, SPRN_TIDR
- mfspr r6, SPRN_PSSCR
- mfspr r7, SPRN_PID
- std r5, STACK_SLOT_TID(r1)
- std r6, STACK_SLOT_PSSCR(r1)
- std r7, STACK_SLOT_PID(r1)
- mfspr r5, SPRN_HFSCR
- std r5, STACK_SLOT_HFSCR(r1)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-BEGIN_FTR_SECTION
mfspr r5, SPRN_CIABR
mfspr r6, SPRN_DAWR0
mfspr r7, SPRN_DAWRX0
@@ -686,13 +636,9 @@ BEGIN_FTR_SECTION
std r6, STACK_SLOT_DAWR0(r1)
std r7, STACK_SLOT_DAWRX0(r1)
std r8, STACK_SLOT_IAMR(r1)
+ mfspr r5, SPRN_FSCR
+ std r5, STACK_SLOT_FSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-BEGIN_FTR_SECTION
- mfspr r6, SPRN_DAWR1
- mfspr r7, SPRN_DAWRX1
- std r6, STACK_SLOT_DAWR1(r1)
- std r7, STACK_SLOT_DAWRX1(r1)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1)
mfspr r5, SPRN_AMR
std r5, STACK_SLOT_AMR(r1)
@@ -710,13 +656,9 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -783,12 +725,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
ld r6, VCPU_DAWRX0(r4)
mtspr SPRN_DAWR0, r5
mtspr SPRN_DAWRX0, r6
-BEGIN_FTR_SECTION
- ld r5, VCPU_DAWR1(r4)
- ld r6, VCPU_DAWRX1(r4)
- mtspr SPRN_DAWR1, r5
- mtspr SPRN_DAWRX1, r6
-END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
1:
ld r7, VCPU_CIABR(r4)
ld r8, VCPU_TAR(r4)
@@ -806,7 +742,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
mtspr SPRN_BESCR, r6
mtspr SPRN_PID, r7
mtspr SPRN_WORT, r8
-BEGIN_FTR_SECTION
/* POWER8-only registers */
ld r5, VCPU_TCSCR(r4)
ld r6, VCPU_ACOP(r4)
@@ -817,18 +752,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_CSIGR, r7
mtspr SPRN_TACR, r8
nop
-FTR_SECTION_ELSE
- /* POWER9-only registers */
- ld r5, VCPU_TID(r4)
- ld r6, VCPU_PSSCR(r4)
- lbz r8, HSTATE_FAKE_SUSPEND(r13)
- oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
- rldimi r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
- ld r7, VCPU_HFSCR(r4)
- mtspr SPRN_TIDR, r5
- mtspr SPRN_PSSCR, r6
- mtspr SPRN_HFSCR, r7
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:
ld r5, VCPU_SPRG0(r4)
@@ -898,23 +821,15 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
/* Check if HDEC expires soon */
mfspr r3, SPRN_HDEC
- EXTEND_HDEC(r3)
+ extsw r3, r3
cmpdi r3, 512 /* 1 microsecond */
blt hdec_soon
- ld r6, VCPU_KVM(r4)
- lbz r0, KVM_RADIX(r6)
- cmpwi r0, 0
- bne 9f
-
- /* For hash guest, clear out and reload the SLB */
-BEGIN_MMU_FTR_SECTION
- /* Radix host won't have populated the SLB, so no need to clear */
+ /* Clear out and reload the SLB */
li r6, 0
slbmte r6, r6
PPC_SLBIA(6)
ptesync
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
lwz r5,VCPU_SLB_MAX(r4)
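EXTEND_HDEC existed because POWER9 can run with a large (wider than 32-bit)
hypervisor decrementer; with the POWER9 path gone this code only ever sees a
32-bit HDEC, so a plain extsw is enough. What it computes, as a C sketch:

	/* Sign-extend the 32-bit HDEC so an expired (negative) decrementer
	 * stays negative in the 64-bit cmpdi that follows. */
	static inline long hdec_extend(u32 hdec)
	{
		return (long)(s32)hdec;
	}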
@@ -929,96 +844,9 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
bdnz 1b
9:
-#ifdef CONFIG_KVM_XICS
- /* We are entering the guest on that thread, push VCPU to XIVE */
- ld r11, VCPU_XIVE_SAVED_STATE(r4)
- li r9, TM_QW1_OS
- lwz r8, VCPU_XIVE_CAM_WORD(r4)
- cmpwi r8, 0
- beq no_xive
- li r7, TM_QW1_OS + TM_WORD2
- mfmsr r0
- andi. r0, r0, MSR_DR /* in real mode? */
- beq 2f
- ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
- cmpldi cr1, r10, 0
- beq cr1, no_xive
- eieio
- stdx r11,r9,r10
- stwx r8,r7,r10
- b 3f
-2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
- cmpldi cr1, r10, 0
- beq cr1, no_xive
- eieio
- stdcix r11,r9,r10
- stwcix r8,r7,r10
-3: li r9, 1
- stb r9, VCPU_XIVE_PUSHED(r4)
- eieio
-
- /*
- * We clear the irq_pending flag. There is a small chance of a
- * race vs. the escalation interrupt happening on another
- * processor setting it again, but the only consequence is to
- * cause a spurious wakeup on the next H_CEDE, which is not an
- * issue.
- */
- li r0,0
- stb r0, VCPU_IRQ_PENDING(r4)
-
- /*
- * In single escalation mode, if the escalation interrupt is
- * on, we mask it.
- */
- lbz r0, VCPU_XIVE_ESC_ON(r4)
- cmpwi cr1, r0,0
- beq cr1, 1f
- li r9, XIVE_ESB_SET_PQ_01
- beq 4f /* in real mode? */
- ld r10, VCPU_XIVE_ESC_VADDR(r4)
- ldx r0, r10, r9
- b 5f
-4: ld r10, VCPU_XIVE_ESC_RADDR(r4)
- ldcix r0, r10, r9
-5: sync
-
- /* We have a possible subtle race here: The escalation interrupt might
- * have fired and be on its way to the host queue while we mask it,
- * and if we unmask it early enough (re-cede right away), there is
- * a theoretical possibility that it fires again, thus landing in the
- * target queue more than once which is a big no-no.
- *
- * Fortunately, solving this is rather easy. If the above load setting
- * PQ to 01 returns a previous value where P is set, then we know the
- * escalation interrupt is somewhere on its way to the host. In that
- * case we simply don't clear the xive_esc_on flag below. It will be
- * eventually cleared by the handler for the escalation interrupt.
- *
- * Then, when doing a cede, we check that flag again before re-enabling
- * the escalation interrupt, and if set, we abort the cede.
- */
- andi. r0, r0, XIVE_ESB_VAL_P
- bne- 1f
-
- /* Now P is 0, we can clear the flag */
- li r0, 0
- stb r0, VCPU_XIVE_ESC_ON(r4)
-1:
-no_xive:
-#endif /* CONFIG_KVM_XICS */
-
- li r0, 0
- stw r0, STACK_SLOT_SHORT_PATH(r1)
-
deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */
/* Check if we can deliver an external or decrementer interrupt now */
ld r0, VCPU_PENDING_EXC(r4)
-BEGIN_FTR_SECTION
- /* On POWER9, also check for emulated doorbell interrupt */
- lbz r3, VCPU_DBELL_REQ(r4)
- or r0, r0, r3
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
cmpdi r0, 0
beq 71f
mr r3, r4
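The real-mode XIVE push removed in this hunk survives as C: the virtual-mode
helper kvmppc_xive_push_vcpu() in book3s_xive.c does the same stores through
the TIMA. A hedged sketch of that push (the function name here is ours):

	static void xive_push_sketch(struct kvm_vcpu *vcpu, void __iomem *tima)
	{
		if (!vcpu->arch.xive_cam_word)	/* vcpu not on a XIVE VP */
			return;
		eieio();
		/* OS CAM line words 0-1, then the CAM word itself */
		__raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
		__raw_writel(vcpu->arch.xive_cam_word,
			     tima + TM_QW1_OS + TM_WORD2);
		vcpu->arch.xive_pushed = 1;
		eieio();
	}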
@@ -1030,7 +858,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mtspr SPRN_SRR0, r6
mtspr SPRN_SRR1, r7
-fast_guest_entry_c:
ld r10, VCPU_PC(r4)
ld r11, VCPU_MSR(r4)
/* r11 = vcpu->arch.msr & ~MSR_HV */
@@ -1092,18 +919,8 @@ BEGIN_FTR_SECTION
mtspr SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-/* Move canary into DSISR to check for later */
-BEGIN_FTR_SECTION
- li r0, 0x7fff
- mtspr SPRN_HDSISR, r0
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
- ld r6, VCPU_KVM(r4)
- lbz r7, KVM_SECURE_GUEST(r6)
- cmpdi r7, 0
ld r6, VCPU_GPR(R6)(r4)
ld r7, VCPU_GPR(R7)(r4)
- bne ret_to_ultra
ld r0, VCPU_CR(r4)
mtcr r0
@@ -1114,117 +931,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r4, VCPU_GPR(R4)(r4)
HRFI_TO_GUEST
b .
-/*
- * Use UV_RETURN ultracall to return control back to the Ultravisor after
- * processing a hypercall or interrupt that was forwarded (a.k.a. reflected)
- * to the Hypervisor.
- *
- * All registers have already been loaded, except:
- * R0 = hcall result
- * R2 = SRR1, so UV can detect a synthesized interrupt (if any)
- * R3 = UV_RETURN
- */
-ret_to_ultra:
- ld r0, VCPU_CR(r4)
- mtcr r0
-
- ld r0, VCPU_GPR(R3)(r4)
- mfspr r2, SPRN_SRR1
- li r3, 0
- ori r3, r3, UV_RETURN
- ld r4, VCPU_GPR(R4)(r4)
- sc 2
-
-/*
- * Enter the guest on a P9 or later system where we have exactly
- * one vcpu per vcore and we don't need to go to real mode
- * (which implies that host and guest are both using radix MMU mode).
- * r3 = vcpu pointer
- * Most SPRs and all the VSRs have been loaded already.
- */
-_GLOBAL(__kvmhv_vcpu_entry_p9)
-EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
- mflr r0
- std r0, PPC_LR_STKOFF(r1)
- stdu r1, -SFS(r1)
-
- li r0, 1
- stw r0, STACK_SLOT_SHORT_PATH(r1)
-
- std r3, HSTATE_KVM_VCPU(r13)
- mfcr r4
- stw r4, SFS+8(r1)
-
- std r1, HSTATE_HOST_R1(r13)
-
- reg = 14
- .rept 18
- std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
- reg = reg + 1
- .endr
-
- reg = 14
- .rept 18
- ld reg, __VCPU_GPR(reg)(r3)
- reg = reg + 1
- .endr
-
- mfmsr r10
- std r10, HSTATE_HOST_MSR(r13)
-
- mr r4, r3
- b fast_guest_entry_c
-guest_exit_short_path:
- /*
- * Malicious or buggy radix guests may have inserted SLB entries
- * (only 0..3 because radix always runs with UPRT=1), so these must
- * be cleared here to avoid side-channels. slbmte is used rather
- * than slbia, as it won't clear cached translations.
- */
- li r0,0
- slbmte r0,r0
- li r4,1
- slbmte r0,r4
- li r4,2
- slbmte r0,r4
- li r4,3
- slbmte r0,r4
-
- li r0, KVM_GUEST_MODE_NONE
- stb r0, HSTATE_IN_GUEST(r13)
-
- reg = 14
- .rept 18
- std reg, __VCPU_GPR(reg)(r9)
- reg = reg + 1
- .endr
-
- reg = 14
- .rept 18
- ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
- reg = reg + 1
- .endr
-
- lwz r4, SFS+8(r1)
- mtcr r4
-
- mr r3, r12 /* trap number */
-
- addi r1, r1, SFS
- ld r0, PPC_LR_STKOFF(r1)
- mtlr r0
-
- /* If we are in real mode, do a rfid to get back to the caller */
- mfmsr r4
- andi. r5, r4, MSR_IR
- bnelr
- rldicl r5, r4, 64 - MSR_TS_S_LG, 62 /* extract TS field */
- mtspr SPRN_SRR0, r0
- ld r10, HSTATE_HOST_MSR(r13)
- rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
- mtspr SPRN_SRR1, r10
- RFI_TO_KERNEL
- b .
secondary_too_late:
li r12, 0
@@ -1265,21 +971,16 @@ hdec_soon:
kvmppc_interrupt_hv:
/*
* Register contents:
+ * R9 = HSTATE_IN_GUEST
* R12 = (guest CR << 32) | interrupt vector
* R13 = PACA
* guest R12 saved in shadow VCPU SCRATCH0
* guest R13 saved in SPRN_SCRATCH0
+ * guest R9 saved in HSTATE_SCRATCH2
*/
- std r9, HSTATE_SCRATCH2(r13)
- lbz r9, HSTATE_IN_GUEST(r13)
- cmpwi r9, KVM_GUEST_MODE_HOST_HV
- beq kvmppc_bad_host_intr
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
- cmpwi r9, KVM_GUEST_MODE_GUEST
- ld r9, HSTATE_SCRATCH2(r13)
- beq kvmppc_interrupt_pr
-#endif
/* We're now back in the host but in guest MMU context */
+ cmpwi r9,KVM_GUEST_MODE_HOST_HV
+ beq kvmppc_bad_host_intr
li r9, KVM_GUEST_MODE_HOST_HV
stb r9, HSTATE_IN_GUEST(r13)
@@ -1397,7 +1098,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
bne 2f
mfspr r3,SPRN_HDEC
- EXTEND_HDEC(r3)
+ extsw r3, r3
cmpdi r3,0
mr r4,r9
bge fast_guest_return
@@ -1409,14 +1110,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Hypervisor doorbell - exit only if host IPI flag set */
cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
bne 3f
-BEGIN_FTR_SECTION
- PPC_MSGSYNC
- lwsync
- /* always exit if we're running a nested guest */
- ld r0, VCPU_NESTED(r9)
- cmpdi r0, 0
- bne guest_exit_cont
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
beq maybe_reenter_guest
@@ -1446,62 +1139,16 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
mr r4, r9
bl kvmhv_accumulate_time
#endif
-#ifdef CONFIG_KVM_XICS
- /* We are exiting, pull the VP from the XIVE */
- lbz r0, VCPU_XIVE_PUSHED(r9)
- cmpwi cr0, r0, 0
- beq 1f
- li r7, TM_SPC_PULL_OS_CTX
- li r6, TM_QW1_OS
- mfmsr r0
- andi. r0, r0, MSR_DR /* in real mode? */
- beq 2f
- ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
- cmpldi cr0, r10, 0
- beq 1f
- /* First load to pull the context, we ignore the value */
- eieio
- lwzx r11, r7, r10
- /* Second load to recover the context state (Words 0 and 1) */
- ldx r11, r6, r10
- b 3f
-2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
- cmpldi cr0, r10, 0
- beq 1f
- /* First load to pull the context, we ignore the value */
- eieio
- lwzcix r11, r7, r10
- /* Second load to recover the context state (Words 0 and 1) */
- ldcix r11, r6, r10
-3: std r11, VCPU_XIVE_SAVED_STATE(r9)
- /* Fixup some of the state for the next load */
- li r10, 0
- li r0, 0xff
- stb r10, VCPU_XIVE_PUSHED(r9)
- stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
- stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
- eieio
-1:
-#endif /* CONFIG_KVM_XICS */
/*
* Possibly flush the link stack here, before we do a blr in
- * guest_exit_short_path.
+ * kvmhv_switch_to_host.
*/
1: nop
patch_site 1b patch__call_kvm_flush_link_stack
- /* If we came in through the P9 short path, go back out to C now */
- lwz r0, STACK_SLOT_SHORT_PATH(r1)
- cmpwi r0, 0
- bne guest_exit_short_path
-
/* For hash guest, read the guest SLB and save it away */
- ld r5, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r5)
li r5, 0
- cmpwi r0, 0
- bne 0f /* for radix, save 0 entries */
lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
mtctr r0
li r6,0
@@ -1525,9 +1172,6 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
stw r5,VCPU_SLB_MAX(r9)
/* load host SLB entries */
-BEGIN_MMU_FTR_SECTION
- b guest_bypass
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
ld r8,PACA_SLBSHADOWPTR(r13)
.rept SLB_NUM_BOLTED
@@ -1540,21 +1184,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
slbmte r6,r5
1: addi r8,r8,16
.endr
- b guest_bypass
-
-0: /*
- * Sanitise radix guest SLB, see guest_exit_short_path comment.
- * We clear vcpu->arch.slb_max to match earlier behaviour.
- */
- li r0,0
- stw r0,VCPU_SLB_MAX(r9)
- slbmte r0,r0
- li r4,1
- slbmte r0,r4
- li r4,2
- slbmte r0,r4
- li r4,3
- slbmte r0,r4
guest_bypass:
stw r12, STACK_SLOT_TRAP(r1)
@@ -1564,12 +1193,6 @@ guest_bypass:
ld r3, HSTATE_KVM_VCORE(r13)
mfspr r5,SPRN_DEC
mftb r6
- /* On P9, if the guest has large decr enabled, don't sign extend */
-BEGIN_FTR_SECTION
- ld r4, VCORE_LPCR(r3)
- andis. r4, r4, LPCR_LD@h
- bne 16f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r5,r5
16: add r5,r5,r6
/* r5 is a guest timebase value here, convert to host TB */
@@ -1643,7 +1266,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
std r6, VCPU_BESCR(r9)
stw r7, VCPU_GUEST_PID(r9)
std r8, VCPU_WORT(r9)
-BEGIN_FTR_SECTION
mfspr r5, SPRN_TCSCR
mfspr r6, SPRN_ACOP
mfspr r7, SPRN_CSIGR
@@ -1652,17 +1274,10 @@ BEGIN_FTR_SECTION
std r6, VCPU_ACOP(r9)
std r7, VCPU_CSIGR(r9)
std r8, VCPU_TACR(r9)
-FTR_SECTION_ELSE
- mfspr r5, SPRN_TIDR
- mfspr r6, SPRN_PSSCR
- std r5, VCPU_TID(r9)
- rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
- rotldi r6, r6, 60
- std r6, VCPU_PSSCR(r9)
- /* Restore host HFSCR value */
- ld r7, STACK_SLOT_HFSCR(r1)
- mtspr SPRN_HFSCR, r7
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+ ld r5, STACK_SLOT_FSCR(r1)
+ mtspr SPRN_FSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/*
* Restore various registers to 0, where non-zero values
* set by the guest could disrupt the host.
@@ -1670,13 +1285,11 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
li r0, 0
mtspr SPRN_PSPB, r0
mtspr SPRN_WORT, r0
-BEGIN_FTR_SECTION
mtspr SPRN_TCSCR, r0
/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
li r0, 1
sldi r0, r0, 31
mtspr SPRN_MMCRS, r0
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
/* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
ld r8, STACK_SLOT_IAMR(r1)
@@ -1733,13 +1346,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
bl kvmppc_save_fp
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -1785,80 +1394,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_DAWR0, r6
mtspr SPRN_DAWRX0, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-BEGIN_FTR_SECTION
- ld r6, STACK_SLOT_DAWR1(r1)
- ld r7, STACK_SLOT_DAWRX1(r1)
- mtspr SPRN_DAWR1, r6
- mtspr SPRN_DAWRX1, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1)
-BEGIN_FTR_SECTION
- ld r5, STACK_SLOT_TID(r1)
- ld r6, STACK_SLOT_PSSCR(r1)
- ld r7, STACK_SLOT_PID(r1)
- mtspr SPRN_TIDR, r5
- mtspr SPRN_PSSCR, r6
- mtspr SPRN_PID, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
-#ifdef CONFIG_PPC_RADIX_MMU
- /*
- * Are we running hash or radix?
- */
- ld r5, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r5)
- cmpwi cr2, r0, 0
- beq cr2, 2f
-
- /*
- * Radix: do eieio; tlbsync; ptesync sequence in case we
- * interrupted the guest between a tlbie and a ptesync.
- */
- eieio
- tlbsync
- ptesync
-
-BEGIN_FTR_SECTION
- /* Radix: Handle the case where the guest used an illegal PID */
- LOAD_REG_ADDR(r4, mmu_base_pid)
- lwz r3, VCPU_GUEST_PID(r9)
- lwz r5, 0(r4)
- cmpw cr0,r3,r5
- blt 2f
-
- /*
- * Illegal PID, the HW might have prefetched and cached in the TLB
- * some translations for the LPID 0 / guest PID combination which
- * Linux doesn't know about, so we need to flush that PID out of
- * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
- * the right context.
- */
- li r0,0
- mtspr SPRN_LPID,r0
- isync
-
- /* Then do a congruence class local flush */
- ld r6,VCPU_KVM(r9)
- lwz r0,KVM_TLB_SETS(r6)
- mtctr r0
- li r7,0x400 /* IS field = 0b01 */
- ptesync
- sldi r0,r3,32 /* RS has PID */
-1: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
- addi r7,r7,0x1000
- bdnz 1b
- ptesync
-END_FTR_SECTION_IFSET(CPU_FTR_P9_RADIX_PREFETCH_BUG)
-
-2:
-#endif /* CONFIG_PPC_RADIX_MMU */
-
- /*
- * cp_abort is required if the processor supports local copy-paste
- * to clear the copy buffer that was under control of the guest.
- */
-BEGIN_FTR_SECTION
- PPC_CP_ABORT
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
/*
* POWER7/POWER8 guest -> host partition switch code.
@@ -1895,13 +1430,11 @@ kvmhv_switch_to_host:
/* Primary thread switches back to host partition */
lwz r7,KVM_HOST_LPID(r4)
-BEGIN_FTR_SECTION
ld r6,KVM_HOST_SDR1(r4)
li r8,LPID_RSVD /* switch to reserved LPID */
mtspr SPRN_LPID,r8
ptesync
mtspr SPRN_SDR1,r6 /* switch to host page table */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPID,r7
isync
@@ -2110,26 +1643,13 @@ kvmppc_tm_emul:
* reflect the HDSI to the guest as a DSI.
*/
kvmppc_hdsi:
- ld r3, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r3)
mfspr r4, SPRN_HDAR
mfspr r6, SPRN_HDSISR
-BEGIN_FTR_SECTION
- /* Look for DSISR canary. If we find it, retry instruction */
- cmpdi r6, 0x7fff
- beq 6f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
- cmpwi r0, 0
- bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
/* HPTE not found fault or protection fault? */
andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
beq 1f /* if not, send it to the guest */
andi. r0, r11, MSR_DR /* data relocation enabled? */
beq 3f
-BEGIN_FTR_SECTION
- mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
- b 4f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
clrrdi r0, r4, 28
PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
@@ -2197,31 +1717,15 @@ fast_interrupt_c_return:
stb r0, HSTATE_IN_GUEST(r13)
b guest_exit_cont
-.Lradix_hdsi:
- std r4, VCPU_FAULT_DAR(r9)
- stw r6, VCPU_FAULT_DSISR(r9)
-.Lradix_hisi:
- mfspr r5, SPRN_ASDR
- std r5, VCPU_FAULT_GPA(r9)
- b guest_exit_cont
-
/*
* Similarly for an HISI, reflect it to the guest as an ISI unless
* it is an HPTE not found fault for a page that we have paged out.
*/
kvmppc_hisi:
- ld r3, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r3)
- cmpwi r0, 0
- bne .Lradix_hisi /* for radix, just save ASDR */
andis. r0, r11, SRR1_ISI_NOPT@h
beq 1f
andi. r0, r11, MSR_IR /* instruction relocation enabled? */
beq 3f
-BEGIN_FTR_SECTION
- mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
- b 4f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
clrrdi r0, r10, 28
PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
li r0, BOOK3S_INTERRUPT_INST_SEGMENT
@@ -2269,10 +1773,6 @@ hcall_try_real_mode:
andi. r0,r11,MSR_PR
/* sc 1 from userspace - reflect to guest syscall */
bne sc_1_fast_return
- /* sc 1 from nested guest - give it to L1 to handle */
- ld r0, VCPU_NESTED(r9)
- cmpdi r0, 0
- bne guest_exit_cont
clrrdi r3,r3,2
cmpldi r3,hcall_real_table_end - hcall_real_table
bge guest_exit_cont
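For orientation, the bounds check above guards an offset table: hcall numbers
are multiples of 4, so the masked number indexes hcall_real_table, and
anything at or past hcall_real_table_end has no real-mode handler. Roughly
(the fallback name is a placeholder, not a real symbol):

	req &= ~3UL;	/* clrrdi r3,r3,2 */
	if (req >= hcall_real_table_end - hcall_real_table)
		return punt_to_virtual_mode();	/* bge guest_exit_cont */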
@@ -2537,7 +2037,7 @@ hcall_real_table:
#else
.long 0 /* 0x2fc - H_XIRR_X*/
#endif
- .long DOTSYM(kvmppc_h_random) - hcall_real_table
+ .long DOTSYM(kvmppc_rm_h_random) - hcall_real_table
.globl hcall_real_table_end
hcall_real_table_end:
@@ -2668,13 +2168,9 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
bl kvmppc_save_fp
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -2694,15 +2190,8 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
mfspr r3, SPRN_DEC
mfspr r4, SPRN_HDEC
mftb r5
-BEGIN_FTR_SECTION
- /* On P9 check whether the guest has large decrementer mode enabled */
- ld r6, HSTATE_KVM_VCORE(r13)
- ld r6, VCORE_LPCR(r6)
- andis. r6, r6, LPCR_LD@h
- bne 68f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r3, r3
-68: EXTEND_HDEC(r4)
+ extsw r4, r4
cmpd r3, r4
ble 67f
mtspr SPRN_DEC, r4
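The sequence above clamps the guest decrementer so a ceded vcpu wakes no
later than the hypervisor decrementer expires. In C terms:

	long dec  = (s32)mfspr(SPRN_DEC);	/* both 32-bit now that the */
	long hdec = (s32)mfspr(SPRN_HDEC);	/* large-decr case is gone  */
	if (dec > hdec)				/* "ble 67f" skips otherwise */
		mtspr(SPRN_DEC, hdec);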
@@ -2747,28 +2236,11 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
kvm_nap_sequence: /* desired LPCR value in r5 */
-BEGIN_FTR_SECTION
- /*
- * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
- * enable state loss = 1 (allow SMT mode switch)
- * requested level = 0 (just stop dispatching)
- */
- lis r3, (PSSCR_EC | PSSCR_ESL)@h
- /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
- li r4, LPCR_PECE_HVEE@higher
- sldi r4, r4, 32
- or r5, r5, r4
-FTR_SECTION_ELSE
li r3, PNV_THREAD_NAP
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
mtspr SPRN_LPCR,r5
isync
-BEGIN_FTR_SECTION
- bl isa300_idle_stop_mayloss
-FTR_SECTION_ELSE
bl isa206_idle_insn_mayloss
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
mfspr r0, SPRN_CTRLF
ori r0, r0, 1
@@ -2787,10 +2259,8 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
beq kvm_end_cede
cmpwi r0, NAPPING_NOVCPU
beq kvm_novcpu_wakeup
-BEGIN_FTR_SECTION
cmpwi r0, NAPPING_UNSPLIT
beq kvm_unsplit_wakeup
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
twi 31,0,0 /* Nap state must not be zero */
33: mr r4, r3
@@ -2810,13 +2280,9 @@ kvm_end_cede:
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -2906,47 +2372,7 @@ kvm_cede_prodded:
/* we've ceded but we want to give control to the host */
kvm_cede_exit:
ld r9, HSTATE_KVM_VCPU(r13)
-#ifdef CONFIG_KVM_XICS
- /* are we using XIVE with single escalation? */
- ld r10, VCPU_XIVE_ESC_VADDR(r9)
- cmpdi r10, 0
- beq 3f
- li r6, XIVE_ESB_SET_PQ_00
- /*
- * If we still have a pending escalation, abort the cede,
- * and we must set PQ to 10 rather than 00 so that we don't
- * potentially end up with two entries for the escalation
- * interrupt in the XIVE interrupt queue. In that case
- * we also don't want to set xive_esc_on to 1 here in
- * case we race with xive_esc_irq().
- */
- lbz r5, VCPU_XIVE_ESC_ON(r9)
- cmpwi r5, 0
- beq 4f
- li r0, 0
- stb r0, VCPU_CEDED(r9)
- /*
- * The escalation interrupts are special as we don't EOI them.
- * There is no need to use the load-after-store ordering offset
- * to set PQ to 10 as we won't use StoreEOI.
- */
- li r6, XIVE_ESB_SET_PQ_10
- b 5f
-4: li r0, 1
- stb r0, VCPU_XIVE_ESC_ON(r9)
- /* make sure store to xive_esc_on is seen before xive_esc_irq runs */
- sync
-5: /* Enable XIVE escalation */
- mfmsr r0
- andi. r0, r0, MSR_DR /* in real mode? */
- beq 1f
- ldx r0, r10, r6
- b 2f
-1: ld r10, VCPU_XIVE_ESC_RADDR(r9)
- ldcix r0, r10, r6
-2: sync
-#endif /* CONFIG_KVM_XICS */
-3: b guest_exit_cont
+ b guest_exit_cont
/* Try to do machine check recovery in real mode */
machine_check_realmode:
@@ -3023,10 +2449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
PPC_MSGCLR(6)
/* see if it's a host IPI */
li r3, 1
-BEGIN_FTR_SECTION
- PPC_MSGSYNC
- lwsync
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
bnelr
@@ -3335,73 +2757,12 @@ kvmppc_bad_host_intr:
std r3, STACK_FRAME_OVERHEAD-16(r1)
/*
- * On POWER9 do a minimal restore of the MMU and call C code,
- * which will print a message and panic.
* XXX On POWER7 and POWER8, we just spin here since we don't
* know what the other threads are doing (and we don't want to
* coordinate with them) - but at least we now have register state
* in memory that we might be able to look at from another CPU.
*/
-BEGIN_FTR_SECTION
b .
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
- ld r9, HSTATE_KVM_VCPU(r13)
- ld r10, VCPU_KVM(r9)
-
- li r0, 0
- mtspr SPRN_AMR, r0
- mtspr SPRN_IAMR, r0
- mtspr SPRN_CIABR, r0
- mtspr SPRN_DAWRX0, r0
-BEGIN_FTR_SECTION
- mtspr SPRN_DAWRX1, r0
-END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
-
- /* Clear hash and radix guest SLB, see guest_exit_short_path comment. */
- slbmte r0, r0
- PPC_SLBIA(6)
-
-BEGIN_MMU_FTR_SECTION
- b 4f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
-
- ptesync
- ld r8, PACA_SLBSHADOWPTR(r13)
- .rept SLB_NUM_BOLTED
- li r3, SLBSHADOW_SAVEAREA
- LDX_BE r5, r8, r3
- addi r3, r3, 8
- LDX_BE r6, r8, r3
- andis. r7, r5, SLB_ESID_V@h
- beq 3f
- slbmte r6, r5
-3: addi r8, r8, 16
- .endr
-
-4: lwz r7, KVM_HOST_LPID(r10)
- mtspr SPRN_LPID, r7
- mtspr SPRN_PID, r0
- ld r8, KVM_HOST_LPCR(r10)
- mtspr SPRN_LPCR, r8
- isync
- li r0, KVM_GUEST_MODE_NONE
- stb r0, HSTATE_IN_GUEST(r13)
-
- /*
- * Turn on the MMU and jump to C code
- */
- bcl 20, 31, .+4
-5: mflr r3
- addi r3, r3, 9f - 5b
- li r4, -1
- rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */
- ld r4, PACAKMSR(r13)
- mtspr SPRN_SRR0, r3
- mtspr SPRN_SRR1, r4
- RFI_TO_KERNEL
-9: addi r3, r1, STACK_FRAME_OVERHEAD
- bl kvmppc_bad_interrupt
- b 9b
/*
* This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 84e5a2dc8be5..a7061ee3b157 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -90,6 +90,7 @@
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
+#include <linux/of.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>
@@ -614,7 +615,7 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
/* Fetch the VMA if addr is not in the latest fetched one */
if (!vma || addr >= vma->vm_end) {
- vma = find_vma_intersection(kvm->mm, addr, addr+1);
+ vma = vma_lookup(kvm->mm, addr);
if (!vma) {
pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
break;
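vma_lookup() is the dedicated helper for the containment test that
find_vma_intersection(mm, addr, addr + 1) open-coded: it returns the VMA only
if addr actually lies inside it. Its behaviour, roughly:

	struct vm_area_struct *vma_lookup_sketch(struct mm_struct *mm,
						 unsigned long addr)
	{
		struct vm_area_struct *vma = find_vma(mm, addr);

		/* find_vma() returns the first VMA ending above addr;
		 * reject it if addr falls in the gap before vm_start. */
		if (vma && addr < vma->vm_start)
			vma = NULL;
		return vma;
	}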
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 913944dc3620..6bc9425acb32 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -25,6 +25,7 @@
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -425,61 +426,39 @@ static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
}
/************* MMU Notifiers *************/
-static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
- unsigned long end)
+static bool do_kvm_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
long i;
struct kvm_vcpu *vcpu;
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots) {
- unsigned long hva_start, hva_end;
- gfn_t gfn, gfn_end;
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvmppc_mmu_pte_pflush(vcpu, range->start << PAGE_SHIFT,
+ range->end << PAGE_SHIFT);
- hva_start = max(start, memslot->userspace_addr);
- hva_end = min(end, memslot->userspace_addr +
- (memslot->npages << PAGE_SHIFT));
- if (hva_start >= hva_end)
- continue;
- /*
- * {gfn(page) | page intersects with [hva_start, hva_end)} =
- * {gfn, gfn+1, ..., gfn_end-1}.
- */
- gfn = hva_to_gfn_memslot(hva_start, memslot);
- gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
- gfn_end << PAGE_SHIFT);
- }
+ return false;
}
-static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
- unsigned long end)
+static bool kvm_unmap_gfn_range_pr(struct kvm *kvm, struct kvm_gfn_range *range)
{
- do_kvm_unmap_hva(kvm, start, end);
-
- return 0;
+ return do_kvm_unmap_gfn(kvm, range);
}
-static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
- unsigned long end)
+static bool kvm_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range)
{
/* XXX could be more clever ;) */
- return 0;
+ return false;
}
-static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
+static bool kvm_test_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range)
{
/* XXX could be more clever ;) */
- return 0;
+ return false;
}
-static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
+static bool kvm_set_spte_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range)
{
/* The page will get remapped properly on its next fault */
- do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
+ return do_kvm_unmap_gfn(kvm, range);
}
/*****************************************/
@@ -515,7 +494,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
if (!vcpu->arch.pending_exceptions) {
kvm_vcpu_block(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
- vcpu->stat.halt_wakeup++;
+ vcpu->stat.generic.halt_wakeup++;
/* Unset POW bit after we woke up */
msr &= ~MSR_POW;
@@ -1870,6 +1849,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
/* Make sure we save the guest TAR/EBB/DSCR state */
kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
+ srr_regs_clobbered();
out:
vcpu->mode = OUTSIDE_GUEST_MODE;
return ret;
@@ -2079,10 +2059,10 @@ static struct kvmppc_ops kvm_ops_pr = {
.flush_memslot = kvmppc_core_flush_memslot_pr,
.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
.commit_memory_region = kvmppc_core_commit_memory_region_pr,
- .unmap_hva_range = kvm_unmap_hva_range_pr,
- .age_hva = kvm_age_hva_pr,
- .test_age_hva = kvm_test_age_hva_pr,
- .set_spte_hva = kvm_set_spte_hva_pr,
+ .unmap_gfn_range = kvm_unmap_gfn_range_pr,
+ .age_gfn = kvm_age_gfn_pr,
+ .test_age_gfn = kvm_test_age_gfn_pr,
+ .set_spte_gfn = kvm_set_spte_gfn_pr,
.free_memslot = kvmppc_core_free_memslot_pr,
.init_vm = kvmppc_core_init_vm_pr,
.destroy_vm = kvmppc_core_destroy_vm_pr,
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 031c8015864a..ac14239f3424 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -378,7 +378,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
kvm_vcpu_block(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
- vcpu->stat.halt_wakeup++;
+ vcpu->stat.generic.halt_wakeup++;
return EMULATE_DONE;
case H_LOGICAL_CI_LOAD:
return kvmppc_h_pr_logical_ci_load(vcpu);
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 1f492aa4c8d6..202046a83fc1 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -164,12 +164,15 @@ kvmppc_interrupt_pr:
/* 64-bit entry. Register usage at this point:
*
* SPRG_SCRATCH0 = guest R13
+ * R9 = HSTATE_IN_GUEST
* R12 = (guest CR << 32) | exit handler id
* R13 = PACA
* HSTATE.SCRATCH0 = guest R12
+ * HSTATE.SCRATCH2 = guest R9
*/
#ifdef CONFIG_PPC64
/* Match 32-bit entry */
+ ld r9,HSTATE_SCRATCH2(r13)
rotldi r12, r12, 32 /* Flip R12 halves for stw */
stw r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */
srdi r12, r12, 32 /* shift trap into low half */
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index e7219b6f5f9a..8cfab3547494 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -14,6 +14,7 @@
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
+#include <linux/irqdomain.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
@@ -128,6 +129,71 @@ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
/*
+ * Pull a vcpu's context from the XIVE on guest exit.
+ * This assumes we are in virtual mode (MMU on)
+ */
+void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
+{
+ void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
+
+ if (!vcpu->arch.xive_pushed)
+ return;
+
+ /*
+ * Should not have been pushed if there is no tima
+ */
+ if (WARN_ON(!tima))
+ return;
+
+ eieio();
+ /* First load to pull the context, we ignore the value */
+ __raw_readl(tima + TM_SPC_PULL_OS_CTX);
+ /* Second load to recover the context state (Words 0 and 1) */
+ vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS);
+
+ /* Fix up some of the state for the next load */
+ vcpu->arch.xive_saved_state.lsmfb = 0;
+ vcpu->arch.xive_saved_state.ack = 0xff;
+ vcpu->arch.xive_pushed = 0;
+ eieio();
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
+
+void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
+{
+ void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;
+
+ if (!esc_vaddr)
+ return;
+
+ /* we are using XIVE with single escalation */
+
+ if (vcpu->arch.xive_esc_on) {
+ /*
+ * If we still have a pending escalation, abort the cede,
+ * and we must set PQ to 10 rather than 00 so that we don't
+ * potentially end up with two entries for the escalation
+ * interrupt in the XIVE interrupt queue. In that case
+ * we also don't want to set xive_esc_on to 1 here in
+ * case we race with xive_esc_irq().
+ */
+ vcpu->arch.ceded = 0;
+ /*
+ * The escalation interrupts are special as we don't EOI them.
+ * There is no need to use the load-after-store ordering offset
+ * to set PQ to 10 as we won't use StoreEOI.
+ */
+ __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10);
+ } else {
+ vcpu->arch.xive_esc_on = true;
+ mb();
+ __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
+ }
+ mb();
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);
+
+/*
* This is a simple trigger for a generic XIVE IRQ. This must
* only be called for interrupts that support a trigger page
*/
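How the helpers pair up on the virtual-mode path (the call sites live outside
this hunk, so the pairing below is an assumption drawn from the function
comments):

	kvmppc_xive_push_vcpu(vcpu);		/* before entering the guest */
	/* ... guest runs ... */
	kvmppc_xive_pull_vcpu(vcpu);		/* on exit, MMU back on */

	/* and when a ceded vcpu resumes instead of napping: */
	kvmppc_xive_rearm_escalation(vcpu);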
@@ -2075,6 +2141,36 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
return 0;
}
+int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ /* The VM should have configured XICS mode before doing XICS hcalls. */
+ if (!kvmppc_xics_enabled(vcpu))
+ return H_TOO_HARD;
+
+ switch (req) {
+ case H_XIRR:
+ return xive_vm_h_xirr(vcpu);
+ case H_CPPR:
+ return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
+ case H_EOI:
+ return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
+ case H_IPI:
+ return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ case H_IPOLL:
+ return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
+ case H_XIRR_X:
+ xive_vm_h_xirr(vcpu);
+ kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
+ return H_SUCCESS;
+ }
+
+ return H_UNSUPPORTED;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);
+
int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
{
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
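A sketch of the expected caller (assumed; the actual call site is in the HV
hcall path, not shown in this hunk): try the XIVE layer first and fall back
when it returns H_TOO_HARD:

	ret = kvmppc_xive_xics_hcall(vcpu, req);
	if (ret == H_TOO_HARD)
		ret = xics_slow_path(vcpu, req);	/* placeholder name */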
@@ -2257,21 +2353,3 @@ struct kvm_device_ops kvm_xive_ops = {
.get_attr = xive_get_attr,
.has_attr = xive_has_attr,
};
-
-void kvmppc_xive_init_module(void)
-{
- __xive_vm_h_xirr = xive_vm_h_xirr;
- __xive_vm_h_ipoll = xive_vm_h_ipoll;
- __xive_vm_h_ipi = xive_vm_h_ipi;
- __xive_vm_h_cppr = xive_vm_h_cppr;
- __xive_vm_h_eoi = xive_vm_h_eoi;
-}
-
-void kvmppc_xive_exit_module(void)
-{
- __xive_vm_h_xirr = NULL;
- __xive_vm_h_ipoll = NULL;
- __xive_vm_h_ipi = NULL;
- __xive_vm_h_cppr = NULL;
- __xive_vm_h_eoi = NULL;
-}
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index 86c24a4ad809..afe9eeac6d56 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -289,13 +289,6 @@ extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
-extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
-extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
-extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr);
-extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
-extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
-
/*
* Common Xive routines for XICS-over-XIVE and XIVE native
*/
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index 76800c84f2a3..573ecaab3597 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -12,6 +12,7 @@
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/file.h>
+#include <linux/irqdomain.h>
#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
@@ -1281,13 +1282,3 @@ struct kvm_device_ops kvm_xive_native_ops = {
.has_attr = kvmppc_xive_native_has_attr,
.mmap = kvmppc_xive_native_mmap,
};
-
-void kvmppc_xive_native_init_module(void)
-{
- ;
-}
-
-void kvmppc_xive_native_exit_module(void)
-{
- ;
-}
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 7d5fe43f85c4..551b30d84aee 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -36,29 +36,59 @@
unsigned long kvmppc_booke_handlers;
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("mmio", mmio_exits),
- VCPU_STAT("sig", signal_exits),
- VCPU_STAT("itlb_r", itlb_real_miss_exits),
- VCPU_STAT("itlb_v", itlb_virt_miss_exits),
- VCPU_STAT("dtlb_r", dtlb_real_miss_exits),
- VCPU_STAT("dtlb_v", dtlb_virt_miss_exits),
- VCPU_STAT("sysc", syscall_exits),
- VCPU_STAT("isi", isi_exits),
- VCPU_STAT("dsi", dsi_exits),
- VCPU_STAT("inst_emu", emulated_inst_exits),
- VCPU_STAT("dec", dec_exits),
- VCPU_STAT("ext_intr", ext_intr_exits),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("doorbell", dbell_exits),
- VCPU_STAT("guest doorbell", gdbell_exits),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- VM_STAT("remote_tlb_flush", remote_tlb_flush),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_ICOUNTER(VM, num_2M_pages),
+ STATS_DESC_ICOUNTER(VM, num_1G_pages)
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, sum_exits),
+ STATS_DESC_COUNTER(VCPU, mmio_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, light_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, syscall_exits),
+ STATS_DESC_COUNTER(VCPU, isi_exits),
+ STATS_DESC_COUNTER(VCPU, dsi_exits),
+ STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
+ STATS_DESC_COUNTER(VCPU, dec_exits),
+ STATS_DESC_COUNTER(VCPU, ext_intr_exits),
+ STATS_DESC_TIME_NSEC(VCPU, halt_wait_ns),
+ STATS_DESC_COUNTER(VCPU, halt_successful_wait),
+ STATS_DESC_COUNTER(VCPU, dbell_exits),
+ STATS_DESC_COUNTER(VCPU, gdbell_exits),
+ STATS_DESC_COUNTER(VCPU, ld),
+ STATS_DESC_COUNTER(VCPU, st),
+ STATS_DESC_COUNTER(VCPU, pthru_all),
+ STATS_DESC_COUNTER(VCPU, pthru_host),
+ STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
/* TODO: use vcpu_printf() */
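The three header initialisers above pin down the binary layout that the KVM
stats file descriptor exposes; a sketch of what the offsets mean:

	/*
	 * [struct kvm_stats_header][id string, name_size bytes]
	 * [descriptor array, num_desc entries][u64 data, one per descriptor]
	 */
	offset_of_nth_counter = header.data_offset + n * sizeof(u64);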
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index ed0c9c43d0cf..7f16afc331ef 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -721,45 +721,36 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
/************* MMU Notifiers *************/
-static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+static bool kvm_e500_mmu_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- trace_kvm_unmap_hva(hva);
-
/*
* Flush all shadow tlb entries everywhere. This is slow, but
 * we are 100% sure that we catch the page to be unmapped
*/
- kvm_flush_remote_tlbs(kvm);
-
- return 0;
+ return true;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
- unsigned flags)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
- /* kvm_unmap_hva flushes everything anyways */
- kvm_unmap_hva(kvm, start);
-
- return 0;
+ return kvm_e500_mmu_unmap_gfn(kvm, range);
}
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
/* XXX could be more clever ;) */
- return 0;
+ return false;
}
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
/* XXX could be more clever ;) */
- return 0;
+ return false;
}
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
/* The page will get remapped properly on its next fault */
- kvm_unmap_hva(kvm, hva);
- return 0;
+ return kvm_e500_mmu_unmap_gfn(kvm, range);
}
/*****************************************/
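Note the changed contract: a gfn_range hook no longer flushes on its own;
returning true asks common KVM code to flush, which is why the explicit
kvm_flush_remote_tlbs() call could be dropped. Caller side, roughly:

	if (kvm_unmap_gfn_range(kvm, &range))
		kvm_flush_remote_tlbs(kvm);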
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index a2a68a958fa0..be33b5321a76 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -682,6 +682,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
!kvmppc_hv_ops->enable_dawr1(NULL));
break;
+ case KVM_CAP_PPC_RPT_INVALIDATE:
+ r = 1;
+ break;
#endif
default:
r = 0;
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
index 3837842986aa..eff6e82dbcd4 100644
--- a/arch/powerpc/kvm/trace_booke.h
+++ b/arch/powerpc/kvm/trace_booke.h
@@ -69,21 +69,6 @@ TRACE_EVENT(kvm_exit,
)
);
-TRACE_EVENT(kvm_unmap_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("unmap hva 0x%lx\n", __entry->hva)
-);
-
TRACE_EVENT(kvm_booke206_stlb_write,
TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index d4efc182662a..99a7c9132422 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -5,6 +5,9 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
+CFLAGS_code-patching.o += -fno-stack-protector
+CFLAGS_feature-fixups.o += -fno-stack-protector
+
CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
@@ -16,7 +19,7 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
endif
-obj-y += alloc.o code-patching.o feature-fixups.o pmem.o inst.o test_code-patching.o
+obj-y += alloc.o code-patching.o feature-fixups.o pmem.o test_code-patching.o
ifndef CONFIG_KASAN
obj-y += string.o memcmp_$(BITS).o
@@ -36,7 +39,7 @@ extra-$(CONFIG_PPC64) += crtsavres.o
endif
obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
- memcpy_power7.o
+ memcpy_power7.o restart_table.o
obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
memcpy_64.o copy_mc_64.o
diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c
index b895166afc82..f3999cbb2fcc 100644
--- a/arch/powerpc/lib/checksum_wrappers.c
+++ b/arch/powerpc/lib/checksum_wrappers.c
@@ -16,16 +16,12 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
{
__wsum csum;
- might_sleep();
-
- if (unlikely(!access_ok(src, len)))
+ if (unlikely(!user_read_access_begin(src, len)))
return 0;
- allow_read_from_user(src, len);
-
csum = csum_partial_copy_generic((void __force *)src, dst, len);
- prevent_read_from_user(src, len);
+ user_read_access_end();
return csum;
}
EXPORT_SYMBOL(csum_and_copy_from_user);
@@ -34,15 +30,12 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
__wsum csum;
- might_sleep();
- if (unlikely(!access_ok(dst, len)))
+ if (unlikely(!user_write_access_begin(dst, len)))
return 0;
- allow_write_to_user(dst, len);
-
csum = csum_partial_copy_generic(src, (void __force *)dst, len);
- prevent_write_to_user(dst, len);
+ user_write_access_end();
return csum;
}
EXPORT_SYMBOL(csum_and_copy_to_user);
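Both wrappers now use the scoped user-access API: user_read_access_begin()
(and the write variant) combines the old access_ok() check with opening the
access window that allow_read_from_user() used to open. The general shape:

	if (unlikely(!user_read_access_begin(src, len)))
		return 0;	/* or -EFAULT, per caller convention */
	/* ... raw accesses to src ... */
	user_read_access_end();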
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 2333625b5e31..f9a3019e37b4 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -18,13 +18,17 @@
#include <asm/setup.h>
#include <asm/inst.h>
-static int __patch_instruction(struct ppc_inst *exec_addr, struct ppc_inst instr,
- struct ppc_inst *patch_addr)
+static int __patch_instruction(u32 *exec_addr, struct ppc_inst instr, u32 *patch_addr)
{
- if (!ppc_inst_prefixed(instr))
- __put_user_asm_goto(ppc_inst_val(instr), patch_addr, failed, "stw");
- else
- __put_user_asm_goto(ppc_inst_as_u64(instr), patch_addr, failed, "std");
+ if (!ppc_inst_prefixed(instr)) {
+ u32 val = ppc_inst_val(instr);
+
+ __put_kernel_nofault(patch_addr, &val, u32, failed);
+ } else {
+ u64 val = ppc_inst_as_ulong(instr);
+
+ __put_kernel_nofault(patch_addr, &val, u64, failed);
+ }
asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
"r" (exec_addr));
@@ -35,7 +39,7 @@ failed:
return -EFAULT;
}
-int raw_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
+int raw_patch_instruction(u32 *addr, struct ppc_inst instr)
{
return __patch_instruction(addr, instr, addr);
}
@@ -65,22 +69,16 @@ static int text_area_cpu_down(unsigned int cpu)
}
/*
- * Run as a late init call. This allows all the boot time patching to be done
- * simply by patching the code, and then we're called here prior to
- * mark_rodata_ro(), which happens after all init calls are run. Although
- * BUG_ON() is rude, in this case it should only happen if ENOMEM, and we judge
- * it as being preferable to a kernel that will crash later when someone tries
- * to use patch_instruction().
+ * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
+ * we judge it as being preferable to a kernel that will crash later when
+ * someone tries to use patch_instruction().
*/
-static int __init setup_text_poke_area(void)
+void __init poking_init(void)
{
BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"powerpc/text_poke:online", text_area_cpu_up,
text_area_cpu_down));
-
- return 0;
}
-late_initcall(setup_text_poke_area);
/*
* This can be called for kernel text or a module.
@@ -143,10 +141,10 @@ static inline int unmap_patch_area(unsigned long addr)
return 0;
}
-static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
+static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
{
int err;
- struct ppc_inst *patch_addr = NULL;
+ u32 *patch_addr = NULL;
unsigned long flags;
unsigned long text_poke_addr;
unsigned long kaddr = (unsigned long)addr;
@@ -167,7 +165,7 @@ static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
goto out;
}
- patch_addr = (struct ppc_inst *)(text_poke_addr + (kaddr & ~PAGE_MASK));
+ patch_addr = (u32 *)(text_poke_addr + (kaddr & ~PAGE_MASK));
__patch_instruction(addr, instr, patch_addr);
@@ -182,14 +180,14 @@ out:
}
#else /* !CONFIG_STRICT_KERNEL_RWX */
-static int do_patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
+static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
{
return raw_patch_instruction(addr, instr);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
-int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
+int patch_instruction(u32 *addr, struct ppc_inst instr)
{
/* Make sure we aren't patching a freed init section */
if (init_mem_is_free && init_section_contains(addr, 4)) {
@@ -200,7 +198,7 @@ int patch_instruction(struct ppc_inst *addr, struct ppc_inst instr)
}
NOKPROBE_SYMBOL(patch_instruction);
-int patch_branch(struct ppc_inst *addr, unsigned long target, int flags)
+int patch_branch(u32 *addr, unsigned long target, int flags)
{
struct ppc_inst instr;
@@ -252,8 +250,7 @@ bool is_conditional_branch(struct ppc_inst instr)
}
NOKPROBE_SYMBOL(is_conditional_branch);
-int create_branch(struct ppc_inst *instr,
- const struct ppc_inst *addr,
+int create_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags)
{
long offset;
@@ -273,7 +270,7 @@ int create_branch(struct ppc_inst *instr,
return 0;
}
-int create_cond_branch(struct ppc_inst *instr, const struct ppc_inst *addr,
+int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
unsigned long target, int flags)
{
long offset;
@@ -320,39 +317,39 @@ int instr_is_relative_link_branch(struct ppc_inst instr)
return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}
-static unsigned long branch_iform_target(const struct ppc_inst *instr)
+static unsigned long branch_iform_target(const u32 *instr)
{
signed long imm;
- imm = ppc_inst_val(*instr) & 0x3FFFFFC;
+ imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC;
/* If the top bit of the immediate value is set this is negative */
if (imm & 0x2000000)
imm -= 0x4000000;
- if ((ppc_inst_val(*instr) & BRANCH_ABSOLUTE) == 0)
+ if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
imm += (unsigned long)instr;
return (unsigned long)imm;
}
-static unsigned long branch_bform_target(const struct ppc_inst *instr)
+static unsigned long branch_bform_target(const u32 *instr)
{
signed long imm;
- imm = ppc_inst_val(*instr) & 0xFFFC;
+ imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC;
/* If the top bit of the immediate value is set this is negative */
if (imm & 0x8000)
imm -= 0x10000;
- if ((ppc_inst_val(*instr) & BRANCH_ABSOLUTE) == 0)
+ if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
imm += (unsigned long)instr;
return (unsigned long)imm;
}
-unsigned long branch_target(const struct ppc_inst *instr)
+unsigned long branch_target(const u32 *instr)
{
if (instr_is_branch_iform(ppc_inst_read(instr)))
return branch_iform_target(instr);
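A worked example of the I-form decode above, matching the "smallest negative
relative branch" case in the selftests later in this patch: for instruction
0x4bfffffc at address A,

	long imm = 0x4bfffffc & 0x3FFFFFC;	/* = 0x3fffffc        */
	imm -= 0x4000000;			/* bit 25 set -> -4    */
	/* BRANCH_ABSOLUTE clear -> PC-relative */
	unsigned long target = A + imm;		/* = A - 4             */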
@@ -362,17 +359,7 @@ unsigned long branch_target(const struct ppc_inst *instr)
return 0;
}
-int instr_is_branch_to_addr(const struct ppc_inst *instr, unsigned long addr)
-{
- if (instr_is_branch_iform(ppc_inst_read(instr)) ||
- instr_is_branch_bform(ppc_inst_read(instr)))
- return branch_target(instr) == addr;
-
- return 0;
-}
-
-int translate_branch(struct ppc_inst *instr, const struct ppc_inst *dest,
- const struct ppc_inst *src)
+int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src)
{
unsigned long target;
target = branch_target(src);
@@ -399,12 +386,21 @@ void __patch_exception(int exc, unsigned long addr)
* instruction of the exception, not the first one
*/
- patch_branch((struct ppc_inst *)(ibase + (exc / 4) + 1), addr, 0);
+ patch_branch(ibase + (exc / 4) + 1, addr, 0);
}
#endif
#ifdef CONFIG_CODE_PATCHING_SELFTEST
+static int instr_is_branch_to_addr(const u32 *instr, unsigned long addr)
+{
+ if (instr_is_branch_iform(ppc_inst_read(instr)) ||
+ instr_is_branch_bform(ppc_inst_read(instr)))
+ return branch_target(instr) == addr;
+
+ return 0;
+}
+
static void __init test_trampoline(void)
{
asm ("nop;\n");
@@ -417,9 +413,9 @@ static void __init test_branch_iform(void)
{
int err;
struct ppc_inst instr;
- unsigned long addr;
-
- addr = (unsigned long)&instr;
+ u32 tmp[2];
+ u32 *iptr = tmp;
+ unsigned long addr = (unsigned long)tmp;
/* The simplest case, branch to self, no flags */
check(instr_is_branch_iform(ppc_inst(0x48000000)));
@@ -440,63 +436,68 @@ static void __init test_branch_iform(void)
check(!instr_is_branch_iform(ppc_inst(0x7bfffffd)));
/* Absolute branch to 0x100 */
- instr = ppc_inst(0x48000103);
- check(instr_is_branch_to_addr(&instr, 0x100));
+ patch_instruction(iptr, ppc_inst(0x48000103));
+ check(instr_is_branch_to_addr(iptr, 0x100));
/* Absolute branch to 0x420fc */
- instr = ppc_inst(0x480420ff);
- check(instr_is_branch_to_addr(&instr, 0x420fc));
+ patch_instruction(iptr, ppc_inst(0x480420ff));
+ check(instr_is_branch_to_addr(iptr, 0x420fc));
/* Maximum positive relative branch, + 20MB - 4B */
- instr = ppc_inst(0x49fffffc);
- check(instr_is_branch_to_addr(&instr, addr + 0x1FFFFFC));
+ patch_instruction(iptr, ppc_inst(0x49fffffc));
+ check(instr_is_branch_to_addr(iptr, addr + 0x1FFFFFC));
/* Smallest negative relative branch, - 4B */
- instr = ppc_inst(0x4bfffffc);
- check(instr_is_branch_to_addr(&instr, addr - 4));
+ patch_instruction(iptr, ppc_inst(0x4bfffffc));
+ check(instr_is_branch_to_addr(iptr, addr - 4));
/* Largest negative relative branch, - 32 MB */
- instr = ppc_inst(0x4a000000);
- check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
+ patch_instruction(iptr, ppc_inst(0x4a000000));
+ check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
/* Branch to self, with link */
- err = create_branch(&instr, &instr, addr, BRANCH_SET_LINK);
- check(instr_is_branch_to_addr(&instr, addr));
+ err = create_branch(&instr, iptr, addr, BRANCH_SET_LINK);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
/* Branch to self - 0x100, with link */
- err = create_branch(&instr, &instr, addr - 0x100, BRANCH_SET_LINK);
- check(instr_is_branch_to_addr(&instr, addr - 0x100));
+ err = create_branch(&instr, iptr, addr - 0x100, BRANCH_SET_LINK);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x100));
/* Branch to self + 0x100, no link */
- err = create_branch(&instr, &instr, addr + 0x100, 0);
- check(instr_is_branch_to_addr(&instr, addr + 0x100));
+ err = create_branch(&instr, iptr, addr + 0x100, 0);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr + 0x100));
/* Maximum relative negative offset, - 32 MB */
- err = create_branch(&instr, &instr, addr - 0x2000000, BRANCH_SET_LINK);
- check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
+ err = create_branch(&instr, iptr, addr - 0x2000000, BRANCH_SET_LINK);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
 /* Out of range relative negative offset, - 32 MB + 4 */
- err = create_branch(&instr, &instr, addr - 0x2000004, BRANCH_SET_LINK);
+ err = create_branch(&instr, iptr, addr - 0x2000004, BRANCH_SET_LINK);
check(err);
/* Out of range relative positive offset, + 32 MB */
- err = create_branch(&instr, &instr, addr + 0x2000000, BRANCH_SET_LINK);
+ err = create_branch(&instr, iptr, addr + 0x2000000, BRANCH_SET_LINK);
check(err);
/* Unaligned target */
- err = create_branch(&instr, &instr, addr + 3, BRANCH_SET_LINK);
+ err = create_branch(&instr, iptr, addr + 3, BRANCH_SET_LINK);
check(err);
/* Check flags are masked correctly */
- err = create_branch(&instr, &instr, addr, 0xFFFFFFFC);
- check(instr_is_branch_to_addr(&instr, addr));
+ err = create_branch(&instr, iptr, addr, 0xFFFFFFFC);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
check(ppc_inst_equal(instr, ppc_inst(0x48000000)));
}
static void __init test_create_function_call(void)
{
- struct ppc_inst *iptr;
+ u32 *iptr;
unsigned long dest;
struct ppc_inst instr;
/* Check we can create a function call */
- iptr = (struct ppc_inst *)ppc_function_entry(test_trampoline);
+ iptr = (u32 *)ppc_function_entry(test_trampoline);
dest = ppc_function_entry(test_create_function_call);
create_branch(&instr, iptr, dest, BRANCH_SET_LINK);
patch_instruction(iptr, instr);
@@ -507,10 +508,11 @@ static void __init test_branch_bform(void)
{
int err;
unsigned long addr;
- struct ppc_inst *iptr, instr;
+ struct ppc_inst instr;
+ u32 tmp[2];
+ u32 *iptr = tmp;
unsigned int flags;
- iptr = &instr;
addr = (unsigned long)iptr;
/* The simplest case, branch to self, no flags */
@@ -523,39 +525,43 @@ static void __init test_branch_bform(void)
check(!instr_is_branch_bform(ppc_inst(0x7bffffff)));
/* Absolute conditional branch to 0x100 */
- instr = ppc_inst(0x43ff0103);
- check(instr_is_branch_to_addr(&instr, 0x100));
+ patch_instruction(iptr, ppc_inst(0x43ff0103));
+ check(instr_is_branch_to_addr(iptr, 0x100));
/* Absolute conditional branch to 0x20fc */
- instr = ppc_inst(0x43ff20ff);
- check(instr_is_branch_to_addr(&instr, 0x20fc));
+ patch_instruction(iptr, ppc_inst(0x43ff20ff));
+ check(instr_is_branch_to_addr(iptr, 0x20fc));
/* Maximum positive relative conditional branch, + 32 KB - 4B */
- instr = ppc_inst(0x43ff7ffc);
- check(instr_is_branch_to_addr(&instr, addr + 0x7FFC));
+ patch_instruction(iptr, ppc_inst(0x43ff7ffc));
+ check(instr_is_branch_to_addr(iptr, addr + 0x7FFC));
/* Smallest negative relative conditional branch, - 4B */
- instr = ppc_inst(0x43fffffc);
- check(instr_is_branch_to_addr(&instr, addr - 4));
+ patch_instruction(iptr, ppc_inst(0x43fffffc));
+ check(instr_is_branch_to_addr(iptr, addr - 4));
/* Largest negative relative conditional branch, - 32 KB */
- instr = ppc_inst(0x43ff8000);
- check(instr_is_branch_to_addr(&instr, addr - 0x8000));
+ patch_instruction(iptr, ppc_inst(0x43ff8000));
+ check(instr_is_branch_to_addr(iptr, addr - 0x8000));
/* All condition code bits set & link */
flags = 0x3ff000 | BRANCH_SET_LINK;
/* Branch to self */
err = create_cond_branch(&instr, iptr, addr, flags);
- check(instr_is_branch_to_addr(&instr, addr));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
/* Branch to self - 0x100 */
err = create_cond_branch(&instr, iptr, addr - 0x100, flags);
- check(instr_is_branch_to_addr(&instr, addr - 0x100));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x100));
/* Branch to self + 0x100 */
err = create_cond_branch(&instr, iptr, addr + 0x100, flags);
- check(instr_is_branch_to_addr(&instr, addr + 0x100));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr + 0x100));
/* Maximum relative negative offset, - 32 KB */
err = create_cond_branch(&instr, iptr, addr - 0x8000, flags);
- check(instr_is_branch_to_addr(&instr, addr - 0x8000));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x8000));
 /* Out of range relative negative offset, - 32 KB + 4 */
err = create_cond_branch(&instr, iptr, addr - 0x8004, flags);
@@ -571,7 +577,8 @@ static void __init test_branch_bform(void)
/* Check flags are masked correctly */
err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC);
- check(instr_is_branch_to_addr(&instr, addr));
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
check(ppc_inst_equal(instr, ppc_inst(0x43FF0000)));
}
@@ -710,9 +717,9 @@ static void __init test_prefixed_patching(void)
extern unsigned int code_patching_test1_expected[];
extern unsigned int end_code_patching_test1[];
- __patch_instruction((struct ppc_inst *)code_patching_test1,
+ __patch_instruction(code_patching_test1,
ppc_inst_prefix(OP_PREFIX << 26, 0x00000000),
- (struct ppc_inst *)code_patching_test1);
+ code_patching_test1);
check(!memcmp(code_patching_test1,
code_patching_test1_expected,
diff --git a/arch/powerpc/lib/error-inject.c b/arch/powerpc/lib/error-inject.c
index 407b992fb02f..e834079d2b5c 100644
--- a/arch/powerpc/lib/error-inject.c
+++ b/arch/powerpc/lib/error-inject.c
@@ -11,6 +11,6 @@ void override_function_with_return(struct pt_regs *regs)
* function in the kernel/module, captured on a kprobe. We don't need
* to worry about 32-bit userspace on a 64-bit kernel.
*/
- regs->nip = regs->link;
+ regs_set_return_ip(regs, regs->link);
}
NOKPROBE_SYMBOL(override_function_with_return);
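/*
 * For reference (an assumption about its shape, not quoted from this
 * series): regs_set_return_ip() funnels every return-IP update through one
 * accessor, giving any later bookkeeping a single place to hook.
 */
static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->nip = ip;
}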
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 1fd31b4b0e13..cda17bee5afe 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -14,8 +14,10 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
+#include <linux/stop_machine.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
+#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -32,26 +34,25 @@ struct fixup_entry {
long alt_end_off;
};
-static struct ppc_inst *calc_addr(struct fixup_entry *fcur, long offset)
+static u32 *calc_addr(struct fixup_entry *fcur, long offset)
{
/*
* We store the offset to the code as a negative offset from
* the start of the alt_entry, to support the VDSO. This
* routine converts that back into an actual address.
*/
- return (struct ppc_inst *)((unsigned long)fcur + offset);
+ return (u32 *)((unsigned long)fcur + offset);
}
-static int patch_alt_instruction(struct ppc_inst *src, struct ppc_inst *dest,
- struct ppc_inst *alt_start, struct ppc_inst *alt_end)
+static int patch_alt_instruction(u32 *src, u32 *dest, u32 *alt_start, u32 *alt_end)
{
int err;
struct ppc_inst instr;
instr = ppc_inst_read(src);
- if (instr_is_relative_branch(*src)) {
- struct ppc_inst *target = (struct ppc_inst *)branch_target(src);
+ if (instr_is_relative_branch(ppc_inst_read(src))) {
+ u32 *target = (u32 *)branch_target(src);
/* Branch within the section doesn't need translating */
if (target < alt_start || target > alt_end) {
@@ -68,7 +69,7 @@ static int patch_alt_instruction(struct ppc_inst *src, struct ppc_inst *dest,
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
- struct ppc_inst *start, *end, *alt_start, *alt_end, *src, *dest, nop;
+ u32 *start, *end, *alt_start, *alt_end, *src, *dest;
start = calc_addr(fcur, fcur->start_off);
end = calc_addr(fcur, fcur->end_off);
@@ -90,9 +91,8 @@ static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
return 1;
}
- nop = ppc_inst(PPC_INST_NOP);
- for (; dest < end; dest = ppc_inst_next(dest, &nop))
- raw_patch_instruction(dest, nop);
+ for (; dest < end; dest++)
+ raw_patch_instruction(dest, ppc_inst(PPC_RAW_NOP()));
return 0;
}
@@ -127,21 +127,21 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
start = PTRRELOC(&__start___stf_entry_barrier_fixup);
end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
i = 0;
if (types & STF_BARRIER_FALLBACK) {
- instrs[i++] = 0x7d4802a6; /* mflr r10 */
- instrs[i++] = 0x60000000; /* branch patched below */
- instrs[i++] = 0x7d4803a6; /* mtlr r10 */
+ instrs[i++] = PPC_RAW_MFLR(_R10);
+ instrs[i++] = PPC_RAW_NOP(); /* branch patched below */
+ instrs[i++] = PPC_RAW_MTLR(_R10);
} else if (types & STF_BARRIER_EIEIO) {
- instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
+ instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
} else if (types & STF_BARRIER_SYNC_ORI) {
- instrs[i++] = 0x7c0004ac; /* hwsync */
- instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+ instrs[i++] = PPC_RAW_SYNC();
+ instrs[i++] = PPC_RAW_LD(_R10, _R13, 0);
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
}
for (i = 0; start < end; start++, i++) {
@@ -149,17 +149,17 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
-
- if (types & STF_BARRIER_FALLBACK)
- patch_branch((struct ppc_inst *)(dest + 1),
- (unsigned long)&stf_barrier_fallback,
- BRANCH_SET_LINK);
- else
- patch_instruction((struct ppc_inst *)(dest + 1),
- ppc_inst(instrs[1]));
-
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ // See comment in do_entry_flush_fixups() RE order of patching
+ if (types & STF_BARRIER_FALLBACK) {
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_branch(dest + 1,
+ (unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK);
+ } else {
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ }
}
printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
@@ -179,32 +179,31 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
start = PTRRELOC(&__start___stf_exit_barrier_fixup);
end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
- instrs[3] = 0x60000000; /* nop */
- instrs[4] = 0x60000000; /* nop */
- instrs[5] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
+ instrs[3] = PPC_RAW_NOP();
+ instrs[4] = PPC_RAW_NOP();
+ instrs[5] = PPC_RAW_NOP();
i = 0;
if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
if (cpu_has_feature(CPU_FTR_HVMODE)) {
- instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
- instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_HSPRG1, _R13);
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG0);
} else {
- instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
- instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_SPRG2, _R13);
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG1);
}
- instrs[i++] = 0x7c0004ac; /* hwsync */
- instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
- } else {
- instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
- }
+ instrs[i++] = PPC_RAW_SYNC();
+ instrs[i++] = PPC_RAW_LD(_R13, _R13, 0);
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG1);
+ else
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG2);
} else if (types & STF_BARRIER_EIEIO) {
- instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
+ instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
}
for (i = 0; start < end; start++, i++) {
@@ -212,12 +211,12 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
- patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
- patch_instruction((struct ppc_inst *)(dest + 4), ppc_inst(instrs[4]));
- patch_instruction((struct ppc_inst *)(dest + 5), ppc_inst(instrs[5]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest + 3, ppc_inst(instrs[3]));
+ patch_instruction(dest + 4, ppc_inst(instrs[4]));
+ patch_instruction(dest + 5, ppc_inst(instrs[5]));
}
printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
(types == STF_BARRIER_NONE) ? "no" :
@@ -227,11 +226,44 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
: "unknown");
}
+static bool stf_exit_reentrant = false;
+static bool rfi_exit_reentrant = false;
+
+static int __do_stf_barrier_fixups(void *data)
+{
+ enum stf_barrier_type *types = data;
+
+ do_stf_entry_barrier_fixups(*types);
+ do_stf_exit_barrier_fixups(*types);
+
+ return 0;
+}
void do_stf_barrier_fixups(enum stf_barrier_type types)
{
- do_stf_entry_barrier_fixups(types);
- do_stf_exit_barrier_fixups(types);
+ /*
+ * The call to the fallback entry flush, and the fallback/sync-ori exit
+ * flush cannot be safely patched in/out while other CPUs are
+ * executing them. So call __do_stf_barrier_fixups() on one CPU while
+ * all other CPUs spin in the stop machine core with interrupts hard
+ * disabled.
+ *
+ * The branch to mark interrupt exits non-reentrant is enabled first,
+ * then stop_machine runs, which ensures all CPUs are out of the
+ * low level interrupt exit code before patching. After the patching,
+ * if reentrant exits are allowed, the branch is flipped back to
+ * permit fast exits.
+ */
+ static_branch_enable(&interrupt_exit_not_reentrant);
+
+ stop_machine(__do_stf_barrier_fixups, &types, NULL);
+
+ if ((types & STF_BARRIER_FALLBACK) || (types & STF_BARRIER_SYNC_ORI))
+ stf_exit_reentrant = false;
+ else
+ stf_exit_reentrant = true;
+
+ if (stf_exit_reentrant && rfi_exit_reentrant)
+ static_branch_disable(&interrupt_exit_not_reentrant);
}
void do_uaccess_flush_fixups(enum l1d_flush_type types)
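/*
 * For reference, the stop_machine() API used above (linux/stop_machine.h):
 * it runs fn on one CPU while every other online CPU spins with interrupts
 * disabled; a NULL cpumask lets the core pick the CPU.
 */
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);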
@@ -243,35 +275,35 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
start = PTRRELOC(&__start___uaccess_flush_fixup);
end = PTRRELOC(&__stop___uaccess_flush_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
- instrs[3] = 0x4e800020; /* blr */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
+ instrs[3] = PPC_RAW_BLR();
i = 0;
if (types == L1D_FLUSH_FALLBACK) {
- instrs[3] = 0x60000000; /* nop */
+ instrs[3] = PPC_RAW_NOP();
/* fallthrough to fallback flush */
}
if (types & L1D_FLUSH_ORI) {
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
- instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
}
if (types & L1D_FLUSH_MTTRIG)
- instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
- patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3]));
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest + 3, ppc_inst(instrs[3]));
}
printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
@@ -284,30 +316,56 @@ void do_uaccess_flush_fixups(enum l1d_flush_type types)
: "unknown");
}
-void do_entry_flush_fixups(enum l1d_flush_type types)
+static int __do_entry_flush_fixups(void *data)
{
+ enum l1d_flush_type types = *(enum l1d_flush_type *)data;
unsigned int instrs[3], *dest;
long *start, *end;
int i;
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
i = 0;
if (types == L1D_FLUSH_FALLBACK) {
- instrs[i++] = 0x7d4802a6; /* mflr r10 */
- instrs[i++] = 0x60000000; /* branch patched below */
- instrs[i++] = 0x7d4803a6; /* mtlr r10 */
+ instrs[i++] = PPC_RAW_MFLR(_R10);
+ instrs[i++] = PPC_RAW_NOP(); /* branch patched below */
+ instrs[i++] = PPC_RAW_MTLR(_R10);
}
if (types & L1D_FLUSH_ORI) {
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
- instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
}
if (types & L1D_FLUSH_MTTRIG)
- instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
+
+ /*
+ * If we're patching in or out the fallback flush we need to be careful about the
+ * order in which we patch instructions. That's because it's possible we could
+ * take a page fault after patching one instruction, so the sequence of
+ * instructions must be safe even in a half-patched state.
+ *
+ * To make that work, when patching in the fallback flush we patch in this order:
+ * - the mflr (dest)
+ * - the mtlr (dest + 2)
+ * - the branch (dest + 1)
+ *
+ * That ensures the sequence is safe to execute at any point. In contrast if we
+ * patch the mtlr last, it's possible we could return from the branch and not
+ * restore LR, leading to a crash later.
+ *
+ * When patching out the fallback flush (either with nops or another flush type),
+ * we patch in this order:
+ * - the branch (dest + 1)
+ * - the mtlr (dest + 2)
+ * - the mflr (dest)
+ *
+ * Note we are protected by stop_machine() from other CPUs executing the code in a
+ * semi-patched state.
+ */
start = PTRRELOC(&__start___entry_flush_fixup);
end = PTRRELOC(&__stop___entry_flush_fixup);
@@ -316,15 +374,16 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
-
- if (types == L1D_FLUSH_FALLBACK)
- patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&entry_flush_fallback,
- BRANCH_SET_LINK);
- else
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
-
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ if (types == L1D_FLUSH_FALLBACK) {
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_branch(dest + 1,
+ (unsigned long)&entry_flush_fallback, BRANCH_SET_LINK);
+ } else {
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ }
}
start = PTRRELOC(&__start___scv_entry_flush_fixup);
@@ -334,15 +393,16 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
-
- if (types == L1D_FLUSH_FALLBACK)
- patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
- BRANCH_SET_LINK);
- else
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
-
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ if (types == L1D_FLUSH_FALLBACK) {
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_branch(dest + 1,
+ (unsigned long)&scv_entry_flush_fallback, BRANCH_SET_LINK);
+ } else {
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ }
}
@@ -354,10 +414,24 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
: "ori type" :
(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
: "unknown");
+
+ return 0;
}
-void do_rfi_flush_fixups(enum l1d_flush_type types)
+void do_entry_flush_fixups(enum l1d_flush_type types)
{
+ /*
+ * The call to the fallback flush cannot be safely patched in/out while
+ * other CPUs are executing it. So call __do_entry_flush_fixups() on one
+ * CPU while all other CPUs spin in the stop machine core with interrupts
+ * hard disabled.
+ */
+ stop_machine(__do_entry_flush_fixups, &types, NULL);
+}
+
+static int __do_rfi_flush_fixups(void *data)
+{
+ enum l1d_flush_type types = *(enum l1d_flush_type *)data;
unsigned int instrs[3], *dest;
long *start, *end;
int i;
@@ -365,31 +439,31 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
start = PTRRELOC(&__start___rfi_flush_fixup);
end = PTRRELOC(&__stop___rfi_flush_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
if (types & L1D_FLUSH_FALLBACK)
/* b .+16 to fallback flush */
- instrs[0] = 0x48000010;
+ instrs[0] = PPC_INST_BRANCH | 16;
i = 0;
if (types & L1D_FLUSH_ORI) {
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
- instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
}
if (types & L1D_FLUSH_MTTRIG)
- instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
- patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
}
printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
@@ -400,6 +474,29 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
: "ori type" :
(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
: "unknown");
+
+ return 0;
+}
+
+void do_rfi_flush_fixups(enum l1d_flush_type types)
+{
+ /*
+ * stop_machine gets all CPUs out of the interrupt exit handler, the
+ * same as do_stf_barrier_fixups. do_rfi_flush_fixups patching can run
+ * without stop_machine, so this could be achieved with a broadcast
+ * IPI instead, but this matches the stf sequence.
+ */
+ static_branch_enable(&interrupt_exit_not_reentrant);
+
+ stop_machine(__do_rfi_flush_fixups, &types, NULL);
+
+ if (types & L1D_FLUSH_FALLBACK)
+ rfi_exit_reentrant = false;
+ else
+ rfi_exit_reentrant = true;
+
+ if (stf_exit_reentrant && rfi_exit_reentrant)
+ static_branch_disable(&interrupt_exit_not_reentrant);
}
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
@@ -411,18 +508,18 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
start = fixup_start;
end = fixup_end;
- instr = 0x60000000; /* nop */
+ instr = PPC_RAW_NOP();
if (enable) {
pr_info("barrier-nospec: using ORI speculation barrier\n");
- instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+ instr = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
}
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instr));
+ patch_instruction(dest, ppc_inst(instr));
}
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
@@ -452,21 +549,21 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
start = fixup_start;
end = fixup_end;
- instr[0] = PPC_INST_NOP;
- instr[1] = PPC_INST_NOP;
+ instr[0] = PPC_RAW_NOP();
+ instr[1] = PPC_RAW_NOP();
if (enable) {
pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
- instr[0] = PPC_INST_ISYNC;
- instr[1] = PPC_INST_SYNC;
+ instr[0] = PPC_RAW_ISYNC();
+ instr[1] = PPC_RAW_SYNC();
}
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction((struct ppc_inst *)dest, ppc_inst(instr[0]));
- patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instr[1]));
+ patch_instruction(dest, ppc_inst(instr[0]));
+ patch_instruction(dest + 1, ppc_inst(instr[1]));
}
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
@@ -480,7 +577,7 @@ static void patch_btb_flush_section(long *curr)
end = (void *)curr + *(curr + 1);
for (; start < end; start++) {
pr_devel("patching dest %lx\n", (unsigned long)start);
- patch_instruction((struct ppc_inst *)start, ppc_inst(PPC_INST_NOP));
+ patch_instruction(start, ppc_inst(PPC_RAW_NOP()));
}
}
@@ -499,7 +596,7 @@ void do_btb_flush_fixups(void)
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
long *start, *end;
- struct ppc_inst *dest;
+ u32 *dest;
if (!(value & CPU_FTR_LWSYNC))
return ;
@@ -516,13 +613,14 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
static void do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
- struct ppc_inst inst, *src, *dest, *end;
+ struct ppc_inst inst;
+ u32 *src, *dest, *end;
if (PHYSICAL_START == 0)
return;
- src = (struct ppc_inst *)(KERNELBASE + PHYSICAL_START);
- dest = (struct ppc_inst *)KERNELBASE;
+ src = (u32 *)(KERNELBASE + PHYSICAL_START);
+ dest = (u32 *)KERNELBASE;
end = (void *)src + (__end_interrupts - _stext);
while (src < end) {
diff --git a/arch/powerpc/lib/inst.c b/arch/powerpc/lib/inst.c
deleted file mode 100644
index 9cc17eb62462..000000000000
--- a/arch/powerpc/lib/inst.c
+++ /dev/null
@@ -1,73 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2020, IBM Corporation.
- */
-
-#include <linux/uaccess.h>
-#include <asm/disassemble.h>
-#include <asm/inst.h>
-#include <asm/ppc-opcode.h>
-
-#ifdef CONFIG_PPC64
-int probe_user_read_inst(struct ppc_inst *inst,
- struct ppc_inst __user *nip)
-{
- unsigned int val, suffix;
- int err;
-
- err = copy_from_user_nofault(&val, nip, sizeof(val));
- if (err)
- return err;
- if (get_op(val) == OP_PREFIX) {
- err = copy_from_user_nofault(&suffix, (void __user *)nip + 4, 4);
- *inst = ppc_inst_prefix(val, suffix);
- } else {
- *inst = ppc_inst(val);
- }
- return err;
-}
-
-int probe_kernel_read_inst(struct ppc_inst *inst,
- struct ppc_inst *src)
-{
- unsigned int val, suffix;
- int err;
-
- err = copy_from_kernel_nofault(&val, src, sizeof(val));
- if (err)
- return err;
- if (get_op(val) == OP_PREFIX) {
- err = copy_from_kernel_nofault(&suffix, (void *)src + 4, 4);
- *inst = ppc_inst_prefix(val, suffix);
- } else {
- *inst = ppc_inst(val);
- }
- return err;
-}
-#else /* !CONFIG_PPC64 */
-int probe_user_read_inst(struct ppc_inst *inst,
- struct ppc_inst __user *nip)
-{
- unsigned int val;
- int err;
-
- err = copy_from_user_nofault(&val, nip, sizeof(val));
- if (!err)
- *inst = ppc_inst(val);
-
- return err;
-}
-
-int probe_kernel_read_inst(struct ppc_inst *inst,
- struct ppc_inst *src)
-{
- unsigned int val;
- int err;
-
- err = copy_from_kernel_nofault(&val, src, sizeof(val));
- if (!err)
- *inst = ppc_inst(val);
-
- return err;
-}
-#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/lib/restart_table.c b/arch/powerpc/lib/restart_table.c
new file mode 100644
index 000000000000..bccb662c1b7b
--- /dev/null
+++ b/arch/powerpc/lib/restart_table.c
@@ -0,0 +1,56 @@
+#include <asm/interrupt.h>
+#include <asm/kprobes.h>
+
+struct soft_mask_table_entry {
+ unsigned long start;
+ unsigned long end;
+};
+
+struct restart_table_entry {
+ unsigned long start;
+ unsigned long end;
+ unsigned long fixup;
+};
+
+extern struct soft_mask_table_entry __start___soft_mask_table[];
+extern struct soft_mask_table_entry __stop___soft_mask_table[];
+
+extern struct restart_table_entry __start___restart_table[];
+extern struct restart_table_entry __stop___restart_table[];
+
+/* Given an address, look for it in the soft mask table */
+bool search_kernel_soft_mask_table(unsigned long addr)
+{
+ struct soft_mask_table_entry *smte = __start___soft_mask_table;
+
+ while (smte < __stop___soft_mask_table) {
+ unsigned long start = smte->start;
+ unsigned long end = smte->end;
+
+ if (addr >= start && addr < end)
+ return true;
+
+ smte++;
+ }
+ return false;
+}
+NOKPROBE_SYMBOL(search_kernel_soft_mask_table);
+
+/* Given an address, look for it in the kernel exception table */
+unsigned long search_kernel_restart_table(unsigned long addr)
+{
+ struct restart_table_entry *rte = __start___restart_table;
+
+ while (rte < __stop___restart_table) {
+ unsigned long start = rte->start;
+ unsigned long end = rte->end;
+ unsigned long fixup = rte->fixup;
+
+ if (addr >= start && addr < end)
+ return fixup;
+
+ rte++;
+ }
+ return 0;
+}
+NOKPROBE_SYMBOL(search_kernel_restart_table);
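/*
 * Illustrative sketch (not part of this file): a caller in the interrupt
 * return path could consult the restart table like this, redirecting a
 * restartable sequence that was interrupted part-way through.
 */
static void restart_fixup_sketch(struct pt_regs *regs)
{
	unsigned long fixup = search_kernel_restart_table(regs->nip);

	if (fixup)
		regs_set_return_ip(regs, fixup);
}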
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index bb5c20d4ca91..d8d5f901cee1 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -904,7 +904,7 @@ static nokprobe_inline int do_vsx_load(struct instruction_op *op,
if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
return -EFAULT;
- nr_vsx_regs = size / sizeof(__vector128);
+ nr_vsx_regs = max(1ul, size / sizeof(__vector128));
emulate_vsx_load(op, buf, mem, cross_endian);
preempt_disable();
if (reg < 32) {
@@ -951,7 +951,7 @@ static nokprobe_inline int do_vsx_store(struct instruction_op *op,
if (!address_ok(regs, ea, size))
return -EFAULT;
- nr_vsx_regs = size / sizeof(__vector128);
+ nr_vsx_regs = max(1ul, size / sizeof(__vector128));
preempt_disable();
if (reg < 32) {
/* FP regs + extensions */
@@ -1401,10 +1401,6 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
}
- /* Following cases refer to regs->gpr[], so we need all regs */
- if (!FULL_REGS(regs))
- return -1;
-
rd = (word >> 21) & 0x1f;
ra = (word >> 16) & 0x1f;
rb = (word >> 11) & 0x1f;
@@ -1704,6 +1700,28 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
op->val = regs->ccr & imm;
goto compute_done;
+ case 128: /* setb */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ /*
+ * 'ra' encodes the CR field number (bfa) in the top 3 bits.
+ * Since each CR field is 4 bits,
+ * we can simply mask off the bottom two bits (bfa * 4)
+ * to yield the first bit in the CR field.
+ */
+ ra = ra & ~0x3;
+ /* 'val' stores bits of the CR field (bfa) */
+ val = regs->ccr >> (CR0_SHIFT - ra);
+ /* checks if the LT bit of CR field (bfa) is set */
+ if (val & 8)
+ op->val = -1;
+ /* checks if the GT bit of CR field (bfa) is set */
+ else if (val & 4)
+ op->val = 1;
+ else
+ op->val = 0;
+ goto compute_done;
+
case 144: /* mtcrf */
op->type = COMPUTE + SETCC;
imm = 0xf0000000UL;
@@ -3086,15 +3104,6 @@ NOKPROBE_SYMBOL(analyse_instr);
*/
static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
-#ifdef CONFIG_PPC32
- /*
- * Check if we will touch kernel stack overflow
- */
- if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
- printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
- return -EINVAL;
- }
-#endif /* CONFIG_PPC32 */
/*
* Check if we already set since that means we'll
* lose the previous value.
@@ -3216,7 +3225,7 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
default:
WARN_ON_ONCE(1);
}
- regs->nip = next_pc;
+ regs_set_return_ip(regs, next_pc);
}
NOKPROBE_SYMBOL(emulate_update_regs);
@@ -3554,7 +3563,7 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
/* can't step mtmsr[d] that would clear MSR_RI */
return -1;
/* here op.val is the mask of bits to change */
- regs->msr = (regs->msr & ~op.val) | (val & op.val);
+ regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
goto instr_done;
#ifdef CONFIG_PPC64
@@ -3567,7 +3576,7 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
if (IS_ENABLED(CONFIG_PPC_FAST_ENDIAN_SWITCH) &&
cpu_has_feature(CPU_FTR_REAL_LE) &&
regs->gpr[0] == 0x1ebe) {
- regs->msr ^= MSR_LE;
+ regs_set_return_msr(regs, regs->msr ^ MSR_LE);
goto instr_done;
}
regs->gpr[9] = regs->gpr[13];
@@ -3575,8 +3584,8 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
regs->gpr[11] = regs->nip + 4;
regs->gpr[12] = regs->msr & MSR_MASK;
regs->gpr[13] = (unsigned long) get_paca();
- regs->nip = (unsigned long) &system_call_common;
- regs->msr = MSR_KERNEL;
+ regs_set_return_ip(regs, (unsigned long) &system_call_common);
+ regs_set_return_msr(regs, MSR_KERNEL);
return 1;
#ifdef CONFIG_PPC_BOOK3S_64
@@ -3586,8 +3595,8 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
regs->gpr[11] = regs->nip + 4;
regs->gpr[12] = regs->msr & MSR_MASK;
regs->gpr[13] = (unsigned long) get_paca();
- regs->nip = (unsigned long) &system_call_vectored_emulate;
- regs->msr = MSR_KERNEL;
+ regs_set_return_ip(regs, (unsigned long) &system_call_vectored_emulate);
+ regs_set_return_msr(regs, MSR_KERNEL);
return 1;
#endif
@@ -3598,7 +3607,8 @@ int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
return 0;
instr_done:
- regs->nip = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type));
+ regs_set_return_ip(regs,
+ truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
return 1;
}
NOKPROBE_SYMBOL(emulate_step);
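/*
 * Worked example of the setb decode added above, matching the
 * "BFA = 1, CR = GT" subtest added below (ccr = 0x4000000):
 */
static int setb_sketch(unsigned long ccr, int bfa)
{
	int ra = bfa << 2;			/* first bit of CR field bfa */
	unsigned long val = ccr >> (28 - ra);	/* 28 is CR0_SHIFT */

	return (val & 8) ? -1 : (val & 4) ? 1 : 0;
}
/* setb_sketch(0x4000000, 1): val = 0x4, LT clear, GT set, result 1. */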
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
index 783d1b85ecfe..8b4f6b3e96c4 100644
--- a/arch/powerpc/lib/test_emulate_step.c
+++ b/arch/powerpc/lib/test_emulate_step.c
@@ -53,6 +53,8 @@
ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \
PPC_RAW_ADDI(t, a, i))
+#define TEST_SETB(t, bfa) ppc_inst(PPC_INST_SETB | ___PPC_RT(t) | ___PPC_RA((bfa & 0x7) << 2))
+
static void __init init_pt_regs(struct pt_regs *regs)
{
@@ -824,8 +826,7 @@ static void __init test_plxvp_pstxvp(void)
* XTp = 32xTX + 2xTp
* let RA=3 R=0 D=d0||d1=0 R=0 Tp=1 TX=1
*/
- instr = ppc_inst_prefix(PPC_RAW_PLXVP(34, 0, 3, 0) >> 32,
- PPC_RAW_PLXVP(34, 0, 3, 0) & 0xffffffff);
+ instr = ppc_inst_prefix(PPC_RAW_PLXVP_P(34, 0, 3, 0), PPC_RAW_PLXVP_S(34, 0, 3, 0));
stepped = emulate_step(&regs, instr);
if (stepped == 1 && cpu_has_feature(CPU_FTR_VSX)) {
@@ -853,8 +854,7 @@ static void __init test_plxvp_pstxvp(void)
* XSp = 32xSX + 2xSp
* let RA=3 D=d0||d1=0 R=0 Sp=1 SX=1
*/
- instr = ppc_inst_prefix(PPC_RAW_PSTXVP(34, 0, 3, 0) >> 32,
- PPC_RAW_PSTXVP(34, 0, 3, 0) & 0xffffffff);
+ instr = ppc_inst_prefix(PPC_RAW_PSTXVP_P(34, 0, 3, 0), PPC_RAW_PSTXVP_S(34, 0, 3, 0));
stepped = emulate_step(&regs, instr);
@@ -922,7 +922,7 @@ static struct compute_test compute_tests[] = {
.subtests = {
{
.descr = "R0 = LONG_MAX",
- .instr = ppc_inst(PPC_INST_NOP),
+ .instr = ppc_inst(PPC_RAW_NOP()),
.regs = {
.gpr[0] = LONG_MAX,
}
@@ -930,6 +930,33 @@ static struct compute_test compute_tests[] = {
}
},
{
+ .mnemonic = "setb",
+ .cpu_feature = CPU_FTR_ARCH_300,
+ .subtests = {
+ {
+ .descr = "BFA = 1, CR = GT",
+ .instr = TEST_SETB(20, 1),
+ .regs = {
+ .ccr = 0x4000000,
+ }
+ },
+ {
+ .descr = "BFA = 4, CR = LT",
+ .instr = TEST_SETB(20, 4),
+ .regs = {
+ .ccr = 0x8000,
+ }
+ },
+ {
+ .descr = "BFA = 5, CR = EQ",
+ .instr = TEST_SETB(20, 5),
+ .regs = {
+ .ccr = 0x200,
+ }
+ }
+ }
+ },
+ {
.mnemonic = "add",
.subtests = {
{
@@ -1582,6 +1609,7 @@ static int __init emulate_compute_instr(struct pt_regs *regs,
if (!regs || !ppc_inst_val(instr))
return -EINVAL;
+ /* These regs are not a return frame */
regs->nip = patch_site_addr(&patch__exec_instr);
analysed = analyse_instr(&op, regs, instr);
diff --git a/arch/powerpc/math-emu/math.c b/arch/powerpc/math-emu/math.c
index 30b4b69c6941..36761bd00f38 100644
--- a/arch/powerpc/math-emu/math.c
+++ b/arch/powerpc/math-emu/math.c
@@ -225,7 +225,7 @@ record_exception(struct pt_regs *regs, int eflag)
int
do_mathemu(struct pt_regs *regs)
{
- void *op0 = 0, *op1 = 0, *op2 = 0, *op3 = 0;
+ void *op0 = NULL, *op1 = NULL, *op2 = NULL, *op3 = NULL;
unsigned long pc = regs->nip;
signed short sdisp;
u32 insn = 0;
@@ -234,7 +234,7 @@ do_mathemu(struct pt_regs *regs)
int type = 0;
int eflag, trap;
- if (get_user(insn, (u32 *)pc))
+ if (get_user(insn, (u32 __user *)pc))
return -EFAULT;
switch (insn >> 26) {
@@ -453,7 +453,7 @@ do_mathemu(struct pt_regs *regs)
break;
}
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
return 0;
illegal:
diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c
index 0a05e51964c1..39b84e7452e1 100644
--- a/arch/powerpc/math-emu/math_efp.c
+++ b/arch/powerpc/math-emu/math_efp.c
@@ -710,7 +710,7 @@ update_regs:
illegal:
if (have_e500_cpu_a005_erratum) {
/* according to e500 cpu a005 erratum, reissue efp inst */
- regs->nip -= 4;
+ regs_add_return_ip(regs, -4);
pr_debug("re-issue efp inst: %08lx\n", speinsn);
return 0;
}
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 3b4e9e4e25ea..eae4ec2988fc 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -5,14 +5,15 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-obj-y := fault.o mem.o pgtable.o mmap.o maccess.o \
+obj-y := fault.o mem.o pgtable.o mmap.o maccess.o pageattr.o \
init_$(BITS).o pgtable_$(BITS).o \
pgtable-frag.o ioremap.o ioremap_$(BITS).o \
- init-common.o mmu_context.o drmem.o
+ init-common.o mmu_context.o drmem.o \
+ cacheflush.o
obj-$(CONFIG_PPC_MMU_NOHASH) += nohash/
obj-$(CONFIG_PPC_BOOK3S_32) += book3s32/
obj-$(CONFIG_PPC_BOOK3S_64) += book3s64/
-obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
+obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_PPC_MM_SLICES) += slice.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
diff --git a/arch/powerpc/mm/book3s32/Makefile b/arch/powerpc/mm/book3s32/Makefile
index 446d9de88ce4..15f4773643d2 100644
--- a/arch/powerpc/mm/book3s32/Makefile
+++ b/arch/powerpc/mm/book3s32/Makefile
@@ -9,3 +9,5 @@ endif
obj-y += mmu.o mmu_context.o
obj-$(CONFIG_PPC_BOOK3S_603) += nohash_low.o
obj-$(CONFIG_PPC_BOOK3S_604) += hash_low.o tlb.o
+obj-$(CONFIG_PPC_KUEP) += kuep.o
+obj-$(CONFIG_PPC_KUAP) += kuap.o
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 0e6dc830c38b..6925ce998557 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -27,8 +27,10 @@
#include <asm/code-patching-asm.h>
#ifdef CONFIG_PTE_64BIT
+#define PTE_T_SIZE 8
#define PTE_FLAGS_OFFSET 4 /* offset of PTE flags, in bytes */
#else
+#define PTE_T_SIZE 4
#define PTE_FLAGS_OFFSET 0
#endif
@@ -140,10 +142,6 @@ _GLOBAL(hash_page)
bne- .Lretry /* retry if someone got there first */
mfsrin r3,r4 /* get segment reg for segment */
-#ifndef CONFIG_VMAP_STACK
- mfctr r0
- stw r0,_CTR(r11)
-#endif
bl create_hpte /* add the hash table entry */
#ifdef CONFIG_SMP
@@ -152,17 +150,7 @@ _GLOBAL(hash_page)
li r0,0
stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
#endif
-
-#ifdef CONFIG_VMAP_STACK
b fast_hash_page_return
-#else
- /* Return from the exception */
- lwz r5,_CTR(r11)
- mtctr r5
- lwz r0,GPR0(r11)
- lwz r8,GPR8(r11)
- b fast_exception_return
-#endif
#ifdef CONFIG_SMP
.Lhash_page_out:
@@ -502,7 +490,7 @@ _GLOBAL(flush_hash_pages)
bne 2f
ble cr1,19f
addi r4,r4,0x1000
- addi r5,r5,PTE_SIZE
+ addi r5,r5,PTE_T_SIZE
addi r6,r6,-1
b 1b
@@ -587,7 +575,7 @@ _GLOBAL(flush_hash_pages)
8: ble cr1,9f /* if all ptes checked */
81: addi r6,r6,-1
- addi r5,r5,PTE_SIZE
+ addi r5,r5,PTE_T_SIZE
addi r4,r4,0x1000
lwz r0,0(r5) /* check next pte */
cmpwi cr1,r6,1
diff --git a/arch/powerpc/mm/book3s32/kuap.c b/arch/powerpc/mm/book3s32/kuap.c
new file mode 100644
index 000000000000..0f920f09af57
--- /dev/null
+++ b/arch/powerpc/mm/book3s32/kuap.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <asm/kup.h>
+#include <asm/smp.h>
+
+struct static_key_false disable_kuap_key;
+EXPORT_SYMBOL(disable_kuap_key);
+
+void kuap_lock_all_ool(void)
+{
+ kuap_lock_all();
+}
+EXPORT_SYMBOL(kuap_lock_all_ool);
+
+void kuap_unlock_all_ool(void)
+{
+ kuap_unlock_all();
+}
+EXPORT_SYMBOL(kuap_unlock_all_ool);
+
+void setup_kuap(bool disabled)
+{
+ if (!disabled)
+ kuap_lock_all_ool();
+
+ if (smp_processor_id() != boot_cpuid)
+ return;
+
+ if (disabled)
+ static_branch_enable(&disable_kuap_key);
+ else
+ pr_info("Activating Kernel Userspace Access Protection\n");
+}
diff --git a/arch/powerpc/mm/book3s32/kuep.c b/arch/powerpc/mm/book3s32/kuep.c
new file mode 100644
index 000000000000..c20733d6e02c
--- /dev/null
+++ b/arch/powerpc/mm/book3s32/kuep.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <asm/kup.h>
+#include <asm/smp.h>
+
+struct static_key_false disable_kuep_key;
+
+void setup_kuep(bool disabled)
+{
+ if (!disabled)
+ kuep_lock();
+
+ if (smp_processor_id() != boot_cpuid)
+ return;
+
+ if (disabled)
+ static_branch_enable(&disable_kuep_key);
+ else
+ pr_info("Activating Kernel Userspace Execution Prevention\n");
+}
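/*
 * Presumably disable_kuep_key pairs with the kuep_is_disabled() helper
 * referenced elsewhere in this series; a sketch under that assumption:
 */
static __always_inline bool kuep_is_disabled(void)
{
	return static_branch_unlikely(&disable_kuep_key);
}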
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index d7eb266a3f7a..27061583a010 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -162,7 +162,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
- if (debug_pagealloc_enabled() || __map_without_bats) {
+ if (debug_pagealloc_enabled_or_kfence() || __map_without_bats) {
pr_debug_once("Read-Write memory mapped without BATs\n");
if (base >= border)
return base;
@@ -184,17 +184,10 @@ static bool is_module_segment(unsigned long addr)
{
if (!IS_ENABLED(CONFIG_MODULES))
return false;
-#ifdef MODULES_VADDR
if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
return false;
if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
return false;
-#else
- if (addr < ALIGN_DOWN(VMALLOC_START, SZ_256M))
- return false;
- if (addr > ALIGN(VMALLOC_END, SZ_256M) - 1)
- return false;
-#endif
return true;
}
@@ -452,26 +445,6 @@ void __init print_system_hash_info(void)
pr_info("Hash_mask = 0x%lx\n", Hash_mask);
}
-#ifdef CONFIG_PPC_KUEP
-void __init setup_kuep(bool disabled)
-{
- pr_info("Activating Kernel Userspace Execution Prevention\n");
-
- if (disabled)
- pr_warn("KUEP cannot be disabled yet on 6xx when compiled in\n");
-}
-#endif
-
-#ifdef CONFIG_PPC_KUAP
-void __init setup_kuap(bool disabled)
-{
- pr_info("Activating Kernel Userspace Access Protection\n");
-
- if (disabled)
- pr_warn("KUAP cannot be disabled yet on 6xx when compiled in\n");
-}
-#endif
-
void __init early_init_mmu(void)
{
}
diff --git a/arch/powerpc/mm/book3s32/mmu_context.c b/arch/powerpc/mm/book3s32/mmu_context.c
index 218996e40a8e..e2708e387dc3 100644
--- a/arch/powerpc/mm/book3s32/mmu_context.c
+++ b/arch/powerpc/mm/book3s32/mmu_context.c
@@ -24,6 +24,12 @@
#include <asm/mmu_context.h>
/*
+ * Room for two PTE pointers, usually the kernel and current user pointers
+ * to their respective root page table.
+ */
+void *abatron_pteptrs[2];
+
+/*
* On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
* (virtual segment identifiers) for each context. Although the
* hardware supports 24-bit VSIDs, and thus >1 million contexts,
@@ -39,19 +45,6 @@
#define LAST_CONTEXT 32767
#define FIRST_CONTEXT 1
-/*
- * This function defines the mapping from contexts to VSIDs (virtual
- * segment IDs). We use a skew on both the context and the high 4 bits
- * of the 32-bit virtual address (the "effective segment ID") in order
- * to spread out the entries in the MMU hash table. Note, if this
- * function is changed then arch/ppc/mm/hashtable.S will have to be
- * changed to correspond.
- *
- *
- * CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
- * & 0xffffff)
- */
-
static unsigned long next_mmu_context;
static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
@@ -111,3 +104,32 @@ void __init mmu_context_init(void)
context_map[0] = (1 << FIRST_CONTEXT) - 1;
next_mmu_context = FIRST_CONTEXT;
}
+
+void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+{
+ long id = next->context.id;
+ unsigned long val;
+
+ if (id < 0)
+ panic("mm_struct %p has no context ID", next);
+
+ isync();
+
+ val = CTX_TO_VSID(id, 0);
+ if (!kuep_is_disabled())
+ val |= SR_NX;
+ if (!kuap_is_disabled())
+ val |= SR_KS;
+
+ update_user_segments(val);
+
+ if (IS_ENABLED(CONFIG_BDI_SWITCH))
+ abatron_pteptrs[1] = next->pgd;
+
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ mtspr(SPRN_SDR1, rol32(__pa(next->pgd), 4) & 0xffff01ff);
+
+ mb(); /* sync */
+ isync();
+}
+EXPORT_SYMBOL(switch_mmu_context);
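/*
 * For reference, the CTX_TO_VSID() mapping documented by the comment
 * deleted above, which still governs the value loaded by CTX_TO_VSID(id, 0):
 */
#define CTX_TO_VSID(ctx, va)	\
	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) & 0xffffff)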
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index 567e0c6b3978..ad5eff097d31 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -8,6 +8,7 @@
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
+#include <linux/stop_machine.h>
#include <asm/sections.h>
#include <asm/mmu.h>
@@ -400,10 +401,103 @@ EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifdef CONFIG_STRICT_KERNEL_RWX
+
+struct change_memory_parms {
+ unsigned long start, end, newpp;
+ unsigned int step, nr_cpus, master_cpu;
+ atomic_t cpu_counter;
+};
+
+// We'd rather this was on the stack but it has to be in the RMO
+static struct change_memory_parms chmem_parms;
+
+// And therefore we need a lock to protect it from concurrent use
+static DEFINE_MUTEX(chmem_lock);
+
+static void change_memory_range(unsigned long start, unsigned long end,
+ unsigned int step, unsigned long newpp)
+{
+ unsigned long idx;
+
+ pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
+ start, end, newpp, step);
+
+ for (idx = start; idx < end; idx += step)
+ /* Not sure if we can do much with the return value */
+ mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
+ mmu_kernel_ssize);
+}
+
+static int notrace chmem_secondary_loop(struct change_memory_parms *parms)
+{
+ unsigned long msr, tmp, flags;
+ int *p;
+
+ p = &parms->cpu_counter.counter;
+
+ local_irq_save(flags);
+ hard_irq_disable();
+
+ asm volatile (
+ // Switch to real mode and leave interrupts off
+ "mfmsr %[msr] ;"
+ "li %[tmp], %[MSR_IR_DR] ;"
+ "andc %[tmp], %[msr], %[tmp] ;"
+ "mtmsrd %[tmp] ;"
+
+ // Tell the master we are in real mode
+ "1: "
+ "lwarx %[tmp], 0, %[p] ;"
+ "addic %[tmp], %[tmp], -1 ;"
+ "stwcx. %[tmp], 0, %[p] ;"
+ "bne- 1b ;"
+
+ // Spin until the counter goes to zero
+ "2: ;"
+ "lwz %[tmp], 0(%[p]) ;"
+ "cmpwi %[tmp], 0 ;"
+ "bne- 2b ;"
+
+ // Switch back to virtual mode
+ "mtmsrd %[msr] ;"
+
+ : // outputs
+ [msr] "=&r" (msr), [tmp] "=&b" (tmp), "+m" (*p)
+ : // inputs
+ [p] "b" (p), [MSR_IR_DR] "i" (MSR_IR | MSR_DR)
+ : // clobbers
+ "cc", "xer"
+ );
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int change_memory_range_fn(void *data)
+{
+ struct change_memory_parms *parms = data;
+
+ if (parms->master_cpu != smp_processor_id())
+ return chmem_secondary_loop(parms);
+
+ // Wait for all but one CPU (this one) to call in
+ while (atomic_read(&parms->cpu_counter) > 1)
+ barrier();
+
+ change_memory_range(parms->start, parms->end, parms->step, parms->newpp);
+
+ mb();
+
+ // Signal the other CPUs that we're done
+ atomic_dec(&parms->cpu_counter);
+
+ return 0;
+}
+
static bool hash__change_memory_range(unsigned long start, unsigned long end,
unsigned long newpp)
{
- unsigned long idx;
unsigned int step, shift;
shift = mmu_psize_defs[mmu_linear_psize].shift;
@@ -415,25 +509,43 @@ static bool hash__change_memory_range(unsigned long start, unsigned long end,
if (start >= end)
return false;
- pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
- start, end, newpp, step);
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ mutex_lock(&chmem_lock);
- for (idx = start; idx < end; idx += step)
- /* Not sure if we can do much with the return value */
- mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
- mmu_kernel_ssize);
+ chmem_parms.start = start;
+ chmem_parms.end = end;
+ chmem_parms.step = step;
+ chmem_parms.newpp = newpp;
+ chmem_parms.master_cpu = smp_processor_id();
+
+ cpus_read_lock();
+
+ atomic_set(&chmem_parms.cpu_counter, num_online_cpus());
+
+ // Ensure state is consistent before we call the other CPUs
+ mb();
+
+ stop_machine_cpuslocked(change_memory_range_fn, &chmem_parms,
+ cpu_online_mask);
+
+ cpus_read_unlock();
+ mutex_unlock(&chmem_lock);
+ } else
+ change_memory_range(start, end, step, newpp);
return true;
}
void hash__mark_rodata_ro(void)
{
- unsigned long start, end;
+ unsigned long start, end, pp;
start = (unsigned long)_stext;
end = (unsigned long)__init_begin;
- WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
+ pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY);
+
+ WARN_ON(!hash__change_memory_range(start, end, pp));
}
void hash__mark_initmem_nx(void)
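/*
 * The real-mode rendezvous in change_memory_range_fn() above, restated as
 * a plain-C sketch (illustrative only; the secondaries must really use the
 * inline asm so the counter is touched in real mode with IRQs hard off):
 */
static void rendezvous_master_sketch(struct change_memory_parms *parms)
{
	while (atomic_read(&parms->cpu_counter) > 1)
		barrier();		/* wait for all secondaries to check in */

	change_memory_range(parms->start, parms->end, parms->step, parms->newpp);
	mb();
	atomic_dec(&parms->cpu_counter);	/* release the secondaries */
}

static void rendezvous_secondary_sketch(struct change_memory_parms *parms)
{
	atomic_dec(&parms->cpu_counter);	/* "I am in real mode" */

	while (atomic_read(&parms->cpu_counter) != 0)
		barrier();		/* spin until the master has patched */
}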
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 581b20a2feaf..ac5720371c0d 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -338,7 +338,7 @@ repeat:
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
int psize, int ssize)
{
- unsigned long vaddr;
+ unsigned long vaddr, time_limit;
unsigned int step, shift;
int rc;
int ret = 0;
@@ -351,8 +351,19 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
/* Unmap the full range specified */
vaddr = ALIGN_DOWN(vstart, step);
+ time_limit = jiffies + HZ;
+
for (;vaddr < vend; vaddr += step) {
rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
+
+ /*
+ * For a large number of mappings, introduce a cond_resched()
+ * to prevent softlockup warnings.
+ */
+ if (time_after(jiffies, time_limit)) {
+ cond_resched();
+ time_limit = jiffies + HZ;
+ }
if (rc == -ENOENT) {
ret = -ENOENT;
continue;
@@ -1145,7 +1156,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
/* page is dirty */
if (!test_bit(PG_dcache_clean, &page->flags) && !PageReserved(page)) {
- if (trap == 0x400) {
+ if (trap == INTERRUPT_INST_STORAGE) {
flush_dcache_icache_page(page);
set_bit(PG_dcache_clean, &page->flags);
} else
@@ -1511,8 +1522,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
}
EXPORT_SYMBOL_GPL(hash_page);
-DECLARE_INTERRUPT_HANDLER_RET(__do_hash_fault);
-DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
+DECLARE_INTERRUPT_HANDLER(__do_hash_fault);
+DEFINE_INTERRUPT_HANDLER(__do_hash_fault)
{
unsigned long ea = regs->dar;
unsigned long dsisr = regs->dsisr;
@@ -1522,6 +1533,11 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
unsigned int region_id;
long err;
+ if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT))) {
+ hash__do_page_fault(regs);
+ return;
+ }
+
region_id = get_region_id(ea);
if ((region_id == VMALLOC_REGION_ID) || (region_id == IO_REGION_ID))
mm = &init_mm;
@@ -1545,10 +1561,10 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
if (user_mode(regs) || (region_id == USER_REGION_ID))
access &= ~_PAGE_PRIVILEGED;
- if (regs->trap == 0x400)
+ if (TRAP(regs) == INTERRUPT_INST_STORAGE)
access |= _PAGE_EXEC;
- err = hash_page_mm(mm, ea, access, regs->trap, flags);
+ err = hash_page_mm(mm, ea, access, TRAP(regs), flags);
if (unlikely(err < 0)) {
// failed to insert a hash PTE due to a hypervisor error
if (user_mode(regs)) {
@@ -1560,9 +1576,10 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
bad_page_fault(regs, SIGBUS);
}
err = 0;
- }
- return err;
+ } else if (err) {
+ hash__do_page_fault(regs);
+ }
}
/*
@@ -1571,12 +1588,6 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
*/
DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault)
{
- unsigned long dsisr = regs->dsisr;
- long err;
-
- if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT)))
- goto page_fault;
-
/*
* If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
* don't call hash_page, just fail the fault. This is required to
@@ -1595,13 +1606,9 @@ DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault)
return 0;
}
- err = __do_hash_fault(regs);
- if (err) {
-page_fault:
- err = hash__do_page_fault(regs);
- }
+ __do_hash_fault(regs);
- return err;
+ return 0;
}
#ifdef CONFIG_PPC_MM_SLICES
diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
index 0c8557220ae2..c10fc8a72fb3 100644
--- a/arch/powerpc/mm/book3s64/mmu_context.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -119,7 +119,7 @@ static int hash__init_new_context(struct mm_struct *mm)
/* This is fork. Copy hash_context details from current->mm */
memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
- /* inherit subpage prot detalis if we have one. */
+ /* inherit subpage prot details if we have one. */
if (current->mm->context.hash_context->spt) {
mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
GFP_KERNEL);
diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
index 15dcc5ad91c5..a2d9ad138709 100644
--- a/arch/powerpc/mm/book3s64/pkeys.c
+++ b/arch/powerpc/mm/book3s64/pkeys.c
@@ -301,19 +301,6 @@ void setup_kuap(bool disabled)
}
#endif
-static inline void update_current_thread_amr(u64 value)
-{
- current->thread.regs->amr = value;
-}
-
-static inline void update_current_thread_iamr(u64 value)
-{
- if (!likely(pkey_execute_disable_supported))
- return;
-
- current->thread.regs->iamr = value;
-}
-
#ifdef CONFIG_PPC_MEM_KEYS
void pkey_mm_init(struct mm_struct *mm)
{
@@ -328,7 +315,7 @@ static inline void init_amr(int pkey, u8 init_bits)
u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey));
u64 old_amr = current_thread_amr() & ~((u64)(0x3ul) << pkeyshift(pkey));
- update_current_thread_amr(old_amr | new_amr_bits);
+ current->thread.regs->amr = old_amr | new_amr_bits;
}
static inline void init_iamr(int pkey, u8 init_bits)
@@ -336,7 +323,10 @@ static inline void init_iamr(int pkey, u8 init_bits)
u64 new_iamr_bits = (((u64)init_bits & 0x1UL) << pkeyshift(pkey));
u64 old_iamr = current_thread_iamr() & ~((u64)(0x1ul) << pkeyshift(pkey));
- update_current_thread_iamr(old_iamr | new_iamr_bits);
+ if (!likely(pkey_execute_disable_supported))
+ return;
+
+ current->thread.regs->iamr = old_iamr | new_iamr_bits;
}
/*
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 98f0b243c1ab..2176a5f70746 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
@@ -108,7 +109,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
set_the_pte:
set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
- smp_wmb();
+ asm volatile("ptesync": : :"memory");
return 0;
}
@@ -168,7 +169,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
set_the_pte:
set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
- smp_wmb();
+ asm volatile("ptesync": : :"memory");
return 0;
}
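/*
 * A hedged summary of the barrier change above: smp_wmb() (lwsync) only
 * orders stores as observed by other CPUs, while the radix table walker is
 * not such an observer; ptesync is the barrier the ISA defines for making
 * a PTE update visible to the MMU, hence:
 *
 *	set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
 *	asm volatile("ptesync": : :"memory");
 */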
@@ -180,8 +181,8 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
}
#ifdef CONFIG_STRICT_KERNEL_RWX
-void radix__change_memory_range(unsigned long start, unsigned long end,
- unsigned long clear)
+static void radix__change_memory_range(unsigned long start, unsigned long end,
+ unsigned long clear)
{
unsigned long idx;
pgd_t *pgdp;
@@ -357,30 +358,19 @@ static void __init radix_init_pgtable(void)
}
/* Find out how many PID bits are supported */
- if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
- if (!mmu_pid_bits)
- mmu_pid_bits = 20;
- mmu_base_pid = 1;
- } else if (cpu_has_feature(CPU_FTR_HVMODE)) {
- if (!mmu_pid_bits)
- mmu_pid_bits = 20;
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ if (!cpu_has_feature(CPU_FTR_HVMODE) &&
+ cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
/*
- * When KVM is possible, we only use the top half of the
- * PID space to avoid collisions between host and guest PIDs
- * which can cause problems due to prefetch when exiting the
- * guest with AIL=3
+ * Older versions of KVM on these machines prefer that the
+ * guest only use the low 19 PID bits.
*/
- mmu_base_pid = 1 << (mmu_pid_bits - 1);
-#else
- mmu_base_pid = 1;
-#endif
- } else {
- /* The guest uses the bottom half of the PID space */
if (!mmu_pid_bits)
mmu_pid_bits = 19;
- mmu_base_pid = 1;
+ } else {
+ if (!mmu_pid_bits)
+ mmu_pid_bits = 20;
}
+ mmu_base_pid = 1;
/*
* Allocate Partition table and process table for the
@@ -486,6 +476,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
def = &mmu_psize_defs[idx];
def->shift = shift;
def->ap = ap;
+ def->h_rpt_pgsize = psize_to_rpti_pgsize(idx);
}
/* needed ? */
@@ -560,9 +551,13 @@ void __init radix__early_init_devtree(void)
*/
mmu_psize_defs[MMU_PAGE_4K].shift = 12;
mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
+ mmu_psize_defs[MMU_PAGE_4K].h_rpt_pgsize =
+ psize_to_rpti_pgsize(MMU_PAGE_4K);
mmu_psize_defs[MMU_PAGE_64K].shift = 16;
mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
+ mmu_psize_defs[MMU_PAGE_64K].h_rpt_pgsize =
+ psize_to_rpti_pgsize(MMU_PAGE_64K);
}
/*
@@ -1058,7 +1053,7 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
* Book3S does not require a TLB flush when relaxing access
* restrictions when the address space is not attached to a
* NMMU, because the core MMU will reload the pte after taking
- * an access fault, which is defined by the architectue.
+ * an access fault, which is defined by the architecture.
*/
}
/* See ptesync comment in radix__set_pte_at */
@@ -1082,22 +1077,6 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
set_pte_at(mm, addr, ptep, pte);
}
-int __init arch_ioremap_pud_supported(void)
-{
- /* HPT does not cope with large pages in the vmalloc area */
- return radix_enabled();
-}
-
-int __init arch_ioremap_pmd_supported(void)
-{
- return radix_enabled();
-}
-
-int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
-{
- return 0;
-}
-
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
pte_t *ptep = (pte_t *)pud;
@@ -1181,8 +1160,3 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
return 1;
}
-
-int __init arch_ioremap_p4d_supported(void)
-{
- return 0;
-}
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 409e61210789..ae12fb5559cb 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -20,10 +20,6 @@
#include "internal.h"
-#define RIC_FLUSH_TLB 0
-#define RIC_FLUSH_PWC 1
-#define RIC_FLUSH_ALL 2
-
/*
* tlbiel instruction for radix, set invalidation
* i.e., r=1 and is=01 or is=10 or is=11
@@ -130,6 +126,21 @@ static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
+static __always_inline void __tlbie_pid_lpid(unsigned long pid,
+ unsigned long lpid,
+ unsigned long ric)
+{
+ unsigned long rb, rs, prs, r;
+
+ rb = PPC_BIT(53); /* IS = 1 */
+ rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
+ prs = 1; /* process scoped */
+ r = 1; /* radix format */
+
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
+}
static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -190,6 +201,23 @@ static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
+static __always_inline void __tlbie_va_lpid(unsigned long va, unsigned long pid,
+ unsigned long lpid,
+ unsigned long ap, unsigned long ric)
+{
+ unsigned long rb, rs, prs, r;
+
+ rb = va & ~(PPC_BITMASK(52, 63));
+ rb |= ap << PPC_BITLSHIFT(58);
+ rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
+ prs = 1; /* process scoped */
+ r = 1; /* radix format */
+
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
+}
+
static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
unsigned long ap, unsigned long ric)
{
@@ -235,6 +263,22 @@ static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,
}
}
+static inline void fixup_tlbie_va_range_lpid(unsigned long va,
+ unsigned long pid,
+ unsigned long lpid,
+ unsigned long ap)
+{
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
+ }
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_va_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB);
+ }
+}
+
static inline void fixup_tlbie_pid(unsigned long pid)
{
/*
@@ -254,6 +298,25 @@ static inline void fixup_tlbie_pid(unsigned long pid)
}
}
+static inline void fixup_tlbie_pid_lpid(unsigned long pid, unsigned long lpid)
+{
+ /*
+ * We can use any address for the invalidation, pick one which is
+ * probably unused as an optimisation.
+ */
+ unsigned long va = ((1UL << 52) - 1);
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
+ }
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_va_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K),
+ RIC_FLUSH_TLB);
+ }
+}
static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid,
unsigned long ap)
@@ -291,22 +354,30 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
/*
* We use 128 set in radix mode and 256 set in hpt mode.
*/
-static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
+static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
int set;
asm volatile("ptesync": : :"memory");
- /*
- * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
- * also flush the entire Page Walk Cache.
- */
- __tlbiel_pid(pid, 0, ric);
+ switch (ric) {
+ case RIC_FLUSH_PWC:
- /* For PWC, only one flush is needed */
- if (ric == RIC_FLUSH_PWC) {
+ /* For PWC, only one flush is needed */
+ __tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
ppc_after_tlbiel_barrier();
return;
+ case RIC_FLUSH_TLB:
+ __tlbiel_pid(pid, 0, RIC_FLUSH_TLB);
+ break;
+ case RIC_FLUSH_ALL:
+ default:
+ /*
+ * Flush the first set of the TLB, and if
+ * we're doing a RIC_FLUSH_ALL, also flush
+ * the entire Page Walk Cache.
+ */
+ __tlbiel_pid(pid, 0, RIC_FLUSH_ALL);
}
if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
@@ -344,6 +415,31 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
+static inline void _tlbie_pid_lpid(unsigned long pid, unsigned long lpid,
+ unsigned long ric)
+{
+ asm volatile("ptesync" : : : "memory");
+
+ /*
+ * Work around the fact that the "ric" argument to __tlbie_pid_lpid
+ * must be a compile-time constant to match the "i" constraint
+ * in the asm statement.
+ */
+ switch (ric) {
+ case RIC_FLUSH_TLB:
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
+ fixup_tlbie_pid_lpid(pid, lpid);
+ break;
+ case RIC_FLUSH_PWC:
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
+ break;
+ case RIC_FLUSH_ALL:
+ default:
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL);
+ fixup_tlbie_pid_lpid(pid, lpid);
+ }
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+}
struct tlbiel_pid {
unsigned long pid;
unsigned long ric;
@@ -469,6 +565,20 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
fixup_tlbie_va_range(addr - page_size, pid, ap);
}
+static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end,
+ unsigned long pid, unsigned long lpid,
+ unsigned long page_size,
+ unsigned long psize)
+{
+ unsigned long addr;
+ unsigned long ap = mmu_get_ap(psize);
+
+ for (addr = start; addr < end; addr += page_size)
+ __tlbie_va_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB);
+
+ fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap);
+}
+
static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
unsigned long psize, unsigned long ric)
{
@@ -549,6 +659,18 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
+static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end,
+ unsigned long pid, unsigned long lpid,
+ unsigned long page_size,
+ unsigned long psize, bool also_pwc)
+{
+ asm volatile("ptesync" : : : "memory");
+ if (also_pwc)
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
+ __tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize);
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+}
+
static inline void _tlbiel_va_range_multicast(struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned long pid, unsigned long page_size,
@@ -1176,7 +1298,7 @@ void radix__tlb_flush(struct mmu_gather *tlb)
}
}
-static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
+static void __radix__flush_tlb_range_psize(struct mm_struct *mm,
unsigned long start, unsigned long end,
int psize, bool also_pwc)
{
@@ -1338,47 +1460,57 @@ void radix__flush_tlb_all(void)
}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
+/*
+ * Performs process-scoped invalidations for a given LPID
+ * as part of H_RPT_INVALIDATE hcall.
+ */
+void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end)
{
- unsigned long pid = mm->context.id;
+ unsigned long psize, nr_pages;
+ struct mmu_psize_def *def;
+ bool flush_pid;
- if (unlikely(pid == MMU_NO_CONTEXT))
+ /*
+ * A H_RPTI_TYPE_ALL request implies RIC=3, hence
+ * do a single IS=1 based flush.
+ */
+ if ((type & H_RPTI_TYPE_ALL) == H_RPTI_TYPE_ALL) {
+ _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL);
return;
+ }
- if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
- return;
+ if (type & H_RPTI_TYPE_PWC)
+ _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
- /*
- * If this context hasn't run on that CPU before and KVM is
- * around, there's a slim chance that the guest on another
- * CPU just brought in obsolete translation into the TLB of
- * this CPU due to a bad prefetch using the guest PID on
- * the way into the hypervisor.
- *
- * We work around this here. If KVM is possible, we check if
- * any sibling thread is in KVM. If it is, the window may exist
- * and thus we flush that PID from the core.
- *
- * A potential future improvement would be to mark which PIDs
- * have never been used on the system and avoid it if the PID
- * is new and the process has no other cpumask bit set.
- */
- if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
- int cpu = smp_processor_id();
- int sib = cpu_first_thread_sibling(cpu);
- bool flush = false;
-
- for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
- if (sib == cpu)
- continue;
- if (!cpu_possible(sib))
- continue;
- if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
- flush = true;
+ /* Full PID flush */
+ if (start == 0 && end == -1)
+ return _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
+
+ /* Do range invalidation for all the valid page sizes */
+ for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+ def = &mmu_psize_defs[psize];
+ if (!(pg_sizes & def->h_rpt_pgsize))
+ continue;
+
+ nr_pages = (end - start) >> def->shift;
+ flush_pid = nr_pages > tlb_single_page_flush_ceiling;
+
+ /*
+ * If the number of pages spanning the range is above
+ * the ceiling, convert the request into a full PID flush.
+ * And since PID flush takes out all the page sizes, there
+ * is no need to consider remaining page sizes.
+ */
+ if (flush_pid) {
+ _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
+ return;
}
- if (flush)
- _tlbiel_pid(pid, RIC_FLUSH_ALL);
+ _tlbie_va_range_lpid(start, end, pid, lpid,
+ (1UL << def->shift), psize, false);
}
}
-EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
+EXPORT_SYMBOL_GPL(do_h_rpt_invalidate_prt);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
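
A note on the dispatch above: do_h_rpt_invalidate_prt() promotes a ranged
request to a full PID flush once the page count crosses
tlb_single_page_flush_ceiling, since one tlbie per page stops paying off.
A minimal standalone sketch of that heuristic (the ceiling value and page
sizes are illustrative, not the kernel's defaults):

    #include <stdbool.h>
    #include <stdio.h>

    #define FLUSH_CEILING 33UL  /* stand-in for tlb_single_page_flush_ceiling */

    /* True when a ranged invalidation should become one full PID flush. */
    static bool promote_to_pid_flush(unsigned long start, unsigned long end,
                                     unsigned int page_shift)
    {
            unsigned long nr_pages = (end - start) >> page_shift;

            return nr_pages > FLUSH_CEILING;
    }

    int main(void)
    {
            /* 64 x 64K pages: over the ceiling, flush the whole PID. */
            printf("%d\n", promote_to_pid_flush(0, 64UL << 16, 16)); /* 1 */
            /* 16 x 64K pages: under it, per-page tlbie is cheaper. */
            printf("%d\n", promote_to_pid_flush(0, 16UL << 16, 16)); /* 0 */
            return 0;
    }
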
diff --git a/arch/powerpc/mm/cacheflush.c b/arch/powerpc/mm/cacheflush.c
new file mode 100644
index 000000000000..63363787e000
--- /dev/null
+++ b/arch/powerpc/mm/cacheflush.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/highmem.h>
+#include <linux/kprobes.h>
+
+/**
+ * flush_coherent_icache() - if a CPU has a coherent icache, flush it
+ * Return: true if the cache was flushed, false otherwise
+ */
+static inline bool flush_coherent_icache(void)
+{
+ /*
+ * For a snooping icache, we still need a dummy icbi to purge all the
+ * prefetched instructions from the ifetch buffers. We also need a sync
+ * before the icbi to order the the actual stores to memory that might
+ * have modified instructions with the icbi.
+ */
+ if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
+ mb(); /* sync */
+ icbi((void *)PAGE_OFFSET);
+ mb(); /* sync */
+ isync();
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
+ * @start: the start address
+ * @stop: the stop address (exclusive)
+ */
+static void invalidate_icache_range(unsigned long start, unsigned long stop)
+{
+ unsigned long shift = l1_icache_shift();
+ unsigned long bytes = l1_icache_bytes();
+ char *addr = (char *)(start & ~(bytes - 1));
+ unsigned long size = stop - (unsigned long)addr + (bytes - 1);
+ unsigned long i;
+
+ for (i = 0; i < size >> shift; i++, addr += bytes)
+ icbi(addr);
+
+ mb(); /* sync */
+ isync();
+}
+
+/**
+ * flush_icache_range: Write any modified data cache blocks out to memory
+ * and invalidate the corresponding blocks in the instruction cache
+ *
+ * Generic code will call this after writing memory, before executing from it.
+ *
+ * @start: the start address
+ * @stop: the stop address (exclusive)
+ */
+void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ if (flush_coherent_icache())
+ return;
+
+ clean_dcache_range(start, stop);
+
+ if (IS_ENABLED(CONFIG_44x)) {
+ /*
+ * Flash invalidate on 44x because we are passed kmapped
+ * addresses and this doesn't work for userspace pages due to
+ * the virtually tagged icache.
+ */
+ iccci((void *)start);
+ mb(); /* sync */
+ isync();
+ } else
+ invalidate_icache_range(start, stop);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+#ifdef CONFIG_HIGHMEM
+/**
+ * flush_dcache_icache_phys() - Flush a page by its physical address
+ * @physaddr: the physical address of the page
+ */
+static void flush_dcache_icache_phys(unsigned long physaddr)
+{
+ unsigned long bytes = l1_dcache_bytes();
+ unsigned long nb = PAGE_SIZE / bytes;
+ unsigned long addr = physaddr & PAGE_MASK;
+ unsigned long msr, msr0;
+ unsigned long loop1 = addr, loop2 = addr;
+
+ msr0 = mfmsr();
+ msr = msr0 & ~MSR_DR;
+ /*
+ * This must remain as ASM to prevent potential memory accesses
+ * while the data MMU is disabled
+ */
+ asm volatile(
+ " mtctr %2;\n"
+ " mtmsr %3;\n"
+ " isync;\n"
+ "0: dcbst 0, %0;\n"
+ " addi %0, %0, %4;\n"
+ " bdnz 0b;\n"
+ " sync;\n"
+ " mtctr %2;\n"
+ "1: icbi 0, %1;\n"
+ " addi %1, %1, %4;\n"
+ " bdnz 1b;\n"
+ " sync;\n"
+ " mtmsr %5;\n"
+ " isync;\n"
+ : "+&r" (loop1), "+&r" (loop2)
+ : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
+ : "ctr", "memory");
+}
+NOKPROBE_SYMBOL(flush_dcache_icache_phys)
+#else
+static void flush_dcache_icache_phys(unsigned long physaddr)
+{
+}
+#endif
+
+/**
+ * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
+ * Note: this is necessary because the instruction cache does *not*
+ * snoop from the data cache.
+ *
+ * @p: the address of the page to flush
+ */
+static void __flush_dcache_icache(void *p)
+{
+ unsigned long addr = (unsigned long)p & PAGE_MASK;
+
+ clean_dcache_range(addr, addr + PAGE_SIZE);
+
+ /*
+ * We don't flush the icache on 44x. Those have a virtual icache and we
+ * don't have access to the virtual address here (it's not the page
+ * vaddr but where it's mapped in user space). The flushing of the
+ * icache on these is handled elsewhere, when a change in the address
+ * space occurs, before returning to user space.
+ */
+
+ if (mmu_has_feature(MMU_FTR_TYPE_44x))
+ return;
+
+ invalidate_icache_range(addr, addr + PAGE_SIZE);
+}
+
+static void flush_dcache_icache_hugepage(struct page *page)
+{
+ int i;
+ int nr = compound_nr(page);
+
+ if (!PageHighMem(page)) {
+ for (i = 0; i < nr; i++)
+ __flush_dcache_icache(lowmem_page_address(page + i));
+ } else {
+ for (i = 0; i < nr; i++) {
+ void *start = kmap_local_page(page + i);
+
+ __flush_dcache_icache(start);
+ kunmap_local(start);
+ }
+ }
+}
+
+void flush_dcache_icache_page(struct page *page)
+{
+ if (flush_coherent_icache())
+ return;
+
+ if (PageCompound(page))
+ return flush_dcache_icache_hugepage(page);
+
+ if (!PageHighMem(page)) {
+ __flush_dcache_icache(lowmem_page_address(page));
+ } else if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
+ void *start = kmap_local_page(page);
+
+ __flush_dcache_icache(start);
+ kunmap_local(start);
+ } else {
+ flush_dcache_icache_phys(page_to_phys(page));
+ }
+}
+EXPORT_SYMBOL(flush_dcache_icache_page);
+
+void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
+{
+ clear_page(page);
+
+ /*
+ * We shouldn't have to do this, but some versions of glibc
+ * require it (ld.so assumes zero filled pages are icache clean)
+ * - Anton
+ */
+ flush_dcache_page(pg);
+}
+EXPORT_SYMBOL(clear_user_page);
+
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+ struct page *pg)
+{
+ copy_page(vto, vfrom);
+
+ /*
+ * We should be able to use the following optimisation, however
+ * there are two problems.
+ * Firstly a bug in some versions of binutils meant PLT sections
+ * were not marked executable.
+ * Secondly the first word in the GOT section is blrl, used
+ * to establish the GOT address. Until recently the GOT was
+ * not marked executable.
+ * - Anton
+ */
+#if 0
+ if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
+ return;
+#endif
+
+ flush_dcache_page(pg);
+}
+
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len)
+{
+ void *maddr;
+
+ maddr = kmap_local_page(page) + (addr & ~PAGE_MASK);
+ flush_icache_range((unsigned long)maddr, (unsigned long)maddr + len);
+ kunmap_local(maddr);
+}
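
The helpers in this new file all serve the write-then-execute pattern:
stores land in the data cache, so the written span must be cleaned to
memory and the stale instruction-cache blocks invalidated before jumping
into it. A hedged user-space analogue, with GCC's __builtin___clear_cache
standing in for flush_icache_range() (buffer and opcode are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    static void patch_and_flush(uint32_t *code, size_t nr_insns, uint32_t insn)
    {
            size_t i;

            for (i = 0; i < nr_insns; i++)
                    code[i] = insn;          /* stores go via the D-cache */

            /* Plays the role of flush_icache_range(): clean the D-cache,
             * invalidate the I-cache over the written span. */
            __builtin___clear_cache((char *)code, (char *)(code + nr_insns));
    }
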
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index bb368257b55c..34f641d4a2fe 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -32,6 +32,8 @@
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>
+#include <linux/kfence.h>
+#include <linux/pkeys.h>
#include <asm/firmware.h>
#include <asm/interrupt.h>
@@ -87,7 +89,6 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address)
return __bad_area(regs, address, SEGV_MAPERR);
}
-#ifdef CONFIG_PPC_MEM_KEYS
static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
struct vm_area_struct *vma)
{
@@ -127,7 +128,6 @@ static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
return 0;
}
-#endif
static noinline int bad_access(struct pt_regs *regs, unsigned long address)
{
@@ -197,7 +197,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
unsigned long address, bool is_write)
{
- int is_exec = TRAP(regs) == 0x400;
+ int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;
/* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
@@ -234,7 +234,6 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
return false;
}
-#ifdef CONFIG_PPC_MEM_KEYS
static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey,
struct vm_area_struct *vma)
{
@@ -248,7 +247,6 @@ static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey,
return false;
}
-#endif
static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma)
{
@@ -393,7 +391,7 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
unsigned int flags = FAULT_FLAG_DEFAULT;
- int is_exec = TRAP(regs) == 0x400;
+ int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;
int is_user = user_mode(regs);
int is_write = page_fault_is_write(error_code);
vm_fault_t fault, major = 0;
@@ -418,8 +416,12 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
* take a page fault to a kernel address or a page fault to a user
* address outside of dedicated places
*/
- if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write)))
+ if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) {
+ if (kfence_handle_page_fault(address, is_write, regs))
+ return 0;
+
return SIGSEGV;
+ }
/*
* If we're in an interrupt, have no user context or are running
@@ -492,11 +494,9 @@ retry:
return bad_area(regs, address);
}
-#ifdef CONFIG_PPC_MEM_KEYS
if (unlikely(access_pkey_error(is_write, is_exec,
(error_code & DSISR_KEYFAULT), vma)))
return bad_access_pkey(regs, address, vma);
-#endif /* CONFIG_PPC_MEM_KEYS */
if (unlikely(access_error(is_write, is_exec, vma)))
return bad_access(regs, address);
@@ -539,39 +539,25 @@ retry:
}
NOKPROBE_SYMBOL(___do_page_fault);
-static long __do_page_fault(struct pt_regs *regs)
+static __always_inline void __do_page_fault(struct pt_regs *regs)
{
- const struct exception_table_entry *entry;
long err;
err = ___do_page_fault(regs, regs->dar, regs->dsisr);
- if (likely(!err))
- return err;
-
- entry = search_exception_tables(regs->nip);
- if (likely(entry)) {
- instruction_pointer_set(regs, extable_fixup(entry));
- return 0;
- } else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
- __bad_page_fault(regs, err);
- return 0;
- } else {
- /* 32 and 64e handle the bad page fault in asm */
- return err;
- }
+ if (unlikely(err))
+ bad_page_fault(regs, err);
}
-NOKPROBE_SYMBOL(__do_page_fault);
-DEFINE_INTERRUPT_HANDLER_RET(do_page_fault)
+DEFINE_INTERRUPT_HANDLER(do_page_fault)
{
- return __do_page_fault(regs);
+ __do_page_fault(regs);
}
#ifdef CONFIG_PPC_BOOK3S_64
/* Same as do_page_fault but interrupt entry has already run in do_hash_fault */
-long hash__do_page_fault(struct pt_regs *regs)
+void hash__do_page_fault(struct pt_regs *regs)
{
- return __do_page_fault(regs);
+ __do_page_fault(regs);
}
NOKPROBE_SYMBOL(hash__do_page_fault);
#endif
@@ -581,27 +567,27 @@ NOKPROBE_SYMBOL(hash__do_page_fault);
* It is called from the DSI and ISI handlers in head.S and from some
* of the procedures in traps.c.
*/
-void __bad_page_fault(struct pt_regs *regs, int sig)
+static void __bad_page_fault(struct pt_regs *regs, int sig)
{
int is_write = page_fault_is_write(regs->dsisr);
/* kernel has accessed a bad area */
switch (TRAP(regs)) {
- case 0x300:
- case 0x380:
- case 0xe00:
+ case INTERRUPT_DATA_STORAGE:
+ case INTERRUPT_DATA_SEGMENT:
+ case INTERRUPT_H_DATA_STORAGE:
pr_alert("BUG: %s on %s at 0x%08lx\n",
regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" :
"Unable to handle kernel data access",
is_write ? "write" : "read", regs->dar);
break;
- case 0x400:
- case 0x480:
+ case INTERRUPT_INST_STORAGE:
+ case INTERRUPT_INST_SEGMENT:
pr_alert("BUG: Unable to handle kernel instruction fetch%s",
regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
break;
- case 0x600:
+ case INTERRUPT_ALIGNMENT:
pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
regs->dar);
break;
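
Replacing the raw vector numbers (0x300, 0x400, 0x600, ...) with
INTERRUPT_* names makes this classification switch self-documenting. The
idiom in isolation, using the vector values visible in the hunk above (the
kernel defines the real constants in asm/interrupt.h):

    #include <stdio.h>

    enum trap_vector {              /* values as used in the hunk above */
            INTERRUPT_DATA_STORAGE = 0x300,
            INTERRUPT_INST_STORAGE = 0x400,
            INTERRUPT_ALIGNMENT    = 0x600,
    };

    static const char *classify_trap(unsigned long trap)
    {
            switch (trap) {
            case INTERRUPT_DATA_STORAGE:
                    return "kernel data access";
            case INTERRUPT_INST_STORAGE:
                    return "kernel instruction fetch";
            case INTERRUPT_ALIGNMENT:
                    return "kernel unaligned access";
            default:
                    return "other";
            }
    }

    int main(void)
    {
            printf("%s\n", classify_trap(0x400)); /* kernel instruction fetch */
            return 0;
    }
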
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index d142b76d507d..9a75ba078e1b 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -106,7 +106,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
* At this point we do the placement change only for BOOK3S 64. This would
* possibly work on other subarchs.
*/
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pg;
p4d_t *p4;
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 02c7db4087cb..3d690be48e84 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -97,6 +97,9 @@ static void __init MMU_setup(void)
if (IS_ENABLED(CONFIG_PPC_8xx))
return;
+ if (IS_ENABLED(CONFIG_KFENCE))
+ __map_without_ltlbs = 1;
+
if (debug_pagealloc_enabled())
__map_without_ltlbs = 1;
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index b1a0aebe8c48..57342154d2b0 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -93,7 +93,7 @@ void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
if (!ret)
return (void __iomem *)area->addr + offset;
- unmap_kernel_range(va, size);
+ vunmap_range(va, va + size);
free_vm_area(area);
return NULL;
diff --git a/arch/powerpc/mm/ioremap_32.c b/arch/powerpc/mm/ioremap_32.c
index 743e11384dea..9d13143b8be4 100644
--- a/arch/powerpc/mm/ioremap_32.c
+++ b/arch/powerpc/mm/ioremap_32.c
@@ -70,10 +70,10 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
*/
pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n", caller);
- err = early_ioremap_range(ioremap_bot - size, p, size, prot);
+ err = early_ioremap_range(ioremap_bot - size - PAGE_SIZE, p, size, prot);
if (err)
return NULL;
- ioremap_bot -= size;
+ ioremap_bot -= size + PAGE_SIZE;
return (void __iomem *)ioremap_bot + offset;
}
diff --git a/arch/powerpc/mm/ioremap_64.c b/arch/powerpc/mm/ioremap_64.c
index ba5cbb0d66bd..3acece00b33e 100644
--- a/arch/powerpc/mm/ioremap_64.c
+++ b/arch/powerpc/mm/ioremap_64.c
@@ -38,7 +38,7 @@ void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
return NULL;
ret = (void __iomem *)ioremap_bot + offset;
- ioremap_bot += size;
+ ioremap_bot += size + PAGE_SIZE;
return ret;
}
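
Both ioremap paths now consume size + PAGE_SIZE of virtual space per
mapping, so an unmapped guard page separates consecutive areas and an
off-the-end access faults instead of corrupting the neighbouring mapping.
A toy bump allocator mirroring the 64-bit (upward-growing) bookkeeping,
with an illustrative base address:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static unsigned long ioremap_bot = 0xc0000000UL;  /* illustrative base */

    static unsigned long toy_ioremap(unsigned long size)
    {
            unsigned long va = ioremap_bot;

            ioremap_bot += size + PAGE_SIZE;  /* trailing guard page */
            return va;
    }

    int main(void)
    {
            unsigned long a = toy_ioremap(2 * PAGE_SIZE);
            unsigned long b = toy_ioremap(PAGE_SIZE);

            /* b begins one full page past the end of a: the guard. */
            printf("gap = %lu\n", b - (a + 2 * PAGE_SIZE));  /* 4096 */
            return 0;
    }
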
diff --git a/arch/powerpc/mm/maccess.c b/arch/powerpc/mm/maccess.c
index fa9a7a718fc6..aad7c47e0030 100644
--- a/arch/powerpc/mm/maccess.c
+++ b/arch/powerpc/mm/maccess.c
@@ -3,7 +3,28 @@
#include <linux/uaccess.h>
#include <linux/kernel.h>
+#include <asm/disassemble.h>
+#include <asm/inst.h>
+#include <asm/ppc-opcode.h>
+
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
return is_kernel_addr((unsigned long)unsafe_src);
}
+
+int copy_inst_from_kernel_nofault(struct ppc_inst *inst, u32 *src)
+{
+ unsigned int val, suffix;
+ int err;
+
+ err = copy_from_kernel_nofault(&val, src, sizeof(val));
+ if (err)
+ return err;
+ if (IS_ENABLED(CONFIG_PPC64) && get_op(val) == OP_PREFIX) {
+ err = copy_from_kernel_nofault(&suffix, src + 1, sizeof(suffix));
+ *inst = ppc_inst_prefix(val, suffix);
+ } else {
+ *inst = ppc_inst(val);
+ }
+ return err;
+}
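
copy_inst_from_kernel_nofault() spares callers the prefixed-instruction
detail: on PPC64 a word whose primary opcode is OP_PREFIX is only the
first half of a 64-bit instruction, so a second word must be fetched. A
sketch of the same two-word decode outside the kernel (the opcode test
mirrors get_op(val) == OP_PREFIX, where get_op() extracts the top six bits
and OP_PREFIX is 1):

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_prefix_word(uint32_t val)
    {
            return (val >> 26) == 1;  /* get_op(val) == OP_PREFIX */
    }

    /* Decode one possibly-prefixed instruction; returns words consumed. */
    static int decode_inst(const uint32_t *src, uint64_t *inst)
    {
            uint32_t val = src[0];

            if (is_prefix_word(val)) {
                    *inst = ((uint64_t)val << 32) | src[1];  /* prefix|suffix */
                    return 2;
            }
            *inst = val;
            return 1;
    }
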
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 4e8ce6d85232..ad198b439222 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -12,52 +12,25 @@
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*/
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/gfp.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
-#include <linux/initrd.h>
-#include <linux/pagemap.h>
#include <linux/suspend.h>
-#include <linux/hugetlb.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/memremap.h>
#include <linux/dma-direct.h>
-#include <linux/kprobes.h>
-#include <asm/prom.h>
-#include <asm/io.h>
-#include <asm/mmu_context.h>
-#include <asm/mmu.h>
-#include <asm/smp.h>
#include <asm/machdep.h>
-#include <asm/btext.h>
-#include <asm/tlb.h>
-#include <asm/sections.h>
-#include <asm/sparsemem.h>
-#include <asm/vdso.h>
-#include <asm/fixmap.h>
-#include <asm/swiotlb.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
+#include <asm/sparsemem.h>
#include <asm/svm.h>
-#include <asm/mmzone.h>
#include <mm/mmu_decl.h>
-static DEFINE_MUTEX(linear_mapping_mutex);
unsigned long long memory_limit;
bool init_mem_is_free;
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -72,6 +45,7 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
EXPORT_SYMBOL(phys_mem_access_prot);
#ifdef CONFIG_MEMORY_HOTPLUG
+static DEFINE_MUTEX(linear_mapping_mutex);
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
@@ -156,7 +130,7 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
}
#endif
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
void __init mem_topology_setup(void)
{
max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
@@ -191,7 +165,7 @@ static int __init mark_nonram_nosave(void)
return 0;
}
-#else /* CONFIG_NEED_MULTIPLE_NODES */
+#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
return 0;
@@ -312,7 +286,6 @@ void __init mem_init(void)
(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
- mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
@@ -329,6 +302,10 @@ void __init mem_init(void)
ioremap_bot, IOREMAP_TOP);
pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
VMALLOC_START, VMALLOC_END);
+#ifdef MODULES_VADDR
+ pr_info(" * 0x%08lx..0x%08lx : modules\n",
+ MODULES_VADDR, MODULES_END);
+#endif
#endif /* CONFIG_PPC32 */
}
@@ -340,257 +317,6 @@ void free_initmem(void)
free_initmem_default(POISON_FREE_INITMEM);
}
-/**
- * flush_coherent_icache() - if a CPU has a coherent icache, flush it
- * @addr: The base address to use (can be any valid address, the whole cache will be flushed)
- * Return true if the cache was flushed, false otherwise
- */
-static inline bool flush_coherent_icache(unsigned long addr)
-{
- /*
- * For a snooping icache, we still need a dummy icbi to purge all the
- * prefetched instructions from the ifetch buffers. We also need a sync
- * before the icbi to order the the actual stores to memory that might
- * have modified instructions with the icbi.
- */
- if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
- mb(); /* sync */
- allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
- icbi((void *)addr);
- prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
- mb(); /* sync */
- isync();
- return true;
- }
-
- return false;
-}
-
-/**
- * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
- * @start: the start address
- * @stop: the stop address (exclusive)
- */
-static void invalidate_icache_range(unsigned long start, unsigned long stop)
-{
- unsigned long shift = l1_icache_shift();
- unsigned long bytes = l1_icache_bytes();
- char *addr = (char *)(start & ~(bytes - 1));
- unsigned long size = stop - (unsigned long)addr + (bytes - 1);
- unsigned long i;
-
- for (i = 0; i < size >> shift; i++, addr += bytes)
- icbi(addr);
-
- mb(); /* sync */
- isync();
-}
-
-/**
- * flush_icache_range: Write any modified data cache blocks out to memory
- * and invalidate the corresponding blocks in the instruction cache
- *
- * Generic code will call this after writing memory, before executing from it.
- *
- * @start: the start address
- * @stop: the stop address (exclusive)
- */
-void flush_icache_range(unsigned long start, unsigned long stop)
-{
- if (flush_coherent_icache(start))
- return;
-
- clean_dcache_range(start, stop);
-
- if (IS_ENABLED(CONFIG_44x)) {
- /*
- * Flash invalidate on 44x because we are passed kmapped
- * addresses and this doesn't work for userspace pages due to
- * the virtually tagged icache.
- */
- iccci((void *)start);
- mb(); /* sync */
- isync();
- } else
- invalidate_icache_range(start, stop);
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
-/**
- * flush_dcache_icache_phys() - Flush a page by it's physical address
- * @physaddr: the physical address of the page
- */
-static void flush_dcache_icache_phys(unsigned long physaddr)
-{
- unsigned long bytes = l1_dcache_bytes();
- unsigned long nb = PAGE_SIZE / bytes;
- unsigned long addr = physaddr & PAGE_MASK;
- unsigned long msr, msr0;
- unsigned long loop1 = addr, loop2 = addr;
-
- msr0 = mfmsr();
- msr = msr0 & ~MSR_DR;
- /*
- * This must remain as ASM to prevent potential memory accesses
- * while the data MMU is disabled
- */
- asm volatile(
- " mtctr %2;\n"
- " mtmsr %3;\n"
- " isync;\n"
- "0: dcbst 0, %0;\n"
- " addi %0, %0, %4;\n"
- " bdnz 0b;\n"
- " sync;\n"
- " mtctr %2;\n"
- "1: icbi 0, %1;\n"
- " addi %1, %1, %4;\n"
- " bdnz 1b;\n"
- " sync;\n"
- " mtmsr %5;\n"
- " isync;\n"
- : "+&r" (loop1), "+&r" (loop2)
- : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
- : "ctr", "memory");
-}
-NOKPROBE_SYMBOL(flush_dcache_icache_phys)
-#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
-
-/*
- * This is called when a page has been modified by the kernel.
- * It just marks the page as not i-cache clean. We do the i-cache
- * flush later when the page is given to a user process, if necessary.
- */
-void flush_dcache_page(struct page *page)
-{
- if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
- return;
- /* avoid an atomic op if possible */
- if (test_bit(PG_dcache_clean, &page->flags))
- clear_bit(PG_dcache_clean, &page->flags);
-}
-EXPORT_SYMBOL(flush_dcache_page);
-
-static void flush_dcache_icache_hugepage(struct page *page)
-{
- int i;
- void *start;
-
- BUG_ON(!PageCompound(page));
-
- for (i = 0; i < compound_nr(page); i++) {
- if (!PageHighMem(page)) {
- __flush_dcache_icache(page_address(page+i));
- } else {
- start = kmap_atomic(page+i);
- __flush_dcache_icache(start);
- kunmap_atomic(start);
- }
- }
-}
-
-void flush_dcache_icache_page(struct page *page)
-{
-
- if (PageCompound(page))
- return flush_dcache_icache_hugepage(page);
-
-#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
- /* On 8xx there is no need to kmap since highmem is not supported */
- __flush_dcache_icache(page_address(page));
-#else
- if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
- void *start = kmap_atomic(page);
- __flush_dcache_icache(start);
- kunmap_atomic(start);
- } else {
- unsigned long addr = page_to_pfn(page) << PAGE_SHIFT;
-
- if (flush_coherent_icache(addr))
- return;
- flush_dcache_icache_phys(addr);
- }
-#endif
-}
-EXPORT_SYMBOL(flush_dcache_icache_page);
-
-/**
- * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
- * Note: this is necessary because the instruction cache does *not*
- * snoop from the data cache.
- *
- * @page: the address of the page to flush
- */
-void __flush_dcache_icache(void *p)
-{
- unsigned long addr = (unsigned long)p;
-
- if (flush_coherent_icache(addr))
- return;
-
- clean_dcache_range(addr, addr + PAGE_SIZE);
-
- /*
- * We don't flush the icache on 44x. Those have a virtual icache and we
- * don't have access to the virtual address here (it's not the page
- * vaddr but where it's mapped in user space). The flushing of the
- * icache on these is handled elsewhere, when a change in the address
- * space occurs, before returning to user space.
- */
-
- if (mmu_has_feature(MMU_FTR_TYPE_44x))
- return;
-
- invalidate_icache_range(addr, addr + PAGE_SIZE);
-}
-
-void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
-{
- clear_page(page);
-
- /*
- * We shouldn't have to do this, but some versions of glibc
- * require it (ld.so assumes zero filled pages are icache clean)
- * - Anton
- */
- flush_dcache_page(pg);
-}
-EXPORT_SYMBOL(clear_user_page);
-
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
- struct page *pg)
-{
- copy_page(vto, vfrom);
-
- /*
- * We should be able to use the following optimisation, however
- * there are two problems.
- * Firstly a bug in some versions of binutils meant PLT sections
- * were not marked executable.
- * Secondly the first word in the GOT section is blrl, used
- * to establish the GOT address. Until recently the GOT was
- * not marked executable.
- * - Anton
- */
-#if 0
- if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
- return;
-#endif
-
- flush_dcache_page(pg);
-}
-
-void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
- unsigned long addr, int len)
-{
- unsigned long maddr;
-
- maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
- flush_icache_range(maddr, maddr + len);
- kunmap(page);
-}
-
/*
* System memory should not be in /proc/iomem but various tools expect it
* (eg kdump).
diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c
index 18f20da0d348..74246536b832 100644
--- a/arch/powerpc/mm/mmu_context.c
+++ b/arch/powerpc/mm/mmu_context.c
@@ -43,24 +43,26 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
/*
* This full barrier orders the store to the cpumask above vs
- * a subsequent operation which allows this CPU to begin loading
- * translations for next.
+ * a subsequent load which allows this CPU/MMU to begin loading
+ * translations for 'next' from page table PTEs into the TLB.
*
- * When using the radix MMU that operation is the load of the
+ * When using the radix MMU, that operation is the load of the
* MMU context id, which is then moved to SPRN_PID.
*
* For the hash MMU it is either the first load from slb_cache
- * in switch_slb(), and/or the store of paca->mm_ctx_id in
- * copy_mm_to_paca().
+ * in switch_slb() to preload the SLBs, or the load of
+ * get_user_context which loads the context for the VSID hash
+ * to insert a new SLB, in the SLB fault handler.
*
* On the other side, the barrier is in mm/tlb-radix.c for
- * radix which orders earlier stores to clear the PTEs vs
- * the load of mm_cpumask. And pte_xchg which does the same
- * thing for hash.
+ * radix which orders earlier stores to clear the PTEs before
+ * the load of mm_cpumask to check which CPU TLBs should be
+ * flushed. For hash, pte_xchg to clear the PTE includes the
+ * barrier.
*
- * This full barrier is needed by membarrier when switching
- * between processes after store to rq->curr, before user-space
- * memory accesses.
+ * This full barrier is also needed by membarrier when
+ * switching between processes after store to rq->curr, before
+ * user-space memory accesses.
*/
smp_mb();
@@ -81,9 +83,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
if (cpu_has_feature(CPU_FTR_ALTIVEC))
asm volatile ("dssall");
- if (new_on_cpu)
- radix_kvm_prefetch_workaround(next);
- else
+ if (!new_on_cpu)
membarrier_arch_switch_mm(prev, next, tsk);
/*
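
The rewritten comment pins down the exact load that the smp_mb() must
order after the mm_cpumask store: the flush side stores the cleared PTEs,
issues its own barrier, then reads mm_cpumask, so at least one side must
observe the other's store. The two-sided skeleton in C11 atomics (names
are illustrative, not kernel APIs):

    #include <stdatomic.h>

    atomic_int mm_cpumask_bit;  /* "this CPU runs the mm" */
    atomic_int pte_state;       /* stand-in for the page tables */

    static int switch_in(void)
    {
            atomic_store_explicit(&mm_cpumask_bit, 1, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);  /* the smp_mb() here */
            return atomic_load_explicit(&pte_state, memory_order_relaxed);
    }

    static int flush_side(void)
    {
            atomic_store_explicit(&pte_state, 0, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);  /* barrier in radix_tlb.c */
            return atomic_load_explicit(&mm_cpumask_bit, memory_order_relaxed);
    }

    /* With both fences, switch_in() and flush_side() cannot both read
     * the other's pre-store value: the stale-TLB window is closed. */
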
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 998810e68562..7dac910c0b21 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -185,3 +185,8 @@ void ptdump_check_wx(void);
#else
static inline void ptdump_check_wx(void) { }
#endif
+
+static inline bool debug_pagealloc_enabled_or_kfence(void)
+{
+ return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled();
+}
diff --git a/arch/powerpc/mm/nohash/44x.c b/arch/powerpc/mm/nohash/44x.c
index 3d6ae7c72412..e079f26b267e 100644
--- a/arch/powerpc/mm/nohash/44x.c
+++ b/arch/powerpc/mm/nohash/44x.c
@@ -25,6 +25,7 @@
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
+#include <asm/smp.h>
#include <mm/mmu_decl.h>
@@ -239,3 +240,19 @@ void __init mmu_init_secondary(int cpu)
}
}
#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_PPC_KUEP
+void setup_kuep(bool disabled)
+{
+ if (smp_processor_id() != boot_cpuid)
+ return;
+
+ if (disabled)
+ patch_instruction_site(&patch__tlb_44x_kuep, ppc_inst(PPC_RAW_NOP()));
+ else
+ pr_info("Activating Kernel Userspace Execution Prevention\n");
+
+ if (IS_ENABLED(CONFIG_PPC_47x) && disabled)
+ patch_instruction_site(&patch__tlb_47x_kuep, ppc_inst(PPC_RAW_NOP()));
+}
+#endif
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 19a3eec1d8c5..60780e089118 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -149,7 +149,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
unsigned long sinittext = __pa(_sinittext);
- bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled();
+ bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence();
unsigned long boundary = strict_boundary ? sinittext : etext8;
unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
@@ -161,7 +161,7 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
return 0;
mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
- if (debug_pagealloc_enabled()) {
+ if (debug_pagealloc_enabled_or_kfence()) {
top = boundary;
} else {
mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
@@ -212,37 +212,6 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}
-/*
- * Set up to use a given MMU context.
- * id is context number, pgd is PGD pointer.
- *
- * We place the physical address of the new task page directory loaded
- * into the MMU base register, and set the ASID compare register with
- * the new "context."
- */
-void set_context(unsigned long id, pgd_t *pgd)
-{
- s16 offset = (s16)(__pa(swapper_pg_dir));
-
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is passed as second argument.
- */
- if (IS_ENABLED(CONFIG_BDI_SWITCH))
- abatron_pteptrs[1] = pgd;
-
- /* Register M_TWB will contain base address of level 1 table minus the
- * lower part of the kernel PGDIR base address, so that all accesses to
- * level 1 table are done relative to lower part of kernel PGDIR base
- * address.
- */
- mtspr(SPRN_M_TWB, __pa(pgd) - offset);
-
- /* Update context */
- mtspr(SPRN_M_CASID, id - 1);
- /* sync */
- mb();
-}
-
#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
@@ -256,12 +225,17 @@ void __init setup_kuep(bool disabled)
#endif
#ifdef CONFIG_PPC_KUAP
+struct static_key_false disable_kuap_key;
+EXPORT_SYMBOL(disable_kuap_key);
+
void __init setup_kuap(bool disabled)
{
- pr_info("Activating Kernel Userspace Access Protection\n");
+ if (disabled) {
+ static_branch_enable(&disable_kuap_key);
+ return;
+ }
- if (disabled)
- pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");
+ pr_info("Activating Kernel Userspace Access Protection\n");
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
diff --git a/arch/powerpc/mm/nohash/mmu_context.c b/arch/powerpc/mm/nohash/mmu_context.c
index aac81c9f84a5..44b2b5e7cabe 100644
--- a/arch/powerpc/mm/nohash/mmu_context.c
+++ b/arch/powerpc/mm/nohash/mmu_context.c
@@ -21,21 +21,6 @@
* also clear mm->cpu_vm_mask bits when processes are migrated
*/
-//#define DEBUG_MAP_CONSISTENCY
-//#define DEBUG_CLAMP_LAST_CONTEXT 31
-//#define DEBUG_HARDER
-
-/* We don't use DEBUG because it tends to be compiled in always nowadays
- * and this would generate way too much output
- */
-#ifdef DEBUG_HARDER
-#define pr_hard(args...) printk(KERN_DEBUG args)
-#define pr_hardcont(args...) printk(KERN_CONT args)
-#else
-#define pr_hard(args...) do { } while(0)
-#define pr_hardcont(args...) do { } while(0)
-#endif
-
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -47,10 +32,17 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+#include <asm/smp.h>
#include <mm/mmu_decl.h>
/*
+ * Room for two PTE table pointers, usually the kernel and current user
+ * pointer to their respective root page table (pgdir).
+ */
+void *abatron_pteptrs[2];
+
+/*
* The MPC8xx has only 16 contexts. We rotate through them on each task switch.
* A better way would be to keep track of tasks that own contexts, and implement
* an LRU usage. That way very active tasks don't always have to pay the TLB
@@ -68,9 +60,7 @@
* -- BenH
*/
#define FIRST_CONTEXT 1
-#ifdef DEBUG_CLAMP_LAST_CONTEXT
-#define LAST_CONTEXT DEBUG_CLAMP_LAST_CONTEXT
-#elif defined(CONFIG_PPC_8xx)
+#if defined(CONFIG_PPC_8xx)
#define LAST_CONTEXT 16
#elif defined(CONFIG_PPC_47x)
#define LAST_CONTEXT 65535
@@ -80,9 +70,7 @@
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
-#ifdef CONFIG_SMP
static unsigned long *stale_map[NR_CPUS];
-#endif
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);
@@ -105,7 +93,6 @@ static DEFINE_RAW_SPINLOCK(context_lock);
* the stale map as we can just flush the local CPU
* -- benh
*/
-#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
struct mm_struct *mm;
@@ -127,7 +114,6 @@ static unsigned int steal_context_smp(unsigned int id)
id = FIRST_CONTEXT;
continue;
}
- pr_hardcont(" | steal %d from 0x%p", id, mm);
/* Mark this mm has having no context anymore */
mm->context.id = MMU_NO_CONTEXT;
@@ -158,34 +144,25 @@ static unsigned int steal_context_smp(unsigned int id)
/* This will cause the caller to try again */
return MMU_NO_CONTEXT;
}
-#endif /* CONFIG_SMP */
static unsigned int steal_all_contexts(void)
{
struct mm_struct *mm;
-#ifdef CONFIG_SMP
int cpu = smp_processor_id();
-#endif
unsigned int id;
for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
/* Pick up the victim mm */
mm = context_mm[id];
- pr_hardcont(" | steal %d from 0x%p", id, mm);
-
/* Mark this mm as having no context anymore */
mm->context.id = MMU_NO_CONTEXT;
if (id != FIRST_CONTEXT) {
context_mm[id] = NULL;
__clear_bit(id, context_map);
-#ifdef DEBUG_MAP_CONSISTENCY
- mm->context.active = 0;
-#endif
}
-#ifdef CONFIG_SMP
- __clear_bit(id, stale_map[cpu]);
-#endif
+ if (IS_ENABLED(CONFIG_SMP))
+ __clear_bit(id, stale_map[cpu]);
}
/* Flush the TLB for all contexts (not to be used on SMP) */
@@ -204,15 +181,11 @@ static unsigned int steal_all_contexts(void)
static unsigned int steal_context_up(unsigned int id)
{
struct mm_struct *mm;
-#ifdef CONFIG_SMP
int cpu = smp_processor_id();
-#endif
/* Pick up the victim mm */
mm = context_mm[id];
- pr_hardcont(" | steal %d from 0x%p", id, mm);
-
/* Flush the TLB for that context */
local_flush_tlb_mm(mm);
@@ -220,81 +193,64 @@ static unsigned int steal_context_up(unsigned int id)
mm->context.id = MMU_NO_CONTEXT;
/* XXX This clear should ultimately be part of local_flush_tlb_mm */
-#ifdef CONFIG_SMP
- __clear_bit(id, stale_map[cpu]);
-#endif
+ if (IS_ENABLED(CONFIG_SMP))
+ __clear_bit(id, stale_map[cpu]);
return id;
}
-#ifdef DEBUG_MAP_CONSISTENCY
-static void context_check_map(void)
+static void set_context(unsigned long id, pgd_t *pgd)
{
- unsigned int id, nrf, nact;
+ if (IS_ENABLED(CONFIG_PPC_8xx)) {
+ s16 offset = (s16)(__pa(swapper_pg_dir));
+
+ /*
+ * Register M_TWB will contain base address of level 1 table minus the
+ * lower part of the kernel PGDIR base address, so that all accesses to
+ * level 1 table are done relative to lower part of kernel PGDIR base
+ * address.
+ */
+ mtspr(SPRN_M_TWB, __pa(pgd) - offset);
- nrf = nact = 0;
- for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
- int used = test_bit(id, context_map);
- if (!used)
- nrf++;
- if (used != (context_mm[id] != NULL))
- pr_err("MMU: Context %d is %s and MM is %p !\n",
- id, used ? "used" : "free", context_mm[id]);
- if (context_mm[id] != NULL)
- nact += context_mm[id]->context.active;
- }
- if (nrf != nr_free_contexts) {
- pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
- nr_free_contexts, nrf);
- nr_free_contexts = nrf;
+ /* Update context */
+ mtspr(SPRN_M_CASID, id - 1);
+
+ /* sync */
+ mb();
+ } else {
+ if (IS_ENABLED(CONFIG_40x))
+ mb(); /* sync */
+
+ mtspr(SPRN_PID, id);
+ isync();
}
- if (nact > num_online_cpus())
- pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
- nact, num_online_cpus());
- if (FIRST_CONTEXT > 0 && !test_bit(0, context_map))
- pr_err("MMU: Context 0 has been freed !!!\n");
}
-#else
-static void context_check_map(void) { }
-#endif
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned int id;
-#ifdef CONFIG_SMP
unsigned int i, cpu = smp_processor_id();
-#endif
unsigned long *map;
/* No lockless fast path .. yet */
raw_spin_lock(&context_lock);
- pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
- cpu, next, next->context.active, next->context.id);
-
-#ifdef CONFIG_SMP
- /* Mark us active and the previous one not anymore */
- next->context.active++;
- if (prev) {
- pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
- WARN_ON(prev->context.active < 1);
- prev->context.active--;
+ if (IS_ENABLED(CONFIG_SMP)) {
+ /* Mark us active and the previous one not anymore */
+ next->context.active++;
+ if (prev) {
+ WARN_ON(prev->context.active < 1);
+ prev->context.active--;
+ }
}
again:
-#endif /* CONFIG_SMP */
/* If we already have a valid assigned context, skip all that */
id = next->context.id;
- if (likely(id != MMU_NO_CONTEXT)) {
-#ifdef DEBUG_MAP_CONSISTENCY
- if (context_mm[id] != next)
- pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
- next, id, id, context_mm[id]);
-#endif
+ if (likely(id != MMU_NO_CONTEXT))
goto ctxt_ok;
- }
/* We really don't have a context, let's try to acquire one */
id = next_context;
@@ -304,14 +260,12 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
/* No more free contexts, let's try to steal one */
if (nr_free_contexts == 0) {
-#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
id = steal_context_smp(id);
if (id == MMU_NO_CONTEXT)
goto again;
goto stolen;
}
-#endif /* CONFIG_SMP */
if (IS_ENABLED(CONFIG_PPC_8xx))
id = steal_all_contexts();
else
@@ -330,20 +284,13 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
next_context = id + 1;
context_mm[id] = next;
next->context.id = id;
- pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);
- context_check_map();
ctxt_ok:
/* If that context got marked stale on this CPU, then flush the
* local TLB for it and unmark it before we use it
*/
-#ifdef CONFIG_SMP
- if (test_bit(id, stale_map[cpu])) {
- pr_hardcont(" | stale flush %d [%d..%d]",
- id, cpu_first_thread_sibling(cpu),
- cpu_last_thread_sibling(cpu));
-
+ if (IS_ENABLED(CONFIG_SMP) && test_bit(id, stale_map[cpu])) {
local_flush_tlb_mm(next);
/* XXX This clear should ultimately be part of local_flush_tlb_mm */
@@ -353,10 +300,10 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
__clear_bit(id, stale_map[i]);
}
}
-#endif
/* Flick the MMU and release lock */
- pr_hardcont(" -> %d\n", id);
+ if (IS_ENABLED(CONFIG_BDI_SWITCH))
+ abatron_pteptrs[1] = next->pgd;
set_context(id, next->pgd);
raw_spin_unlock(&context_lock);
}
@@ -366,8 +313,6 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
*/
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
- pr_hard("initing context for mm @%p\n", mm);
-
/*
* We have MMU_NO_CONTEXT set to be ~0. Hence check
* explicitly against context.id == 0. This ensures that we properly
@@ -401,16 +346,12 @@ void destroy_context(struct mm_struct *mm)
if (id != MMU_NO_CONTEXT) {
__clear_bit(id, context_map);
mm->context.id = MMU_NO_CONTEXT;
-#ifdef DEBUG_MAP_CONSISTENCY
- mm->context.active = 0;
-#endif
context_mm[id] = NULL;
nr_free_contexts++;
}
raw_spin_unlock_irqrestore(&context_lock, flags);
}
-#ifdef CONFIG_SMP
static int mmu_ctx_cpu_prepare(unsigned int cpu)
{
/* We don't touch CPU 0 map, it's allocated at boot and kept
@@ -419,7 +360,6 @@ static int mmu_ctx_cpu_prepare(unsigned int cpu)
if (cpu == boot_cpuid)
return 0;
- pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
return 0;
}
@@ -430,7 +370,6 @@ static int mmu_ctx_cpu_dead(unsigned int cpu)
if (cpu == boot_cpuid)
return 0;
- pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
kfree(stale_map[cpu]);
stale_map[cpu] = NULL;
@@ -440,8 +379,6 @@ static int mmu_ctx_cpu_dead(unsigned int cpu)
return 0;
}
-#endif /* CONFIG_SMP */
-
/*
* Initialize the context management stuff.
*/
@@ -465,16 +402,16 @@ void __init mmu_context_init(void)
if (!context_mm)
panic("%s: Failed to allocate %zu bytes\n", __func__,
sizeof(void *) * (LAST_CONTEXT + 1));
-#ifdef CONFIG_SMP
- stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
- if (!stale_map[boot_cpuid])
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- CTX_MAP_SIZE);
-
- cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
- "powerpc/mmu/ctx:prepare",
- mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
-#endif
+ if (IS_ENABLED(CONFIG_SMP)) {
+ stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+ if (!stale_map[boot_cpuid])
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ CTX_MAP_SIZE);
+
+ cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
+ "powerpc/mmu/ctx:prepare",
+ mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
+ }
printk(KERN_INFO
"MMU: Allocated %zu bytes of context maps for %d contexts\n",
diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S
index 68797e072f55..4613bf8e9aae 100644
--- a/arch/powerpc/mm/nohash/tlb_low.S
+++ b/arch/powerpc/mm/nohash/tlb_low.S
@@ -360,19 +360,6 @@ _GLOBAL(_tlbivax_bcast)
sync
wrtee r10
blr
-
-_GLOBAL(set_context)
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@h
- ori r5, r5, abatron_pteptrs@l
- stw r4, 0x4(r5)
-#endif
- mtspr SPRN_PID,r3
- isync /* Force context change */
- blr
#else
#error Unsupported processor type !
#endif
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
new file mode 100644
index 000000000000..0876216ceee6
--- /dev/null
+++ b/arch/powerpc/mm/pageattr.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * MMU-generic set_memory implementation for powerpc
+ *
+ * Copyright 2019-2021, IBM Corporation.
+ */
+
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/set_memory.h>
+
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+
+/*
+ * Updates the attributes of a page in three steps:
+ *
+ * 1. invalidate the page table entry
+ * 2. flush the TLB
+ * 3. install the new entry with the updated attributes
+ *
+ * Invalidating the pte means there are situations where this will not work
+ * when in theory it should.
+ * For example:
+ * - removing write from page whilst it is being executed
+ * - setting a page read-only whilst it is being read by another CPU
+ *
+ */
+static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
+{
+ long action = (long)data;
+ pte_t pte;
+
+ spin_lock(&init_mm.page_table_lock);
+
+ /* invalidate the PTE so it's safe to modify */
+ pte = ptep_get_and_clear(&init_mm, addr, ptep);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+ /* modify the PTE bits as desired, then apply */
+ switch (action) {
+ case SET_MEMORY_RO:
+ pte = pte_wrprotect(pte);
+ break;
+ case SET_MEMORY_RW:
+ pte = pte_mkwrite(pte_mkdirty(pte));
+ break;
+ case SET_MEMORY_NX:
+ pte = pte_exprotect(pte);
+ break;
+ case SET_MEMORY_X:
+ pte = pte_mkexec(pte);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ set_pte_at(&init_mm, addr, ptep, pte);
+
+ /* See ptesync comment in radix__set_pte_at() */
+ if (radix_enabled())
+ asm volatile("ptesync": : :"memory");
+ spin_unlock(&init_mm.page_table_lock);
+
+ return 0;
+}
+
+int change_memory_attr(unsigned long addr, int numpages, long action)
+{
+ unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
+ unsigned long size = numpages * PAGE_SIZE;
+
+ if (!numpages)
+ return 0;
+
+ if (WARN_ON_ONCE(is_vmalloc_or_module_addr((void *)addr) &&
+ is_vm_area_hugepages((void *)addr)))
+ return -EINVAL;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * On hash, the linear mapping is not in the Linux page table so
+ * apply_to_existing_page_range() will have no effect. If in the future
+ * the set_memory_* functions are used on the linear map this will need
+ * to be updated.
+ */
+ if (!radix_enabled()) {
+ int region = get_region_id(addr);
+
+ if (WARN_ON_ONCE(region != VMALLOC_REGION_ID && region != IO_REGION_ID))
+ return -EINVAL;
+ }
+#endif
+
+ return apply_to_existing_page_range(&init_mm, start, size,
+ change_page_attr, (void *)action);
+}
+
+/*
+ * Set the attributes of a page:
+ *
+ * This function is used by PPC32 at the end of init to set final kernel memory
+ * protection. It includes changing the mapping of the page it is executing from
+ * and data pages it is using.
+ */
+static int set_page_attr(pte_t *ptep, unsigned long addr, void *data)
+{
+ pgprot_t prot = __pgprot((unsigned long)data);
+
+ spin_lock(&init_mm.page_table_lock);
+
+ set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot));
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+ spin_unlock(&init_mm.page_table_lock);
+
+ return 0;
+}
+
+int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot)
+{
+ unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
+ unsigned long sz = numpages * PAGE_SIZE;
+
+ if (numpages <= 0)
+ return 0;
+
+ return apply_to_existing_page_range(&init_mm, start, sz, set_page_attr,
+ (void *)pgprot_val(prot));
+}
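
A caller's view of the new backend, assuming the SET_MEMORY_* actions
handled in change_page_attr() above; this is a hedged kernel-style sketch,
not an interface the patch itself adds:

    #include <linux/set_memory.h>
    #include <linux/mm.h>

    /* Flip a page-aligned region read-only, then restore it; addr and
     * numpages as for change_memory_attr() above. */
    static int protect_then_restore(unsigned long addr, int numpages)
    {
            int err;

            err = change_memory_attr(addr, numpages, SET_MEMORY_RO);
            if (err)
                    return err;

            /* ... region is read-only here; a stray write would fault ... */

            return change_memory_attr(addr, numpages, SET_MEMORY_RW);
    }
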
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 354611940118..cd16b407f47e 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -28,6 +28,14 @@
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>
+#ifdef CONFIG_PPC64
+#define PGD_ALIGN (sizeof(pgd_t) * MAX_PTRS_PER_PGD)
+#else
+#define PGD_ALIGN PAGE_SIZE
+#endif
+
+pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD] __section(".bss..page_aligned") __aligned(PGD_ALIGN);
+
static inline int is_exec_fault(void)
{
return current->thread.regs && TRAP(current->thread.regs) == 0x400;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index e0ec67a16887..dcf5ecca19d9 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -23,6 +23,7 @@
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
+#include <linux/set_memory.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
@@ -132,64 +133,20 @@ void __init mapin_ram(void)
}
}
-static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
-{
- pte_t *kpte;
- unsigned long address;
-
- BUG_ON(PageHighMem(page));
- address = (unsigned long)page_address(page);
-
- if (v_block_mapped(address))
- return 0;
- kpte = virt_to_kpte(address);
- if (!kpte)
- return -EINVAL;
- __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
-
- return 0;
-}
-
-/*
- * Change the page attributes of an page in the linear mapping.
- *
- * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
- */
-static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-{
- int i, err = 0;
- unsigned long flags;
- struct page *start = page;
-
- local_irq_save(flags);
- for (i = 0; i < numpages; i++, page++) {
- err = __change_page_attr_noflush(page, prot);
- if (err)
- break;
- }
- wmb();
- local_irq_restore(flags);
- flush_tlb_kernel_range((unsigned long)page_address(start),
- (unsigned long)page_address(page));
- return err;
-}
-
void mark_initmem_nx(void)
{
- struct page *page = virt_to_page(_sinittext);
unsigned long numpages = PFN_UP((unsigned long)_einittext) -
PFN_DOWN((unsigned long)_sinittext);
if (v_block_mapped((unsigned long)_sinittext))
mmu_mark_initmem_nx();
else
- change_page_attr(page, numpages, PAGE_KERNEL);
+ set_memory_attr((unsigned long)_sinittext, numpages, PAGE_KERNEL);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
- struct page *page;
unsigned long numpages;
if (v_block_mapped((unsigned long)_stext + 1)) {
@@ -198,20 +155,18 @@ void mark_rodata_ro(void)
return;
}
- page = virt_to_page(_stext);
numpages = PFN_UP((unsigned long)_etext) -
PFN_DOWN((unsigned long)_stext);
- change_page_attr(page, numpages, PAGE_KERNEL_ROX);
+ set_memory_attr((unsigned long)_stext, numpages, PAGE_KERNEL_ROX);
/*
* mark .rodata as read only. Use __init_begin rather than __end_rodata
* to cover NOTES and EXCEPTION_TABLE.
*/
- page = virt_to_page(__start_rodata);
numpages = PFN_UP((unsigned long)__init_begin) -
PFN_DOWN((unsigned long)__start_rodata);
- change_page_attr(page, numpages, PAGE_KERNEL_RO);
+ set_memory_attr((unsigned long)__start_rodata, numpages, PAGE_KERNEL_RO);
// mark_initmem_nx() should have already run by now
ptdump_check_wx();
@@ -221,9 +176,14 @@ void mark_rodata_ro(void)
#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
+ unsigned long addr = (unsigned long)page_address(page);
+
if (PageHighMem(page))
return;
- change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+ if (enable)
+ set_memory_attr(addr, numpages, PAGE_KERNEL);
+ else
+ set_memory_attr(addr, numpages, __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index aca354fb670b..5062c58b1e5b 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -58,8 +58,6 @@ struct pg_state {
const struct addr_marker *marker;
unsigned long start_address;
unsigned long start_pa;
- unsigned long last_pa;
- unsigned long page_size;
unsigned int level;
u64 current_flags;
bool check_wx;
@@ -163,8 +161,6 @@ static void dump_flag_info(struct pg_state *st, const struct flag_info
static void dump_addr(struct pg_state *st, unsigned long addr)
{
- unsigned long delta;
-
#ifdef CONFIG_PPC64
#define REG "0x%016lx"
#else
@@ -172,14 +168,8 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
#endif
pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
- if (st->start_pa == st->last_pa && st->start_address + st->page_size != addr) {
- pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa);
- delta = st->page_size >> 10;
- } else {
- pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
- delta = (addr - st->start_address) >> 10;
- }
- pt_dump_size(st->seq, delta);
+ pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
+ pt_dump_size(st->seq, (addr - st->start_address) >> 10);
}
static void note_prot_wx(struct pg_state *st, unsigned long addr)
@@ -208,7 +198,6 @@ static void note_page_update_state(struct pg_state *st, unsigned long addr,
st->current_flags = flag;
st->start_address = addr;
st->start_pa = pa;
- st->page_size = page_size;
while (addr >= st->marker[1].start_address) {
st->marker++;
@@ -220,7 +209,6 @@ static void note_page(struct pg_state *st, unsigned long addr,
unsigned int level, u64 val, unsigned long page_size)
{
u64 flag = val & pg_level[level].mask;
- u64 pa = val & PTE_RPN_MASK;
/* At first no level is set */
if (!st->level) {
@@ -232,12 +220,9 @@ static void note_page(struct pg_state *st, unsigned long addr,
* - we change levels in the tree.
* - the address is in a different section of memory and is thus
* used for a different purpose, regardless of the flags.
- * - the pa of this page is not adjacent to the last inspected page
*/
} else if (flag != st->current_flags || level != st->level ||
- addr >= st->marker[1].start_address ||
- (pa != st->last_pa + st->page_size &&
- (pa != st->start_pa || st->start_pa != st->last_pa))) {
+ addr >= st->marker[1].start_address) {
/* Check the PTE flags */
if (st->current_flags) {
@@ -259,7 +244,6 @@ static void note_page(struct pg_state *st, unsigned long addr,
*/
note_page_update_state(st, addr, level, val, page_size);
}
- st->last_pa = pa;
}
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
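With last_pa and page_size gone, a contiguous range in the dump is delimited purely by protection flags, page-table level, and the address markers. A condensed view of the merge condition note_page() now applies (illustrative sketch, not patch text):

	/* A new output range starts as soon as any of these differ. */
	static bool same_range(const struct pg_state *st, unsigned long addr,
			       unsigned int level, u64 flag)
	{
		return flag == st->current_flags && level == st->level &&
		       addr < st->marker[1].start_address;
	}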
diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile
index c2dec3a68d4c..8e60af32e51e 100644
--- a/arch/powerpc/net/Makefile
+++ b/arch/powerpc/net/Makefile
@@ -2,8 +2,4 @@
#
# Arch-specific network modules
#
-ifdef CONFIG_PPC64
-obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o
-else
-obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o
-endif
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o bpf_jit_comp$(BITS).o
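With classic BPF gone, one rule covers both word sizes: $(BITS) -- which the powerpc Makefile is assumed to set to 32 or 64 from the configured word size, mirroring the x86 convention -- makes the line expand to bpf_jit_comp.o plus bpf_jit_comp32.o on ppc32 or bpf_jit_comp64.o on ppc64.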
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index d0a67a1bbaf1..99fad093f43e 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -26,6 +26,9 @@
/* Long jump; (unconditional 'branch') */
#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \
(((dest) - (ctx->idx * 4)) & 0x03fffffc))
+/* bl; (unconditional 'branch' with link) to absolute address */
+#define PPC_BL_ABS(dest) EMIT(PPC_INST_BL | \
+ (((dest) - (unsigned long)(image + ctx->idx)) & 0x03fffffc))
/* "cond" here covers BO:BI fields. */
#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \
(((cond) & 0x3ff) << 16) | \
@@ -42,6 +45,10 @@
EMIT(PPC_RAW_ORI(d, d, IMM_L(i))); \
} } while(0)
+#ifdef CONFIG_PPC32
+#define PPC_EX32(r, i) EMIT(PPC_RAW_LI((r), (i) < 0 ? -1 : 0))
+#endif
+
#define PPC_LI64(d, i) do { \
if ((long)(i) >= -2147483648 && \
(long)(i) < 2147483648) \
@@ -108,6 +115,63 @@ static inline bool is_nearbranch(int offset)
#define COND_LT (CR0_LT | COND_CMP_TRUE)
#define COND_LE (CR0_GT | COND_CMP_FALSE)
+#define SEEN_FUNC 0x20000000 /* might call external helpers */
+#define SEEN_STACK 0x40000000 /* uses BPF stack */
+#define SEEN_TAILCALL 0x80000000 /* uses tail calls */
+
+#define SEEN_VREG_MASK 0x1ff80000 /* Volatile registers r3-r12 */
+#define SEEN_NVREG_MASK 0x0003ffff /* Non volatile registers r14-r31 */
+
+#ifdef CONFIG_PPC64
+extern const int b2p[MAX_BPF_JIT_REG + 2];
+#else
+extern const int b2p[MAX_BPF_JIT_REG + 1];
+#endif
+
+struct codegen_context {
+ /*
+ * This is used to track register usage as well
+ * as calls to external helpers.
+ * - register usage is tracked with corresponding
+ * bits (r3-r31)
+ * - rest of the bits can be used to track other
+ * things -- for now, we use bits 0 to 2
+ * encoded in SEEN_* macros above
+ */
+ unsigned int seen;
+ unsigned int idx;
+ unsigned int stack_size;
+ int b2p[ARRAY_SIZE(b2p)];
+};
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+ smp_wmb(); /* smp write barrier */
+ flush_icache_range((unsigned long)start, (unsigned long)end);
+}
+
+static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
+{
+ return ctx->seen & (1 << (31 - i));
+}
+
+static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
+{
+ ctx->seen |= 1 << (31 - i);
+}
+
+static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
+{
+ ctx->seen &= ~(1 << (31 - i));
+}
+
+void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+ u32 *addrs, bool extra_pass);
+void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
+void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
+void bpf_jit_realloc_regs(struct codegen_context *ctx);
+
#endif
#endif
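The register-tracking helpers above store GPR r in bit (31 - r) of ctx->seen, matching the MSB-0 bit numbering used in PowerPC documentation. A tiny standalone check (userspace C, illustrative only) that the SEEN_VREG_MASK and SEEN_NVREG_MASK constants fall out of that encoding:

	#include <stdio.h>

	/* Build the mask covering GPRs lo..hi under the bit (31 - r) scheme. */
	static unsigned int seen_mask(int lo, int hi)
	{
		unsigned int m = 0;

		for (int r = lo; r <= hi; r++)
			m |= 1u << (31 - r);
		return m;
	}

	int main(void)
	{
		printf("VREG  r3-r12:  %#010x\n", seen_mask(3, 12));  /* 0x1ff80000 */
		printf("NVREG r14-r31: %#010x\n", seen_mask(14, 31)); /* 0x0003ffff */
		return 0;
	}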
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
deleted file mode 100644
index 448dfd4d98e1..000000000000
--- a/arch/powerpc/net/bpf_jit32.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * bpf_jit32.h: BPF JIT compiler for PPC
- *
- * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
- *
- * Split from bpf_jit.h
- */
-#ifndef _BPF_JIT32_H
-#define _BPF_JIT32_H
-
-#include <asm/asm-compat.h>
-#include "bpf_jit.h"
-
-#ifdef CONFIG_PPC64
-#define BPF_PPC_STACK_R3_OFF 48
-#define BPF_PPC_STACK_LOCALS 32
-#define BPF_PPC_STACK_BASIC (48+64)
-#define BPF_PPC_STACK_SAVE (18*8)
-#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
- BPF_PPC_STACK_SAVE)
-#define BPF_PPC_SLOWPATH_FRAME (48+64)
-#else
-#define BPF_PPC_STACK_R3_OFF 24
-#define BPF_PPC_STACK_LOCALS 16
-#define BPF_PPC_STACK_BASIC (24+32)
-#define BPF_PPC_STACK_SAVE (18*4)
-#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
- BPF_PPC_STACK_SAVE)
-#define BPF_PPC_SLOWPATH_FRAME (24+32)
-#endif
-
-#define REG_SZ (BITS_PER_LONG/8)
-
-/*
- * Generated code register usage:
- *
- * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
- *
- * skb r3 (Entry parameter)
- * A register r4
- * X register r5
- * addr param r6
- * r7-r10 scratch
- * skb->data r14
- * skb headlen r15 (skb->len - skb->data_len)
- * m[0] r16
- * m[...] ...
- * m[15] r31
- */
-#define r_skb 3
-#define r_ret 3
-#define r_A 4
-#define r_X 5
-#define r_addr 6
-#define r_scratch1 7
-#define r_scratch2 8
-#define r_D 14
-#define r_HL 15
-#define r_M 16
-
-#ifndef __ASSEMBLY__
-
-/*
- * Assembly helpers from arch/powerpc/net/bpf_jit.S:
- */
-#define DECLARE_LOAD_FUNC(func) \
- extern u8 func[], func##_negative_offset[], func##_positive_offset[]
-
-DECLARE_LOAD_FUNC(sk_load_word);
-DECLARE_LOAD_FUNC(sk_load_half);
-DECLARE_LOAD_FUNC(sk_load_byte);
-DECLARE_LOAD_FUNC(sk_load_byte_msh);
-
-#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LBZ(r, base, i)); \
- else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \
- EMIT(PPC_RAW_LBZ(r, r, IMM_L(i))); } } while(0)
-
-#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LD(r, base, i)); \
- else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \
- EMIT(PPC_RAW_LD(r, r, IMM_L(i))); } } while(0)
-
-#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LWZ(r, base, i)); \
- else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \
- EMIT(PPC_RAW_LWZ(r, r, IMM_L(i))); } } while(0)
-
-#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) EMIT(PPC_RAW_LHZ(r, base, i)); \
- else { EMIT(PPC_RAW_ADDIS(r, base, IMM_HA(i))); \
- EMIT(PPC_RAW_LHZ(r, r, IMM_L(i))); } } while(0)
-
-#ifdef CONFIG_PPC64
-#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0)
-#else
-#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0)
-#endif
-
-#ifdef CONFIG_SMP
-#ifdef CONFIG_PPC64
-#define PPC_BPF_LOAD_CPU(r) \
- do { BUILD_BUG_ON(sizeof_field(struct paca_struct, paca_index) != 2); \
- PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \
- } while (0)
-#else
-#define PPC_BPF_LOAD_CPU(r) \
- do { BUILD_BUG_ON(sizeof_field(struct task_struct, cpu) != 4); \
- PPC_LHZ_OFFS(r, 2, offsetof(struct task_struct, cpu)); \
- } while(0)
-#endif
-#else
-#define PPC_BPF_LOAD_CPU(r) do { EMIT(PPC_RAW_LI(r, 0)); } while(0)
-#endif
-
-#define PPC_LHBRX_OFFS(r, base, i) \
- do { PPC_LI32(r, i); EMIT(PPC_RAW_LHBRX(r, r, base)); } while(0)
-#ifdef __LITTLE_ENDIAN__
-#define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i)
-#else
-#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
-#endif
-
-#define PPC_BPF_LL(r, base, i) do { EMIT(PPC_RAW_LWZ(r, base, i)); } while(0)
-#define PPC_BPF_STL(r, base, i) do { EMIT(PPC_RAW_STW(r, base, i)); } while(0)
-#define PPC_BPF_STLU(r, base, i) do { EMIT(PPC_RAW_STWU(r, base, i)); } while(0)
-
-#define SEEN_DATAREF 0x10000 /* might call external helpers */
-#define SEEN_XREG 0x20000 /* X reg is used */
-#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
- * storage */
-#define SEEN_MEM_MSK 0x0ffff
-
-struct codegen_context {
- unsigned int seen;
- unsigned int idx;
- int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
-};
-
-#endif
-
-#endif
diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
index 2e33c6673ff9..7b713edfa7e2 100644
--- a/arch/powerpc/net/bpf_jit64.h
+++ b/arch/powerpc/net/bpf_jit64.h
@@ -39,7 +39,7 @@
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
/* BPF to ppc register mappings */
-static const int b2p[] = {
+const int b2p[MAX_BPF_JIT_REG + 2] = {
/* function return value */
[BPF_REG_0] = 8,
/* function arguments */
@@ -86,25 +86,6 @@ static const int b2p[] = {
} while(0)
#define PPC_BPF_STLU(r, base, i) do { EMIT(PPC_RAW_STDU(r, base, i)); } while(0)
-#define SEEN_FUNC 0x1000 /* might call external helpers */
-#define SEEN_STACK 0x2000 /* uses BPF stack */
-#define SEEN_TAILCALL 0x4000 /* uses tail calls */
-
-struct codegen_context {
- /*
- * This is used to track register usage as well
- * as calls to external helpers.
- * - register usage is tracked with corresponding
- * bits (r3-r10 and r27-r31)
- * - rest of the bits can be used to track other
- * things -- for now, we use bits 16 to 23
- * encoded in SEEN_* macros above
- */
- unsigned int seen;
- unsigned int idx;
- unsigned int stack_size;
-};
-
#endif /* !__ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/net/bpf_jit_asm.S b/arch/powerpc/net/bpf_jit_asm.S
deleted file mode 100644
index 2f5030d8383f..000000000000
--- a/arch/powerpc/net/bpf_jit_asm.S
+++ /dev/null
@@ -1,226 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* bpf_jit.S: Packet/header access helper functions
- * for PPC64 BPF compiler.
- *
- * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
- */
-
-#include <asm/ppc_asm.h>
-#include <asm/asm-compat.h>
-#include "bpf_jit32.h"
-
-/*
- * All of these routines are called directly from generated code,
- * whose register usage is:
- *
- * r3 skb
- * r4,r5 A,X
- * r6 *** address parameter to helper ***
- * r7-r10 scratch
- * r14 skb->data
- * r15 skb headlen
- * r16-31 M[]
- */
-
-/*
- * To consider: These helpers are so small it could be better to just
- * generate them inline. Inline code can do the simple headlen check
- * then branch directly to slow_path_XXX if required. (In fact, could
- * load a spare GPR with the address of slow_path_generic and pass size
- * as an argument, making the call site a mtlr, li and bllr.)
- */
- .globl sk_load_word
-sk_load_word:
- PPC_LCMPI r_addr, 0
- blt bpf_slow_path_word_neg
- .globl sk_load_word_positive_offset
-sk_load_word_positive_offset:
- /* Are we accessing past headlen? */
- subi r_scratch1, r_HL, 4
- PPC_LCMP r_scratch1, r_addr
- blt bpf_slow_path_word
- /* Nope, just hitting the header. cr0 here is eq or gt! */
-#ifdef __LITTLE_ENDIAN__
- lwbrx r_A, r_D, r_addr
-#else
- lwzx r_A, r_D, r_addr
-#endif
- blr /* Return success, cr0 != LT */
-
- .globl sk_load_half
-sk_load_half:
- PPC_LCMPI r_addr, 0
- blt bpf_slow_path_half_neg
- .globl sk_load_half_positive_offset
-sk_load_half_positive_offset:
- subi r_scratch1, r_HL, 2
- PPC_LCMP r_scratch1, r_addr
- blt bpf_slow_path_half
-#ifdef __LITTLE_ENDIAN__
- lhbrx r_A, r_D, r_addr
-#else
- lhzx r_A, r_D, r_addr
-#endif
- blr
-
- .globl sk_load_byte
-sk_load_byte:
- PPC_LCMPI r_addr, 0
- blt bpf_slow_path_byte_neg
- .globl sk_load_byte_positive_offset
-sk_load_byte_positive_offset:
- PPC_LCMP r_HL, r_addr
- ble bpf_slow_path_byte
- lbzx r_A, r_D, r_addr
- blr
-
-/*
- * BPF_LDX | BPF_B | BPF_MSH: ldxb 4*([offset]&0xf)
- * r_addr is the offset value
- */
- .globl sk_load_byte_msh
-sk_load_byte_msh:
- PPC_LCMPI r_addr, 0
- blt bpf_slow_path_byte_msh_neg
- .globl sk_load_byte_msh_positive_offset
-sk_load_byte_msh_positive_offset:
- PPC_LCMP r_HL, r_addr
- ble bpf_slow_path_byte_msh
- lbzx r_X, r_D, r_addr
- rlwinm r_X, r_X, 2, 32-4-2, 31-2
- blr
-
-/* Call out to skb_copy_bits:
- * We'll need to back up our volatile regs first; we have
- * local variable space at r1+(BPF_PPC_STACK_BASIC).
- * Allocate a new stack frame here to remain ABI-compliant in
- * stashing LR.
- */
-#define bpf_slow_path_common(SIZE) \
- mflr r0; \
- PPC_STL r0, PPC_LR_STKOFF(r1); \
- /* R3 goes in parameter space of caller's frame */ \
- PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
- PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
- PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
- addi r5, r1, BPF_PPC_STACK_BASIC+(2*REG_SZ); \
- PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
- /* R3 = r_skb, as passed */ \
- mr r4, r_addr; \
- li r6, SIZE; \
- bl skb_copy_bits; \
- nop; \
- /* R3 = 0 on success */ \
- addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
- PPC_LL r0, PPC_LR_STKOFF(r1); \
- PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
- PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
- mtlr r0; \
- PPC_LCMPI r3, 0; \
- blt bpf_error; /* cr0 = LT */ \
- PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
- /* Great success! */
-
-bpf_slow_path_word:
- bpf_slow_path_common(4)
- /* Data value is on stack, and cr0 != LT */
- lwz r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
- blr
-
-bpf_slow_path_half:
- bpf_slow_path_common(2)
- lhz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
- blr
-
-bpf_slow_path_byte:
- bpf_slow_path_common(1)
- lbz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
- blr
-
-bpf_slow_path_byte_msh:
- bpf_slow_path_common(1)
- lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
- rlwinm r_X, r_X, 2, 32-4-2, 31-2
- blr
-
-/* Call out to bpf_internal_load_pointer_neg_helper:
- * We'll need to back up our volatile regs first; we have
- * local variable space at r1+(BPF_PPC_STACK_BASIC).
- * Allocate a new stack frame here to remain ABI-compliant in
- * stashing LR.
- */
-#define sk_negative_common(SIZE) \
- mflr r0; \
- PPC_STL r0, PPC_LR_STKOFF(r1); \
- /* R3 goes in parameter space of caller's frame */ \
- PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
- PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
- PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
- PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
- /* R3 = r_skb, as passed */ \
- mr r4, r_addr; \
- li r5, SIZE; \
- bl bpf_internal_load_pointer_neg_helper; \
- nop; \
- /* R3 != 0 on success */ \
- addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
- PPC_LL r0, PPC_LR_STKOFF(r1); \
- PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
- PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
- mtlr r0; \
- PPC_LCMPLI r3, 0; \
- beq bpf_error_slow; /* cr0 = EQ */ \
- mr r_addr, r3; \
- PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
- /* Great success! */
-
-bpf_slow_path_word_neg:
- lis r_scratch1,-32 /* SKF_LL_OFF */
- PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
- blt bpf_error /* cr0 = LT */
- .globl sk_load_word_negative_offset
-sk_load_word_negative_offset:
- sk_negative_common(4)
- lwz r_A, 0(r_addr)
- blr
-
-bpf_slow_path_half_neg:
- lis r_scratch1,-32 /* SKF_LL_OFF */
- PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
- blt bpf_error /* cr0 = LT */
- .globl sk_load_half_negative_offset
-sk_load_half_negative_offset:
- sk_negative_common(2)
- lhz r_A, 0(r_addr)
- blr
-
-bpf_slow_path_byte_neg:
- lis r_scratch1,-32 /* SKF_LL_OFF */
- PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
- blt bpf_error /* cr0 = LT */
- .globl sk_load_byte_negative_offset
-sk_load_byte_negative_offset:
- sk_negative_common(1)
- lbz r_A, 0(r_addr)
- blr
-
-bpf_slow_path_byte_msh_neg:
- lis r_scratch1,-32 /* SKF_LL_OFF */
- PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
- blt bpf_error /* cr0 = LT */
- .globl sk_load_byte_msh_negative_offset
-sk_load_byte_msh_negative_offset:
- sk_negative_common(1)
- lbz r_X, 0(r_addr)
- rlwinm r_X, r_X, 2, 32-4-2, 31-2
- blr
-
-bpf_error_slow:
- /* fabricate a cr0 = lt */
- li r_scratch1, -1
- PPC_LCMPI r_scratch1, 0
-bpf_error:
- /* Entered with cr0 = lt */
- li r3, 0
- /* Generated code will 'blt epilogue', returning 0. */
- blr
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index e809cb5a1631..53aefee3fe70 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* bpf_jit_comp.c: BPF JIT compiler
+/*
+ * eBPF JIT compiler
*
- * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
+ * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+ * IBM Corporation
*
- * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
- * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
+ * Based on the powerpc classic BPF JIT compiler by Matt Evans
*/
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
@@ -12,639 +13,204 @@
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
+#include <asm/kprobes.h>
+#include <linux/bpf.h>
-#include "bpf_jit32.h"
+#include "bpf_jit.h"
-static inline void bpf_flush_icache(void *start, void *end)
+static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
- smp_wmb();
- flush_icache_range((unsigned long)start, (unsigned long)end);
+ memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
}
-static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
- struct codegen_context *ctx)
+/* Fix the branch target addresses for subprog calls */
+static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
+ struct codegen_context *ctx, u32 *addrs)
{
- int i;
- const struct sock_filter *filter = fp->insns;
-
- if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
- /* Make stackframe */
- if (ctx->seen & SEEN_DATAREF) {
- /* If we call any helpers (for loads), save LR */
- EMIT(PPC_INST_MFLR | __PPC_RT(R0));
- PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
-
- /* Back up non-volatile regs. */
- PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
- PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
- }
- if (ctx->seen & SEEN_MEM) {
- /*
- * Conditionally save regs r15-r31 as some will be used
- * for M[] data.
- */
- for (i = r_M; i < (r_M+16); i++) {
- if (ctx->seen & (1 << (i-r_M)))
- PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
- }
- }
- PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
- }
-
- if (ctx->seen & SEEN_DATAREF) {
- /*
- * If this filter needs to access skb data,
- * prepare r_D and r_HL:
- * r_HL = skb->len - skb->data_len
- * r_D = skb->data
- */
- PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
- data_len));
- PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
- EMIT(PPC_RAW_SUB(r_HL, r_HL, r_scratch1));
- PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
- }
+ const struct bpf_insn *insn = fp->insnsi;
+ bool func_addr_fixed;
+ u64 func_addr;
+ u32 tmp_idx;
+ int i, ret;
- if (ctx->seen & SEEN_XREG) {
+ for (i = 0; i < fp->len; i++) {
/*
- * TODO: Could also detect whether first instr. sets X and
- * avoid this (as below, with A).
+ * During the extra pass, only the branch target addresses for
+ * the subprog calls need to be fixed. All other instructions
+	 * can be left untouched.
+ *
+ * The JITed image length does not change because we already
+	 * ensure that the JITed instruction sequences for these calls
+ * are of fixed length by padding them with NOPs.
*/
- EMIT(PPC_RAW_LI(r_X, 0));
- }
-
- /* make sure we dont leak kernel information to user */
- if (bpf_needs_clear_a(&filter[0]))
- EMIT(PPC_RAW_LI(r_A, 0));
-}
-
-static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
-{
- int i;
-
- if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
- EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME));
- if (ctx->seen & SEEN_DATAREF) {
- PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
- EMIT(PPC_RAW_MTLR(0));
- PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
- PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
- }
- if (ctx->seen & SEEN_MEM) {
- /* Restore any saved non-vol registers */
- for (i = r_M; i < (r_M+16); i++) {
- if (ctx->seen & (1 << (i-r_M)))
- PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
- }
- }
- }
- /* The RETs have left a return value in R3. */
+ if (insn[i].code == (BPF_JMP | BPF_CALL) &&
+ insn[i].src_reg == BPF_PSEUDO_CALL) {
+ ret = bpf_jit_get_func_addr(fp, &insn[i], true,
+ &func_addr,
+ &func_addr_fixed);
+ if (ret < 0)
+ return ret;
- EMIT(PPC_RAW_BLR());
-}
-
-#define CHOOSE_LOAD_FUNC(K, func) \
- ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
-
-/* Assemble the body code between the prologue & epilogue. */
-static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
- struct codegen_context *ctx,
- unsigned int *addrs)
-{
- const struct sock_filter *filter = fp->insns;
- int flen = fp->len;
- u8 *func;
- unsigned int true_cond;
- int i;
-
- /* Start of epilogue code */
- unsigned int exit_addr = addrs[flen];
-
- for (i = 0; i < flen; i++) {
- unsigned int K = filter[i].k;
- u16 code = bpf_anc_helper(&filter[i]);
-
- /*
- * addrs[] maps a BPF bytecode address into a real offset from
- * the start of the body code.
- */
- addrs[i] = ctx->idx * 4;
-
- switch (code) {
- /*** ALU ops ***/
- case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
- ctx->seen |= SEEN_XREG;
- EMIT(PPC_RAW_ADD(r_A, r_A, r_X));
- break;
- case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
- if (!K)
- break;
- EMIT(PPC_RAW_ADDI(r_A, r_A, IMM_L(K)));
- if (K >= 32768)
- EMIT(PPC_RAW_ADDIS(r_A, r_A, IMM_HA(K)));
- break;
- case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
- ctx->seen |= SEEN_XREG;
- EMIT(PPC_RAW_SUB(r_A, r_A, r_X));
- break;
- case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
- if (!K)
- break;
- EMIT(PPC_RAW_ADDI(r_A, r_A, IMM_L(-K)));
- if (K >= 32768)
- EMIT(PPC_RAW_ADDIS(r_A, r_A, IMM_HA(-K)));
- break;
- case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
- ctx->seen |= SEEN_XREG;
- EMIT(PPC_RAW_MULW(r_A, r_A, r_X));
- break;
- case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
- if (K < 32768)
- EMIT(PPC_RAW_MULI(r_A, r_A, K));
- else {
- PPC_LI32(r_scratch1, K);
- EMIT(PPC_RAW_MULW(r_A, r_A, r_scratch1));
- }
- break;
- case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
- case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
- ctx->seen |= SEEN_XREG;
- EMIT(PPC_RAW_CMPWI(r_X, 0));
- if (ctx->pc_ret0 != -1) {
- PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
- } else {
- PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
- EMIT(PPC_RAW_LI(r_ret, 0));
- PPC_JMP(exit_addr);
- }
- if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
- EMIT(PPC_RAW_DIVWU(r_scratch1, r_A, r_X));
- EMIT(PPC_RAW_MULW(r_scratch1, r_X, r_scratch1));
- EMIT(PPC_RAW_SUB(r_A, r_A, r_scratch1));
- } else {
- EMIT(PPC_RAW_DIVWU(r_A, r_A, r_X));
- }
- break;
- case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
- PPC_LI32(r_scratch2, K);
- EMIT(PPC_RAW_DIVWU(r_scratch1, r_A, r_scratch2));
- EMIT(PPC_RAW_MULW(r_scratch1, r_scratch2, r_scratch1));
- EMIT(PPC_RAW_SUB(r_A, r_A, r_scratch1));
- break;
- case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
- if (K == 1)
- break;
- PPC_LI32(r_scratch1, K);
- EMIT(PPC_RAW_DIVWU(r_A, r_A, r_scratch1));
- break;
- case BPF_ALU | BPF_AND | BPF_X:
- ctx->seen |= SEEN_XREG;
- EMIT(PPC_RAW_AND(r_A, r_A, r_X));
- break;
- case BPF_ALU | BPF_AND | BPF_K:
- if (!IMM_H(K))
- EMIT(PPC_RAW_ANDI(r_A, r_A, K));
- else {
- PPC_LI32(r_scratch1, K);
- EMIT(PPC_RAW_AND(r_A, r_A, r_scratch1));
- }
- break;
- case BPF_ALU | BPF_OR | BPF_X:
- ctx->seen |= SEEN_XREG;
- EMIT(PPC_RAW_OR(r_A, r_A, r_X));
- break;
- case BPF_ALU | BPF_OR | BPF_K:
- if (IMM_L(K))
- EMIT(PPC_RAW_ORI(r_A, r_A, IMM_L(K)));
- if (K >= 65536)
- EMIT(PPC_RAW_ORIS(r_A, r_A, IMM_H(K)));
- break;
- case BPF_ANC | SKF_AD_ALU_XOR_X:
- case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
- ctx->seen |= SEEN_XREG;
- EMIT(PPC_RAW_XOR(r_A, r_A, r_X));
- break;
- case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
- if (IMM_L(K))
- EMIT(PPC_RAW_XORI(r_A, r_A, IMM_L(K)));
- if (K >= 65536)
- EMIT(PPC_RAW_XORIS(r_A, r_A, IMM_H(K)));
- break;
- case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
- ctx->seen |= SEEN_XREG;
- EMIT(PPC_RAW_SLW(r_A, r_A, r_X));
- break;
- case BPF_ALU | BPF_LSH | BPF_K:
- if (K == 0)
- break;
- else
- EMIT(PPC_RAW_SLWI(r_A, r_A, K));
- break;
- case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
- ctx->seen |= SEEN_XREG;
- EMIT(PPC_RAW_SRW(r_A, r_A, r_X));
- break;
- case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
- if (K == 0)
- break;
- else
- EMIT(PPC_RAW_SRWI(r_A, r_A, K));
- break;
- case BPF_ALU | BPF_NEG:
- EMIT(PPC_RAW_NEG(r_A, r_A));
- break;
- case BPF_RET | BPF_K:
- PPC_LI32(r_ret, K);
- if (!K) {
- if (ctx->pc_ret0 == -1)
- ctx->pc_ret0 = i;
- }
- /*
- * If this isn't the very last instruction, branch to
- * the epilogue if we've stuff to clean up. Otherwise,
- * if there's nothing to tidy, just return. If we /are/
- * the last instruction, we're about to fall through to
- * the epilogue to return.
- */
- if (i != flen - 1) {
- /*
- * Note: 'seen' is properly valid only on pass
- * #2. Both parts of this conditional are the
- * same instruction size though, meaning the
- * first pass will still correctly determine the
- * code size/addresses.
- */
- if (ctx->seen)
- PPC_JMP(exit_addr);
- else
- EMIT(PPC_RAW_BLR());
- }
- break;
- case BPF_RET | BPF_A:
- EMIT(PPC_RAW_MR(r_ret, r_A));
- if (i != flen - 1) {
- if (ctx->seen)
- PPC_JMP(exit_addr);
- else
- EMIT(PPC_RAW_BLR());
- }
- break;
- case BPF_MISC | BPF_TAX: /* X = A */
- EMIT(PPC_RAW_MR(r_X, r_A));
- break;
- case BPF_MISC | BPF_TXA: /* A = X */
- ctx->seen |= SEEN_XREG;
- EMIT(PPC_RAW_MR(r_A, r_X));
- break;
-
- /*** Constant loads/M[] access ***/
- case BPF_LD | BPF_IMM: /* A = K */
- PPC_LI32(r_A, K);
- break;
- case BPF_LDX | BPF_IMM: /* X = K */
- PPC_LI32(r_X, K);
- break;
- case BPF_LD | BPF_MEM: /* A = mem[K] */
- EMIT(PPC_RAW_MR(r_A, r_M + (K & 0xf)));
- ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
- break;
- case BPF_LDX | BPF_MEM: /* X = mem[K] */
- EMIT(PPC_RAW_MR(r_X, r_M + (K & 0xf)));
- ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
- break;
- case BPF_ST: /* mem[K] = A */
- EMIT(PPC_RAW_MR(r_M + (K & 0xf), r_A));
- ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
- break;
- case BPF_STX: /* mem[K] = X */
- EMIT(PPC_RAW_MR(r_M + (K & 0xf), r_X));
- ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
- break;
- case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
- BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4);
- PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
- break;
- case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
- PPC_LWZ_OFFS(r_A, r_skb, K);
- break;
- case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
- PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
- break;
-
- /*** Ancillary info loads ***/
- case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
- BUILD_BUG_ON(sizeof_field(struct sk_buff,
- protocol) != 2);
- PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
- protocol));
- break;
- case BPF_ANC | SKF_AD_IFINDEX:
- case BPF_ANC | SKF_AD_HATYPE:
- BUILD_BUG_ON(sizeof_field(struct net_device,
- ifindex) != 4);
- BUILD_BUG_ON(sizeof_field(struct net_device,
- type) != 2);
- PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
- dev));
- EMIT(PPC_RAW_CMPDI(r_scratch1, 0));
- if (ctx->pc_ret0 != -1) {
- PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
- } else {
- /* Exit, returning 0; first pass hits here. */
- PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
- EMIT(PPC_RAW_LI(r_ret, 0));
- PPC_JMP(exit_addr);
- }
- if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
- PPC_LWZ_OFFS(r_A, r_scratch1,
- offsetof(struct net_device, ifindex));
- } else {
- PPC_LHZ_OFFS(r_A, r_scratch1,
- offsetof(struct net_device, type));
- }
-
- break;
- case BPF_ANC | SKF_AD_MARK:
- BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);
- PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
- mark));
- break;
- case BPF_ANC | SKF_AD_RXHASH:
- BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);
- PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
- hash));
- break;
- case BPF_ANC | SKF_AD_VLAN_TAG:
- BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);
-
- PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
- vlan_tci));
- break;
- case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
- PPC_LBZ_OFFS(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET());
- if (PKT_VLAN_PRESENT_BIT)
- EMIT(PPC_RAW_SRWI(r_A, r_A, PKT_VLAN_PRESENT_BIT));
- if (PKT_VLAN_PRESENT_BIT < 7)
- EMIT(PPC_RAW_ANDI(r_A, r_A, 1));
- break;
- case BPF_ANC | SKF_AD_QUEUE:
- BUILD_BUG_ON(sizeof_field(struct sk_buff,
- queue_mapping) != 2);
- PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
- queue_mapping));
- break;
- case BPF_ANC | SKF_AD_PKTTYPE:
- PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
- EMIT(PPC_RAW_ANDI(r_A, r_A, PKT_TYPE_MAX));
- EMIT(PPC_RAW_SRWI(r_A, r_A, 5));
- break;
- case BPF_ANC | SKF_AD_CPU:
- PPC_BPF_LOAD_CPU(r_A);
- break;
- /*** Absolute loads from packet header/data ***/
- case BPF_LD | BPF_W | BPF_ABS:
- func = CHOOSE_LOAD_FUNC(K, sk_load_word);
- goto common_load;
- case BPF_LD | BPF_H | BPF_ABS:
- func = CHOOSE_LOAD_FUNC(K, sk_load_half);
- goto common_load;
- case BPF_LD | BPF_B | BPF_ABS:
- func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
- common_load:
- /* Load from [K]. */
- ctx->seen |= SEEN_DATAREF;
- PPC_FUNC_ADDR(r_scratch1, func);
- EMIT(PPC_RAW_MTLR(r_scratch1));
- PPC_LI32(r_addr, K);
- EMIT(PPC_RAW_BLRL());
/*
- * Helper returns 'lt' condition on error, and an
- * appropriate return value in r3
+			 * Save ctx->idx, which currently points to the end
+			 * of the JITed image, and temporarily set it to the
+			 * offset of the instruction sequence for this
+			 * subprog call.
*/
- PPC_BCC(COND_LT, exit_addr);
- break;
-
- /*** Indirect loads from packet header/data ***/
- case BPF_LD | BPF_W | BPF_IND:
- func = sk_load_word;
- goto common_load_ind;
- case BPF_LD | BPF_H | BPF_IND:
- func = sk_load_half;
- goto common_load_ind;
- case BPF_LD | BPF_B | BPF_IND:
- func = sk_load_byte;
- common_load_ind:
+ tmp_idx = ctx->idx;
+ ctx->idx = addrs[i] / 4;
+ bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+
/*
- * Load from [X + K]. Negative offsets are tested for
- * in the helper functions.
+ * Restore ctx->idx here. This is safe as the length
+ * of the JITed sequence remains unchanged.
*/
- ctx->seen |= SEEN_DATAREF | SEEN_XREG;
- PPC_FUNC_ADDR(r_scratch1, func);
- EMIT(PPC_RAW_MTLR(r_scratch1));
- EMIT(PPC_RAW_ADDI(r_addr, r_X, IMM_L(K)));
- if (K >= 32768)
- EMIT(PPC_RAW_ADDIS(r_addr, r_addr, IMM_HA(K)));
- EMIT(PPC_RAW_BLRL());
- /* If error, cr0.LT set */
- PPC_BCC(COND_LT, exit_addr);
- break;
-
- case BPF_LDX | BPF_B | BPF_MSH:
- func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
- goto common_load;
- break;
-
- /*** Jump and branches ***/
- case BPF_JMP | BPF_JA:
- if (K != 0)
- PPC_JMP(addrs[i + 1 + K]);
- break;
-
- case BPF_JMP | BPF_JGT | BPF_K:
- case BPF_JMP | BPF_JGT | BPF_X:
- true_cond = COND_GT;
- goto cond_branch;
- case BPF_JMP | BPF_JGE | BPF_K:
- case BPF_JMP | BPF_JGE | BPF_X:
- true_cond = COND_GE;
- goto cond_branch;
- case BPF_JMP | BPF_JEQ | BPF_K:
- case BPF_JMP | BPF_JEQ | BPF_X:
- true_cond = COND_EQ;
- goto cond_branch;
- case BPF_JMP | BPF_JSET | BPF_K:
- case BPF_JMP | BPF_JSET | BPF_X:
- true_cond = COND_NE;
- cond_branch:
- /* same targets, can avoid doing the test :) */
- if (filter[i].jt == filter[i].jf) {
- if (filter[i].jt > 0)
- PPC_JMP(addrs[i + 1 + filter[i].jt]);
- break;
- }
-
- switch (code) {
- case BPF_JMP | BPF_JGT | BPF_X:
- case BPF_JMP | BPF_JGE | BPF_X:
- case BPF_JMP | BPF_JEQ | BPF_X:
- ctx->seen |= SEEN_XREG;
- EMIT(PPC_RAW_CMPLW(r_A, r_X));
- break;
- case BPF_JMP | BPF_JSET | BPF_X:
- ctx->seen |= SEEN_XREG;
- EMIT(PPC_RAW_AND_DOT(r_scratch1, r_A, r_X));
- break;
- case BPF_JMP | BPF_JEQ | BPF_K:
- case BPF_JMP | BPF_JGT | BPF_K:
- case BPF_JMP | BPF_JGE | BPF_K:
- if (K < 32768)
- EMIT(PPC_RAW_CMPLWI(r_A, K));
- else {
- PPC_LI32(r_scratch1, K);
- EMIT(PPC_RAW_CMPLW(r_A, r_scratch1));
- }
- break;
- case BPF_JMP | BPF_JSET | BPF_K:
- if (K < 32768)
- /* PPC_ANDI is /only/ dot-form */
- EMIT(PPC_RAW_ANDI(r_scratch1, r_A, K));
- else {
- PPC_LI32(r_scratch1, K);
- EMIT(PPC_RAW_AND_DOT(r_scratch1, r_A,
- r_scratch1));
- }
- break;
- }
- /* Sometimes branches are constructed "backward", with
- * the false path being the branch and true path being
- * a fallthrough to the next instruction.
- */
- if (filter[i].jt == 0)
- /* Swap the sense of the branch */
- PPC_BCC(true_cond ^ COND_CMP_TRUE,
- addrs[i + 1 + filter[i].jf]);
- else {
- PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
- if (filter[i].jf != 0)
- PPC_JMP(addrs[i + 1 + filter[i].jf]);
- }
- break;
- default:
- /* The filter contains something cruel & unusual.
- * We don't handle it, but also there shouldn't be
- * anything missing from our list.
- */
- if (printk_ratelimit())
- pr_err("BPF filter opcode %04x (@%d) unsupported\n",
- filter[i].code, i);
- return -ENOTSUPP;
+ ctx->idx = tmp_idx;
}
-
}
- /* Set end-of-body-code address for exit. */
- addrs[i] = ctx->idx * 4;
return 0;
}
-void bpf_jit_compile(struct bpf_prog *fp)
+struct powerpc64_jit_data {
+ struct bpf_binary_header *header;
+ u32 *addrs;
+ u8 *image;
+ u32 proglen;
+ struct codegen_context ctx;
+};
+
+bool bpf_jit_needs_zext(void)
{
- unsigned int proglen;
- unsigned int alloclen;
- u32 *image = NULL;
+ return true;
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+{
+ u32 proglen;
+ u32 alloclen;
+ u8 *image = NULL;
u32 *code_base;
- unsigned int *addrs;
+ u32 *addrs;
+ struct powerpc64_jit_data *jit_data;
struct codegen_context cgctx;
int pass;
- int flen = fp->len;
+ int flen;
+ struct bpf_binary_header *bpf_hdr;
+ struct bpf_prog *org_fp = fp;
+ struct bpf_prog *tmp_fp;
+ bool bpf_blinded = false;
+ bool extra_pass = false;
+
+ if (!fp->jit_requested)
+ return org_fp;
+
+ tmp_fp = bpf_jit_blind_constants(org_fp);
+ if (IS_ERR(tmp_fp))
+ return org_fp;
+
+ if (tmp_fp != org_fp) {
+ bpf_blinded = true;
+ fp = tmp_fp;
+ }
- if (!bpf_jit_enable)
- return;
+ jit_data = fp->aux->jit_data;
+ if (!jit_data) {
+ jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+ if (!jit_data) {
+ fp = org_fp;
+ goto out;
+ }
+ fp->aux->jit_data = jit_data;
+ }
+
+ flen = fp->len;
+ addrs = jit_data->addrs;
+ if (addrs) {
+ cgctx = jit_data->ctx;
+ image = jit_data->image;
+ bpf_hdr = jit_data->header;
+ proglen = jit_data->proglen;
+ alloclen = proglen + FUNCTION_DESCR_SIZE;
+ extra_pass = true;
+ goto skip_init_ctx;
+ }
addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
- if (addrs == NULL)
- return;
+ if (addrs == NULL) {
+ fp = org_fp;
+ goto out_addrs;
+ }
- /*
- * There are multiple assembly passes as the generated code will change
- * size as it settles down, figuring out the max branch offsets/exit
- * paths required.
- *
- * The range of standard conditional branches is +/- 32Kbytes. Since
- * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
- * finish with 8 bytes/instruction. Not feasible, so long jumps are
- * used, distinct from short branches.
- *
- * Current:
- *
- * For now, both branch types assemble to 2 words (short branches padded
- * with a NOP); this is less efficient, but assembly will always complete
- * after exactly 3 passes:
- *
- * First pass: No code buffer; Program is "faux-generated" -- no code
- * emitted but maximum size of output determined (and addrs[] filled
- * in). Also, we note whether we use M[], whether we use skb data, etc.
- * All generation choices assumed to be 'worst-case', e.g. branches all
- * far (2 instructions), return path code reduction not available, etc.
- *
- * Second pass: Code buffer allocated with size determined previously.
- * Prologue generated to support features we have seen used. Exit paths
- * determined and addrs[] is filled in again, as code may be slightly
- * smaller as a result.
- *
- * Third pass: Code generated 'for real', and branch destinations
- * determined from now-accurate addrs[] map.
- *
- * Ideal:
- *
- * If we optimise this, near branches will be shorter. On the
- * first assembly pass, we should err on the side of caution and
- * generate the biggest code. On subsequent passes, branches will be
- * generated short or long and code size will reduce. With smaller
- * code, more branches may fall into the short category, and code will
- * reduce more.
- *
- * Finally, if we see one pass generate code the same size as the
- * previous pass we have converged and should now generate code for
- * real. Allocating at the end will also save the memory that would
- * otherwise be wasted by the (small) current code shrinkage.
- * Preferably, we should do a small number of passes (e.g. 5) and if we
- * haven't converged by then, get impatient and force code to generate
- * as-is, even if the odd branch would be left long. The chances of a
- * long jump are tiny with all but the most enormous of BPF filter
- * inputs, so we should usually converge on the third pass.
- */
+ memset(&cgctx, 0, sizeof(struct codegen_context));
+ memcpy(cgctx.b2p, b2p, sizeof(cgctx.b2p));
+
+ /* Make sure that the stack is quadword aligned. */
+ cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
- cgctx.idx = 0;
- cgctx.seen = 0;
- cgctx.pc_ret0 = -1;
/* Scouting faux-generate pass 0 */
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
+ if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
/* We hit something illegal or unsupported. */
- goto out;
+ fp = org_fp;
+ goto out_addrs;
+ }
+
+ /*
+ * If we have seen a tail call, we need a second pass.
+ * This is because bpf_jit_emit_common_epilogue() is called
+ * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
+ */
+ if (cgctx.seen & SEEN_TAILCALL) {
+ cgctx.idx = 0;
+ if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
+ fp = org_fp;
+ goto out_addrs;
+ }
+ }
+ bpf_jit_realloc_regs(&cgctx);
/*
* Pretend to build prologue, given the features we've seen. This will
* update cgctx.idx as it pretends to output instructions, then we can
* calculate total size from idx.
*/
- bpf_jit_build_prologue(fp, 0, &cgctx);
+ bpf_jit_build_prologue(0, &cgctx);
bpf_jit_build_epilogue(0, &cgctx);
proglen = cgctx.idx * 4;
alloclen = proglen + FUNCTION_DESCR_SIZE;
- image = module_alloc(alloclen);
- if (!image)
- goto out;
- code_base = image + (FUNCTION_DESCR_SIZE/4);
+ bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
+ if (!bpf_hdr) {
+ fp = org_fp;
+ goto out_addrs;
+ }
+
+skip_init_ctx:
+ code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
+
+ if (extra_pass) {
+ /*
+ * Do not touch the prologue and epilogue as they will remain
+ * unchanged. Only fix the branch target address for subprog
+ * calls in the body.
+ *
+ * This does not change the offsets and lengths of the subprog
+ * call instruction sequences and hence, the size of the JITed
+ * image as well.
+ */
+ bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
+
+ /* There is no need to perform the usual passes. */
+ goto skip_codegen_passes;
+ }
/* Code generation passes 1-2 */
for (pass = 1; pass < 3; pass++) {
/* Now build the prologue, body code & epilogue for real. */
cgctx.idx = 0;
- bpf_jit_build_prologue(fp, code_base, &cgctx);
- bpf_jit_build_body(fp, code_base, &cgctx, addrs);
+ bpf_jit_build_prologue(code_base, &cgctx);
+ bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
bpf_jit_build_epilogue(code_base, &cgctx);
if (bpf_jit_enable > 1)
@@ -652,15 +218,15 @@ void bpf_jit_compile(struct bpf_prog *fp)
proglen - (cgctx.idx * 4), cgctx.seen);
}
+skip_codegen_passes:
if (bpf_jit_enable > 1)
- /* Note that we output the base address of the code_base
+ /*
+ * Note that we output the base address of the code_base
* rather than image, since opcodes are in code_base.
*/
bpf_jit_dump(flen, proglen, pass, code_base);
- bpf_flush_icache(code_base, code_base + (proglen/4));
-
-#ifdef CONFIG_PPC64
+#ifdef PPC64_ELF_ABI_v1
/* Function descriptor nastiness: Address + TOC */
((u64 *)image)[0] = (u64)code_base;
((u64 *)image)[1] = local_paca->kernel_toc;
@@ -668,16 +234,27 @@ void bpf_jit_compile(struct bpf_prog *fp)
fp->bpf_func = (void *)image;
fp->jited = 1;
+ fp->jited_len = alloclen;
+
+ bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
+ bpf_jit_binary_lock_ro(bpf_hdr);
+ if (!fp->is_func || extra_pass) {
+ bpf_prog_fill_jited_linfo(fp, addrs);
+out_addrs:
+ kfree(addrs);
+ kfree(jit_data);
+ fp->aux->jit_data = NULL;
+ } else {
+ jit_data->addrs = addrs;
+ jit_data->ctx = cgctx;
+ jit_data->proglen = proglen;
+ jit_data->image = image;
+ jit_data->header = bpf_hdr;
+ }
out:
- kfree(addrs);
- return;
-}
-
-void bpf_jit_free(struct bpf_prog *fp)
-{
- if (fp->jited)
- module_memfree(fp->bpf_func);
+ if (bpf_blinded)
+ bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
- bpf_prog_unlock_free(fp);
+ return fp;
}
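The "faux-generate" pass 0 above works because the emitter only writes when a buffer exists, while always advancing the instruction index, so the same body code both measures and generates. A minimal sketch of the pattern (the kernel's EMIT()/PLANT_INSTR() macros play this role; names here are illustrative):

	struct ctx {
		unsigned int idx;	/* next instruction slot */
	};

	static void emit(unsigned int *image, struct ctx *ctx, unsigned int insn)
	{
		if (image)		/* pass 0 passes NULL and only counts */
			image[ctx->idx] = insn;
		ctx->idx++;
	}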
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
new file mode 100644
index 000000000000..cbe5b399ed86
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -0,0 +1,1100 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * eBPF JIT compiler for PPC32
+ *
+ * Copyright 2020 Christophe Leroy <christophe.leroy@csgroup.eu>
+ * CS GROUP France
+ *
+ * Based on PPC64 eBPF JIT compiler by Naveen N. Rao
+ */
+#include <linux/moduleloader.h>
+#include <asm/cacheflush.h>
+#include <asm/asm-compat.h>
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include <linux/if_vlan.h>
+#include <asm/kprobes.h>
+#include <linux/bpf.h>
+
+#include "bpf_jit.h"
+
+/*
+ * Stack layout:
+ *
+ * [ prev sp ] <-------------
+ * [ nv gpr save area ] 16 * 4 |
+ * fp (r31) -->	[ ebpf stack space ] up to 512	|
+ * [ frame header ] 16 |
+ * sp (r1) ---> [ stack pointer ] --------------
+ */
+
+/* for gpr non volatile registers r17 to r31 (15) + tail call */
+#define BPF_PPC_STACK_SAVE (15 * 4 + 4)
+/* stack frame, ensure this is quadword aligned */
+#define BPF_PPC_STACKFRAME(ctx) (STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size)
+
+/* BPF register usage */
+#define TMP_REG (MAX_BPF_JIT_REG + 0)
+
+/* BPF to ppc register mappings */
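+/*
+ * Each 64-bit BPF register lives in a pair of GPRs: b2p[i] holds the
+ * low word and b2p[i] - 1 the high word, hence the even GPR numbers
+ * below. TMP_REG is a lone 32-bit scratch register.
+ */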
+const int b2p[MAX_BPF_JIT_REG + 1] = {
+ /* function return value */
+ [BPF_REG_0] = 12,
+ /* function arguments */
+ [BPF_REG_1] = 4,
+ [BPF_REG_2] = 6,
+ [BPF_REG_3] = 8,
+ [BPF_REG_4] = 10,
+ [BPF_REG_5] = 22,
+ /* non volatile registers */
+ [BPF_REG_6] = 24,
+ [BPF_REG_7] = 26,
+ [BPF_REG_8] = 28,
+ [BPF_REG_9] = 30,
+ /* frame pointer aka BPF_REG_10 */
+ [BPF_REG_FP] = 18,
+ /* eBPF jit internal registers */
+ [BPF_REG_AX] = 20,
+ [TMP_REG] = 31, /* 32 bits */
+};
+
+static int bpf_to_ppc(struct codegen_context *ctx, int reg)
+{
+ return ctx->b2p[reg];
+}
+
+/* PPC NVR range -- update this if we ever use NVRs below r17 */
+#define BPF_PPC_NVR_MIN 17
+#define BPF_PPC_TC 16
+
+static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
+{
+ if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC)
+ return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg);
+
+ WARN(true, "BPF JIT is asking about unknown registers, will crash the stack");
+ /* Use the hole we have left for alignment */
+ return BPF_PPC_STACKFRAME(ctx) - 4;
+}
+
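+/*
+ * If the program makes no helper calls (SEEN_FUNC clear), volatile GPRs
+ * survive for its whole lifetime, so BPF registers that were assigned
+ * to non-volatile GPR pairs are remapped onto free volatile pairs and
+ * no longer need saving in the prologue/epilogue. The 0xaaaaaaaa-based
+ * masks select the low (even-numbered) GPR of each pair in 'seen',
+ * where GPR r is tracked in bit (31 - r).
+ */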
+void bpf_jit_realloc_regs(struct codegen_context *ctx)
+{
+ if (ctx->seen & SEEN_FUNC)
+ return;
+
+ while (ctx->seen & SEEN_NVREG_MASK &&
+ (ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) {
+ int old = 32 - fls(ctx->seen & (SEEN_NVREG_MASK & 0xaaaaaaab));
+ int new = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa));
+ int i;
+
+ for (i = BPF_REG_0; i <= TMP_REG; i++) {
+ if (ctx->b2p[i] != old)
+ continue;
+ ctx->b2p[i] = new;
+ bpf_set_seen_register(ctx, new);
+ bpf_clear_seen_register(ctx, old);
+ if (i != TMP_REG) {
+ bpf_set_seen_register(ctx, new - 1);
+ bpf_clear_seen_register(ctx, old - 1);
+ }
+ break;
+ }
+ }
+}
+
+void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
+{
+ int i;
+
+	/* First arg comes in as a 32-bit pointer. */
+ EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_1), _R3));
+ EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_1) - 1, 0));
+ EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
+
+ /*
+ * Initialize tail_call_cnt in stack frame if we do tail calls.
+ * Otherwise, put in NOPs so that it can be skipped when we are
+ * invoked through a tail call.
+ */
+ if (ctx->seen & SEEN_TAILCALL)
+ EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_1) - 1, _R1,
+ bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+ else
+ EMIT(PPC_RAW_NOP());
+
+#define BPF_TAILCALL_PROLOGUE_SIZE 16
+
+ /*
+ * We need a stack frame, but we don't necessarily need to
+ * save/restore LR unless we call other functions
+ */
+ if (ctx->seen & SEEN_FUNC)
+ EMIT(PPC_RAW_MFLR(_R0));
+
+ /*
+	 * Back up non-volatile regs -- registers r17-r31
+ */
+ for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
+ if (bpf_is_seen_register(ctx, i))
+ EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
+
+	/* If needed, retrieve arguments 9 and 10, i.e. the 5th 64-bit arg. */
+ if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) {
+		EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5) - 1, _R1, BPF_PPC_STACKFRAME(ctx) + 8));
+		EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5), _R1, BPF_PPC_STACKFRAME(ctx) + 12));
+ }
+
+ /* Setup frame pointer to point to the bpf stack area */
+ if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_FP))) {
+ EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_FP) - 1, 0));
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(ctx, BPF_REG_FP), _R1,
+ STACK_FRAME_MIN_SIZE + ctx->stack_size));
+ }
+
+ if (ctx->seen & SEEN_FUNC)
+ EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+}
+
+static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
+{
+ int i;
+
+ /* Restore NVRs */
+ for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
+ if (bpf_is_seen_register(ctx, i))
+ EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
+}
+
+void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+{
+ EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(ctx, BPF_REG_0)));
+
+ bpf_jit_emit_common_epilogue(image, ctx);
+
+ /* Tear down our stack frame */
+
+ if (ctx->seen & SEEN_FUNC)
+ EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+
+ EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
+
+ if (ctx->seen & SEEN_FUNC)
+ EMIT(PPC_RAW_MTLR(_R0));
+
+ EMIT(PPC_RAW_BLR());
+}
+
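+/*
+ * A direct 'bl' has a 26-bit signed displacement, i.e. +/- 32 MB of
+ * reach; farther targets go through CTR. On the sizing pass image is
+ * NULL, so the longer CTR-based sequence is what gets measured.
+ */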
+void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
+{
+ s32 rel = (s32)func - (s32)(image + ctx->idx);
+
+ if (image && rel < 0x2000000 && rel >= -0x2000000) {
+ PPC_BL_ABS(func);
+ } else {
+ /* Load function address into r0 */
+ EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
+ EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
+ EMIT(PPC_RAW_MTCTR(_R0));
+ EMIT(PPC_RAW_BCTRL());
+ }
+}
+
+static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+{
+ /*
+	 * By now, the eBPF program has already set up parameters in r3-r8:
+ * r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
+ * r5-r6/BPF_REG_2 - pointer to bpf_array
+ * r7-r8/BPF_REG_3 - index in bpf_array
+ */
+ int b2p_bpf_array = bpf_to_ppc(ctx, BPF_REG_2);
+ int b2p_index = bpf_to_ppc(ctx, BPF_REG_3);
+
+ /*
+ * if (index >= array->map.max_entries)
+ * goto out;
+ */
+ EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
+ EMIT(PPC_RAW_CMPLW(b2p_index, _R0));
+ EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+ PPC_BCC(COND_GE, out);
+
+ /*
+ * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+ * goto out;
+ */
+ EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT));
+ /* tail_call_cnt++; */
+ EMIT(PPC_RAW_ADDIC(_R0, _R0, 1));
+ PPC_BCC(COND_GT, out);
+
+ /* prog = array->ptrs[index]; */
+ EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
+ EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
+ EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));
+ EMIT(PPC_RAW_STW(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+
+ /*
+ * if (prog == NULL)
+ * goto out;
+ */
+ EMIT(PPC_RAW_CMPLWI(_R3, 0));
+ PPC_BCC(COND_EQ, out);
+
+ /* goto *(prog->bpf_func + prologue_size); */
+ EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));
+
+ if (ctx->seen & SEEN_FUNC)
+ EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+
+ EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));
+
+ if (ctx->seen & SEEN_FUNC)
+ EMIT(PPC_RAW_MTLR(_R0));
+
+ EMIT(PPC_RAW_MTCTR(_R3));
+
+ EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(ctx, BPF_REG_1)));
+
+	/* restore NVRs, ... */
+ bpf_jit_emit_common_epilogue(image, ctx);
+
+ EMIT(PPC_RAW_BCTR());
+ /* out: */
+}
+
+/* Assemble the body code between the prologue & epilogue */
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+ u32 *addrs, bool extra_pass)
+{
+ const struct bpf_insn *insn = fp->insnsi;
+ int flen = fp->len;
+ int i, ret;
+
+ /* Start of epilogue code - will only be valid 2nd pass onwards */
+ u32 exit_addr = addrs[flen];
+
+ for (i = 0; i < flen; i++) {
+ u32 code = insn[i].code;
+ u32 dst_reg = bpf_to_ppc(ctx, insn[i].dst_reg);
+ u32 dst_reg_h = dst_reg - 1;
+ u32 src_reg = bpf_to_ppc(ctx, insn[i].src_reg);
+ u32 src_reg_h = src_reg - 1;
+ u32 tmp_reg = bpf_to_ppc(ctx, TMP_REG);
+ s16 off = insn[i].off;
+ s32 imm = insn[i].imm;
+ bool func_addr_fixed;
+ u64 func_addr;
+ u32 true_cond;
+
+ /*
+ * addrs[] maps a BPF bytecode address into a real offset from
+ * the start of the body code.
+ */
+ addrs[i] = ctx->idx * 4;
+
+ /*
+ * As an optimization, we note down which registers
+ * are used so that we can only save/restore those in our
+ * prologue and epilogue. We do this here regardless of whether
+ * the actual BPF instruction uses src/dst registers or not
+ * (for instance, BPF_CALL does not use them). The expectation
+ * is that those instructions will have src_reg/dst_reg set to
+ * 0. Even otherwise, we just lose some prologue/epilogue
+ * optimization but everything else should work without
+ * any issues.
+ */
+ if (dst_reg >= 3 && dst_reg < 32) {
+ bpf_set_seen_register(ctx, dst_reg);
+ bpf_set_seen_register(ctx, dst_reg_h);
+ }
+
+ if (src_reg >= 3 && src_reg < 32) {
+ bpf_set_seen_register(ctx, src_reg);
+ bpf_set_seen_register(ctx, src_reg_h);
+ }
+
+ switch (code) {
+ /*
+ * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
+ */
+ case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
+ EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
+ EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_ADDE(dst_reg_h, dst_reg_h, src_reg_h));
+ break;
+ case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
+ EMIT(PPC_RAW_SUBFC(dst_reg, src_reg, dst_reg));
+ EMIT(PPC_RAW_SUBFE(dst_reg_h, src_reg_h, dst_reg_h));
+ break;
+ case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
+ imm = -imm;
+ fallthrough;
+ case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
+ if (IMM_HA(imm) & 0xffff)
+ EMIT(PPC_RAW_ADDIS(dst_reg, dst_reg, IMM_HA(imm)));
+ if (IMM_L(imm))
+ EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
+ break;
+ case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
+ imm = -imm;
+ fallthrough;
+ case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
+ if (!imm)
+ break;
+
+ if (imm >= -32768 && imm < 32768) {
+ EMIT(PPC_RAW_ADDIC(dst_reg, dst_reg, imm));
+ } else {
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
+ }
+ if (imm >= 0)
+ EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
+ else
+ EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));
+ break;
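+		/*
+		 * 64x64 -> 64 multiply from 32-bit pieces:
+		 *   hi = lo32(dst_h * src_l) + lo32(dst_l * src_h)
+		 *        + hi32(dst_l * src_l)
+		 *   lo = lo32(dst_l * src_l)
+		 */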
+ case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
+ bpf_set_seen_register(ctx, tmp_reg);
+ EMIT(PPC_RAW_MULW(_R0, dst_reg, src_reg_h));
+ EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, src_reg));
+ EMIT(PPC_RAW_MULHWU(tmp_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
+ EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg));
+ break;
+ case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
+ EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
+ if (imm >= -32768 && imm < 32768) {
+ EMIT(PPC_RAW_MULI(dst_reg, dst_reg, imm));
+ } else {
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_MULW(dst_reg, dst_reg, _R0));
+ }
+ break;
+ case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
+ if (!imm) {
+ PPC_LI32(dst_reg, 0);
+ PPC_LI32(dst_reg_h, 0);
+ break;
+ }
+ if (imm == 1)
+ break;
+ if (imm == -1) {
+ EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
+ EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
+ break;
+ }
+ bpf_set_seen_register(ctx, tmp_reg);
+ PPC_LI32(tmp_reg, imm);
+ EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, tmp_reg));
+ if (imm < 0)
+ EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, dst_reg));
+ EMIT(PPC_RAW_MULHWU(_R0, dst_reg, tmp_reg));
+ EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp_reg));
+ EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
+ break;
+ case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
+ EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
+ EMIT(PPC_RAW_DIVWU(_R0, dst_reg, src_reg));
+ EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
+ break;
+ case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
+ return -EOPNOTSUPP;
+ case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
+ return -EOPNOTSUPP;
+ case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
+ if (!imm)
+ return -EINVAL;
+ if (imm == 1)
+ break;
+
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, _R0));
+ break;
+ case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
+ if (!imm)
+ return -EINVAL;
+
+ if (!is_power_of_2((u32)imm)) {
+ bpf_set_seen_register(ctx, tmp_reg);
+ PPC_LI32(tmp_reg, imm);
+ EMIT(PPC_RAW_DIVWU(_R0, dst_reg, tmp_reg));
+ EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
+ break;
+ }
+ if (imm == 1)
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ else
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2((u32)imm), 31));
+
+ break;
+ case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
+ if (!imm)
+ return -EINVAL;
+ if (imm < 0)
+ imm = -imm;
+ if (!is_power_of_2(imm))
+ return -EOPNOTSUPP;
+ if (imm == 1)
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ else
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31));
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ break;
+ case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
+ if (!imm)
+ return -EINVAL;
+ if (!is_power_of_2(abs(imm)))
+ return -EOPNOTSUPP;
+
+ if (imm < 0) {
+ EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
+ EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
+ imm = -imm;
+ }
+ if (imm == 1)
+ break;
+ imm = ilog2(imm);
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
+ EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, imm));
+ break;
+ case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
+ EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
+ break;
+ case BPF_ALU64 | BPF_NEG: /* dst = -dst */
+ EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
+ EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
+ break;
+
+ /*
+ * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
+ */
+ case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
+ EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_AND(dst_reg_h, dst_reg_h, src_reg_h));
+ break;
+ case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
+ EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
+ if (imm >= 0)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ fallthrough;
+ case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
+ if (!IMM_H(imm)) {
+ EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
+ } else if (!IMM_L(imm)) {
+ EMIT(PPC_RAW_ANDIS(dst_reg, dst_reg, IMM_H(imm)));
+			} else if (imm == (((1 << fls(imm)) - 1) ^ ((1 << (ffs(imm) - 1)) - 1))) {
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0,
+ 32 - fls(imm), 32 - ffs(imm)));
+ } else {
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_AND(dst_reg, dst_reg, _R0));
+ }
+ break;
+ case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, src_reg_h));
+ break;
+ case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
+ /* Sign-extended */
+ if (imm < 0)
+ EMIT(PPC_RAW_LI(dst_reg_h, -1));
+ fallthrough;
+ case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
+ if (IMM_L(imm))
+ EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
+ if (IMM_H(imm))
+ EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
+ break;
+ case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
+ if (dst_reg == src_reg) {
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ } else {
+ EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_XOR(dst_reg_h, dst_reg_h, src_reg_h));
+ }
+ break;
+ case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
+ if (dst_reg == src_reg)
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ else
+ EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
+ if (imm < 0)
+ EMIT(PPC_RAW_NOR(dst_reg_h, dst_reg_h, dst_reg_h));
+ fallthrough;
+ case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
+ if (IMM_L(imm))
+ EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
+ if (IMM_H(imm))
+ EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
+ break;
+ case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
+ EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
+ bpf_set_seen_register(ctx, tmp_reg);
+ EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
+ EMIT(PPC_RAW_SLW(dst_reg_h, dst_reg_h, src_reg));
+ EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
+ EMIT(PPC_RAW_SRW(_R0, dst_reg, _R0));
+ EMIT(PPC_RAW_SLW(tmp_reg, dst_reg, tmp_reg));
+ EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0));
+ EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg));
+ break;
+ case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
+ if (!imm)
+ break;
+ EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
+ break;
+ case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
+ if (imm < 0)
+ return -EINVAL;
+ if (!imm)
+ break;
+ if (imm < 32) {
+ EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg_h, imm, 0, 31 - imm));
+ EMIT(PPC_RAW_RLWIMI(dst_reg_h, dst_reg, imm, 32 - imm, 31));
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, imm, 0, 31 - imm));
+ break;
+ }
+ if (imm < 64)
+ EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg, imm, 0, 31 - imm));
+ else
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ break;
+ case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
+ EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
+ bpf_set_seen_register(ctx, tmp_reg);
+ EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
+ EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
+ EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
+ EMIT(PPC_RAW_SRW(tmp_reg, dst_reg_h, tmp_reg));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
+ EMIT(PPC_RAW_SRW(dst_reg_h, dst_reg_h, src_reg));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
+ break;
+ case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
+ if (!imm)
+ break;
+ EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
+ break;
+ case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
+ if (imm < 0)
+ return -EINVAL;
+ if (!imm)
+ break;
+ if (imm < 32) {
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
+ EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
+ EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg_h, 32 - imm, imm, 31));
+ break;
+ }
+ if (imm < 64)
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg_h, 64 - imm, imm - 32, 31));
+ else
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
+ EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
+ bpf_set_seen_register(ctx, tmp_reg);
+ EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
+ EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
+ EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
+ EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26));
+ EMIT(PPC_RAW_SRAW(tmp_reg, dst_reg_h, tmp_reg));
+ EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg_h, src_reg));
+ EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
+ if (!imm)
+ break;
+ EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
+ break;
+ case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
+ if (imm < 0)
+ return -EINVAL;
+ if (!imm)
+ break;
+ if (imm < 32) {
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
+ EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, imm));
+ break;
+ }
+ if (imm < 64)
+ EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg_h, imm - 32));
+ else
+ EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg_h, 31));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, 31));
+ break;
+
+ /*
+ * MOV
+ */
+ case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
+ if (dst_reg == src_reg)
+ break;
+ EMIT(PPC_RAW_MR(dst_reg, src_reg));
+ EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
+ break;
+ case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
+ /* special mov32 for zext */
+ if (imm == 1)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ else if (dst_reg != src_reg)
+ EMIT(PPC_RAW_MR(dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
+ PPC_LI32(dst_reg, imm);
+ PPC_EX32(dst_reg_h, imm);
+ break;
+ case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
+ PPC_LI32(dst_reg, imm);
+ break;
+
+ /*
+ * BPF_FROM_BE/LE
+ */
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ switch (imm) {
+ case 16:
+ /* Copy 16 bits to upper part */
+ EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg, 16, 0, 15));
+ /* Rotate 8 bits right & mask */
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 24, 16, 31));
+ break;
+ case 32:
+ /*
+ * Rotate word left by 8 bits:
+ * 2 bytes are already in their final position
+ * -- bytes 2 and 4 (of bytes 1, 2, 3 and 4)
+ */
+ EMIT(PPC_RAW_RLWINM(_R0, dst_reg, 8, 0, 31));
+ /* Rotate 24 bits and insert byte 1 */
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 0, 7));
+ /* Rotate 24 bits and insert byte 3 */
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 16, 23));
+ EMIT(PPC_RAW_MR(dst_reg, _R0));
+ break;
+ case 64:
+ bpf_set_seen_register(ctx, tmp_reg);
+ EMIT(PPC_RAW_RLWINM(tmp_reg, dst_reg, 8, 0, 31));
+ EMIT(PPC_RAW_RLWINM(_R0, dst_reg_h, 8, 0, 31));
+ /* Rotate 24 bits and insert byte 1 */
+ EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 0, 7));
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 0, 7));
+ /* Rotate 24 bits and insert byte 3 */
+ EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 16, 23));
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 16, 23));
+ EMIT(PPC_RAW_MR(dst_reg, _R0));
+ EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
+ break;
+ }
+ break;
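The FROM_LE rotations above are compact but dense. A minimal C model of the 32-bit byte swap, not part of the patch, using a plain rotate and two masked inserts in place of rlwinm/rlwimi:

#include <stdint.h>

static uint32_t rotl32(uint32_t v, unsigned int n)
{
        return (v << n) | (v >> (32 - n));      /* n in 1..31 here */
}

/* swap bytes 1234 -> 4321: rotl 8 already places bytes 2 and 4;
 * two masked inserts of a 24-bit rotation fix bytes 1 and 3 */
static uint32_t swap32(uint32_t v)
{
        uint32_t r = rotl32(v, 8);

        r = (r & ~0xff000000u) | (rotl32(v, 24) & 0xff000000u); /* byte 1 */
        r = (r & ~0x0000ff00u) | (rotl32(v, 24) & 0x0000ff00u); /* byte 3 */
        return r;
}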
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ switch (imm) {
+ case 16:
+ /* zero-extend 16 bits into 32 bits */
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 16, 31));
+ break;
+ case 32:
+ case 64:
+ /* nop */
+ break;
+ }
+ break;
+
+ /*
+ * BPF_ST(X)
+ */
+ case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
+ EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
+ break;
+ case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STB(_R0, dst_reg, off));
+ break;
+ case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
+ EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
+ break;
+ case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STH(_R0, dst_reg, off));
+ break;
+ case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
+ EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
+ break;
+ case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STW(_R0, dst_reg, off));
+ break;
+ case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
+ EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off));
+ EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4));
+ break;
+ case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4));
+ PPC_EX32(_R0, imm);
+ EMIT(PPC_RAW_STW(_R0, dst_reg, off));
+ break;
+
+ /*
+ * BPF_STX XADD (atomic_add)
+ */
+ case BPF_STX | BPF_XADD | BPF_W: /* *(u32 *)(dst + off) += src */
+ bpf_set_seen_register(ctx, tmp_reg);
+ /* Get offset into TMP_REG */
+ EMIT(PPC_RAW_LI(tmp_reg, off));
+ /* load value from memory into r0 */
+ EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
+ /* add value from src_reg into this */
+ EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
+ /* store result back */
+ EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
+ /* we're done if this succeeded */
+ PPC_BCC_SHORT(COND_NE, (ctx->idx - 3) * 4);
+ break;
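The store sequence above is the classic lwarx/stwcx. reservation loop: PPC_BCC_SHORT branches back three instructions, to the lwarx, whenever another CPU stole the reservation between the load and the conditional store. A rough C11 analogue of the retry structure, for illustration only:

#include <stdatomic.h>

static void xadd32(_Atomic unsigned int *p, unsigned int v)
{
        unsigned int old = atomic_load_explicit(p, memory_order_relaxed);

        /* retry until nobody else wrote *p between the load and the store,
         * mirroring a failed stwcx. falling back to the lwarx */
        while (!atomic_compare_exchange_weak_explicit(p, &old, old + v,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
                ;
}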
+
+ case BPF_STX | BPF_XADD | BPF_DW: /* *(u64 *)(dst + off) += src */
+ return -EOPNOTSUPP;
+
+ /*
+ * BPF_LDX
+ */
+ case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
+ EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+ if (!fp->aux->verifier_zext)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ break;
+ case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
+ EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
+ if (!fp->aux->verifier_zext)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ break;
+ case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+ if (!fp->aux->verifier_zext)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ break;
+ case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
+ EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
+ break;
+
+ /*
+ * Doubleword load
+ * 16 byte instruction that uses two 'struct bpf_insn'
+ */
+ case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
+ PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
+ PPC_LI32(dst_reg, (u32)insn[i].imm);
+ /* Adjust for two bpf instructions */
+ addrs[++i] = ctx->idx * 4;
+ break;
+
+ /*
+ * Return/Exit
+ */
+ case BPF_JMP | BPF_EXIT:
+ /*
+ * If this isn't the very last instruction, branch to
+ * the epilogue. If we _are_ the last instruction,
+ * we'll just fall through to the epilogue.
+ */
+ if (i != flen - 1)
+ PPC_JMP(exit_addr);
+ /* else fall through to the epilogue */
+ break;
+
+ /*
+ * Call kernel helper or bpf function
+ */
+ case BPF_JMP | BPF_CALL:
+ ctx->seen |= SEEN_FUNC;
+
+ ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
+ &func_addr, &func_addr_fixed);
+ if (ret < 0)
+ return ret;
+
+ if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) {
+ EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5) - 1, _R1, 8));
+ EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5), _R1, 12));
+ }
+
+ bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+
+ EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0) - 1, _R3));
+ EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0), _R4));
+ break;
+
+ /*
+ * Jumps and branches
+ */
+ case BPF_JMP | BPF_JA:
+ PPC_JMP(addrs[i + 1 + off]);
+ break;
+
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ true_cond = COND_GT;
+ goto cond_branch;
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ true_cond = COND_LT;
+ goto cond_branch;
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ true_cond = COND_GE;
+ goto cond_branch;
+ case BPF_JMP | BPF_JLE | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ true_cond = COND_LE;
+ goto cond_branch;
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ true_cond = COND_EQ;
+ goto cond_branch;
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ true_cond = COND_NE;
+ goto cond_branch;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ true_cond = COND_NE;
+ /* fallthrough; */
+
+cond_branch:
+ switch (code) {
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ /* unsigned comparison */
+ EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h));
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
+ break;
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ /* unsigned comparison */
+ EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
+ break;
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ /* signed comparison */
+ EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h));
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
+ break;
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ /* signed comparison */
+ EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
+ break;
+ case BPF_JMP | BPF_JSET | BPF_X:
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h));
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
+ break;
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
+ break;
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
+ /*
+ * Need sign-extended load, so only positive
+ * values can be used as imm in cmplwi
+ */
+ if (imm >= 0 && imm < 32768) {
+ EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0));
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
+ } else {
+ /* sign-extending load ... but unsigned comparison */
+ PPC_EX32(_R0, imm);
+ EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0));
+ PPC_LI32(_R0, imm);
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
+ }
+ break;
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ if (imm >= 0 && imm < 65536) {
+ EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
+ } else {
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
+ }
+ break;
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ if (imm >= 0 && imm < 65536) {
+ EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
+ } else {
+ /* sign-extending load */
+ EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
+ PPC_LI32(_R0, imm);
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
+ }
+ break;
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ /*
+ * signed comparison, so any 16-bit value
+ * can be used in cmpwi
+ */
+ if (imm >= -32768 && imm < 32768) {
+ EMIT(PPC_RAW_CMPWI(dst_reg, imm));
+ } else {
+ /* sign-extending load */
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_CMPW(dst_reg, _R0));
+ }
+ break;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ /* andi does not sign-extend the immediate */
+ if (imm >= 0 && imm < 32768) {
+ /* PPC_ANDI is _only/always_ dot-form */
+ EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
+ } else {
+ PPC_LI32(_R0, imm);
+ if (imm < 0) {
+ EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ }
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
+ }
+ break;
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ /* andi does not sign-extend the immediate */
+ if (imm >= -32768 && imm < 32768) {
+ /* PPC_ANDI is _only/always_ dot-form */
+ EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
+ } else {
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
+ }
+ break;
+ }
+ PPC_BCC(true_cond, addrs[i + 1 + off]);
+ break;
+
+ /*
+ * Tail call
+ */
+ case BPF_JMP | BPF_TAIL_CALL:
+ ctx->seen |= SEEN_TAILCALL;
+ bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ break;
+
+ default:
+ /*
+ * The filter contains something cruel & unusual.
+ * We don't handle it, but also there shouldn't be
+ * anything missing from our list.
+ */
+ pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i);
+ return -EOPNOTSUPP;
+ }
+ if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
+ !insn_is_zext(&insn[i + 1]))
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ }
+
+ /* Set end-of-body-code address for exit. */
+ addrs[i] = ctx->idx * 4;
+
+ return 0;
+}
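The BPF_ALU64 | BPF_LSH | BPF_X case above composes a 64-bit shift out of 32-bit operations, relying on the slw/srw semantics that a 6-bit shift amount in the 32..63 range yields zero. A minimal C model of the emitted sequence, not part of the patch, with the register pair written as hi:lo:

#include <stdint.h>

/* slw/srw take a 6-bit shift amount; amounts 32..63 produce 0 */
static uint32_t slw(uint32_t v, uint32_t n) { n &= 0x3f; return n < 32 ? v << n : 0; }
static uint32_t srw(uint32_t v, uint32_t n) { n &= 0x3f; return n < 32 ? v >> n : 0; }

/* models the subfic/slw/addi/srw/slw/or/slw/or sequence for n in 0..63 */
static void lsh64(uint32_t *hi, uint32_t *lo, uint32_t n)
{
        uint32_t r0  = srw(*lo, 32 - n);  /* low bits crossing into hi (n < 32) */
        uint32_t tmp = slw(*lo, n + 32);  /* lo landing in hi once n >= 32 */

        *hi = slw(*hi, n) | r0 | tmp;     /* all reads use the pre-shift values */
        *lo = slw(*lo, n);
}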
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index aaf1a887f653..5cad5b5a7e97 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -18,27 +18,6 @@
#include "bpf_jit64.h"
-static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
-{
- memset32(area, BREAKPOINT_INSTRUCTION, size/4);
-}
-
-static inline void bpf_flush_icache(void *start, void *end)
-{
- smp_wmb();
- flush_icache_range((unsigned long)start, (unsigned long)end);
-}
-
-static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
-{
- return (ctx->seen & (1 << (31 - b2p[i])));
-}
-
-static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
-{
- ctx->seen |= (1 << (31 - b2p[i]));
-}
-
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
/*
@@ -47,7 +26,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
* - the bpf program uses its stack area
* The latter condition is deduced from the usage of BPF_REG_FP
*/
- return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
+ return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, b2p[BPF_REG_FP]);
}
/*
@@ -85,7 +64,11 @@ static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
BUG();
}
-static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
+void bpf_jit_realloc_regs(struct codegen_context *ctx)
+{
+}
+
+void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
int i;
@@ -111,7 +94,7 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
* save/restore LR unless we call other functions
*/
if (ctx->seen & SEEN_FUNC) {
- EMIT(PPC_INST_MFLR | __PPC_RT(R0));
+ EMIT(PPC_RAW_MFLR(_R0));
PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
}
@@ -124,11 +107,11 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
* in the protected zone below the previous stack frame
*/
for (i = BPF_REG_6; i <= BPF_REG_10; i++)
- if (bpf_is_seen_register(ctx, i))
+ if (bpf_is_seen_register(ctx, b2p[i]))
PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
/* Setup frame pointer to point to the bpf stack area */
- if (bpf_is_seen_register(ctx, BPF_REG_FP))
+ if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP]))
EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
@@ -139,7 +122,7 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
/* Restore NVRs */
for (i = BPF_REG_6; i <= BPF_REG_10; i++)
- if (bpf_is_seen_register(ctx, i))
+ if (bpf_is_seen_register(ctx, b2p[i]))
PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
/* Tear down our stack frame */
@@ -152,7 +135,7 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
}
}
-static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
bpf_jit_emit_common_epilogue(image, ctx);
@@ -170,8 +153,8 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
PPC_LI64(b2p[TMP_REG_2], func);
/* Load actual entry point from function descriptor */
PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
- /* ... and move it to LR */
- EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
+ /* ... and move it to CTR */
+ EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
/*
* Load TOC from function descriptor at offset 8.
* We can clobber r2 since we get called through a
@@ -182,13 +165,12 @@ static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
#else
/* We can clobber r12 */
PPC_FUNC_ADDR(12, func);
- EMIT(PPC_RAW_MTLR(12));
+ EMIT(PPC_RAW_MTCTR(12));
#endif
- EMIT(PPC_RAW_BLRL());
+ EMIT(PPC_RAW_BCTRL());
}
-static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
- u64 func)
+void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
unsigned int i, ctx_idx = ctx->idx;
@@ -220,8 +202,8 @@ static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
PPC_BPF_LL(12, 12, 0);
#endif
- EMIT(PPC_RAW_MTLR(12));
- EMIT(PPC_RAW_BLRL());
+ EMIT(PPC_RAW_MTCTR(12));
+ EMIT(PPC_RAW_BCTRL());
}
static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
@@ -289,9 +271,8 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
}
/* Assemble the body code between the prologue & epilogue */
-static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
- struct codegen_context *ctx,
- u32 *addrs, bool extra_pass)
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+ u32 *addrs, bool extra_pass)
{
const struct bpf_insn *insn = fp->insnsi;
int flen = fp->len;
@@ -330,9 +311,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
* any issues.
*/
if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
- bpf_set_seen_register(ctx, insn[i].dst_reg);
+ bpf_set_seen_register(ctx, dst_reg);
if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
- bpf_set_seen_register(ctx, insn[i].src_reg);
+ bpf_set_seen_register(ctx, src_reg);
switch (code) {
/*
@@ -1026,249 +1007,3 @@ cond_branch:
return 0;
}
-
-/* Fix the branch target addresses for subprog calls */
-static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
- struct codegen_context *ctx, u32 *addrs)
-{
- const struct bpf_insn *insn = fp->insnsi;
- bool func_addr_fixed;
- u64 func_addr;
- u32 tmp_idx;
- int i, ret;
-
- for (i = 0; i < fp->len; i++) {
- /*
- * During the extra pass, only the branch target addresses for
- * the subprog calls need to be fixed. All other instructions
- * can left untouched.
- *
- * The JITed image length does not change because we already
- * ensure that the JITed instruction sequence for these calls
- * are of fixed length by padding them with NOPs.
- */
- if (insn[i].code == (BPF_JMP | BPF_CALL) &&
- insn[i].src_reg == BPF_PSEUDO_CALL) {
- ret = bpf_jit_get_func_addr(fp, &insn[i], true,
- &func_addr,
- &func_addr_fixed);
- if (ret < 0)
- return ret;
-
- /*
- * Save ctx->idx as this would currently point to the
- * end of the JITed image and set it to the offset of
- * the instruction sequence corresponding to the
- * subprog call temporarily.
- */
- tmp_idx = ctx->idx;
- ctx->idx = addrs[i] / 4;
- bpf_jit_emit_func_call_rel(image, ctx, func_addr);
-
- /*
- * Restore ctx->idx here. This is safe as the length
- * of the JITed sequence remains unchanged.
- */
- ctx->idx = tmp_idx;
- }
- }
-
- return 0;
-}
-
-struct powerpc64_jit_data {
- struct bpf_binary_header *header;
- u32 *addrs;
- u8 *image;
- u32 proglen;
- struct codegen_context ctx;
-};
-
-bool bpf_jit_needs_zext(void)
-{
- return true;
-}
-
-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
-{
- u32 proglen;
- u32 alloclen;
- u8 *image = NULL;
- u32 *code_base;
- u32 *addrs;
- struct powerpc64_jit_data *jit_data;
- struct codegen_context cgctx;
- int pass;
- int flen;
- struct bpf_binary_header *bpf_hdr;
- struct bpf_prog *org_fp = fp;
- struct bpf_prog *tmp_fp;
- bool bpf_blinded = false;
- bool extra_pass = false;
-
- if (!fp->jit_requested)
- return org_fp;
-
- tmp_fp = bpf_jit_blind_constants(org_fp);
- if (IS_ERR(tmp_fp))
- return org_fp;
-
- if (tmp_fp != org_fp) {
- bpf_blinded = true;
- fp = tmp_fp;
- }
-
- jit_data = fp->aux->jit_data;
- if (!jit_data) {
- jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
- if (!jit_data) {
- fp = org_fp;
- goto out;
- }
- fp->aux->jit_data = jit_data;
- }
-
- flen = fp->len;
- addrs = jit_data->addrs;
- if (addrs) {
- cgctx = jit_data->ctx;
- image = jit_data->image;
- bpf_hdr = jit_data->header;
- proglen = jit_data->proglen;
- alloclen = proglen + FUNCTION_DESCR_SIZE;
- extra_pass = true;
- goto skip_init_ctx;
- }
-
- addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
- if (addrs == NULL) {
- fp = org_fp;
- goto out_addrs;
- }
-
- memset(&cgctx, 0, sizeof(struct codegen_context));
-
- /* Make sure that the stack is quadword aligned. */
- cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
-
- /* Scouting faux-generate pass 0 */
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
- /* We hit something illegal or unsupported. */
- fp = org_fp;
- goto out_addrs;
- }
-
- /*
- * If we have seen a tail call, we need a second pass.
- * This is because bpf_jit_emit_common_epilogue() is called
- * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
- */
- if (cgctx.seen & SEEN_TAILCALL) {
- cgctx.idx = 0;
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
- fp = org_fp;
- goto out_addrs;
- }
- }
-
- /*
- * Pretend to build prologue, given the features we've seen. This will
- * update ctgtx.idx as it pretends to output instructions, then we can
- * calculate total size from idx.
- */
- bpf_jit_build_prologue(0, &cgctx);
- bpf_jit_build_epilogue(0, &cgctx);
-
- proglen = cgctx.idx * 4;
- alloclen = proglen + FUNCTION_DESCR_SIZE;
-
- bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
- bpf_jit_fill_ill_insns);
- if (!bpf_hdr) {
- fp = org_fp;
- goto out_addrs;
- }
-
-skip_init_ctx:
- code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
-
- if (extra_pass) {
- /*
- * Do not touch the prologue and epilogue as they will remain
- * unchanged. Only fix the branch target address for subprog
- * calls in the body.
- *
- * This does not change the offsets and lengths of the subprog
- * call instruction sequences and hence, the size of the JITed
- * image as well.
- */
- bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
-
- /* There is no need to perform the usual passes. */
- goto skip_codegen_passes;
- }
-
- /* Code generation passes 1-2 */
- for (pass = 1; pass < 3; pass++) {
- /* Now build the prologue, body code & epilogue for real. */
- cgctx.idx = 0;
- bpf_jit_build_prologue(code_base, &cgctx);
- bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
- bpf_jit_build_epilogue(code_base, &cgctx);
-
- if (bpf_jit_enable > 1)
- pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
- proglen - (cgctx.idx * 4), cgctx.seen);
- }
-
-skip_codegen_passes:
- if (bpf_jit_enable > 1)
- /*
- * Note that we output the base address of the code_base
- * rather than image, since opcodes are in code_base.
- */
- bpf_jit_dump(flen, proglen, pass, code_base);
-
-#ifdef PPC64_ELF_ABI_v1
- /* Function descriptor nastiness: Address + TOC */
- ((u64 *)image)[0] = (u64)code_base;
- ((u64 *)image)[1] = local_paca->kernel_toc;
-#endif
-
- fp->bpf_func = (void *)image;
- fp->jited = 1;
- fp->jited_len = alloclen;
-
- bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
- if (!fp->is_func || extra_pass) {
- bpf_prog_fill_jited_linfo(fp, addrs);
-out_addrs:
- kfree(addrs);
- kfree(jit_data);
- fp->aux->jit_data = NULL;
- } else {
- jit_data->addrs = addrs;
- jit_data->ctx = cgctx;
- jit_data->proglen = proglen;
- jit_data->image = image;
- jit_data->header = bpf_hdr;
- }
-
-out:
- if (bpf_blinded)
- bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
-
- return fp;
-}
-
-/* Overriding bpf_jit_free() as we don't set images read-only. */
-void bpf_jit_free(struct bpf_prog *fp)
-{
- unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
- struct bpf_binary_header *bpf_hdr = (void *)addr;
-
- if (fp->jited)
- bpf_jit_binary_free(bpf_hdr);
-
- bpf_prog_unlock_free(fp);
-}
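A note on the helpers deleted above: this series moves bpf_is_seen_register()/bpf_set_seen_register() into the common JIT code and has them take the PPC register number directly, which is why the callers in the hunks above switch from i to b2p[i]. The bitmap convention itself is simple; an illustrative sketch:

#include <stdbool.h>

/* one bit per PPC GPR, stored MSB-first: GPR r occupies bit (31 - r) */
#define SEEN_GPR(r)     (1u << (31 - (r)))

static bool is_seen(unsigned int seen, int ppc_reg)
{
        return seen & SEEN_GPR(ppc_reg);
}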
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index c02854dea2b2..2f46e31c7612 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -1,9 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PERF_EVENTS) += callchain.o callchain_$(BITS).o perf_regs.o
-ifdef CONFIG_COMPAT
-obj-$(CONFIG_PERF_EVENTS) += callchain_32.o
-endif
+obj-y += callchain.o callchain_$(BITS).o perf_regs.o
+obj-$(CONFIG_COMPAT) += callchain_32.o
obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o
obj64-$(CONFIG_PPC_PERF_CTRS) += ppc970-pmu.o power5-pmu.o \
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 6c028ee513c0..082f6d0308a4 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -40,7 +40,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
return 0;
}
-void
+void __no_sanitize_address
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
unsigned long sp, next_sp;
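The __no_sanitize_address annotation added above exempts the kernel stack walker from KASAN instrumentation: walking saved frames means reading stack memory that KASAN may flag. It expands to roughly the attribute below (illustrative sketch, not the kernel's macro definition):

/* GCC/Clang spelling of the annotation */
static void __attribute__((no_sanitize_address))
walk_frames(unsigned long sp)
{
        /* deliberately reads stack slots outside the current frame */
        (void)sp;
}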
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 6817331e22ff..bb0ee716de91 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -17,6 +17,7 @@
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>
+#include <asm/interrupt.h>
#ifdef CONFIG_PPC64
#include "internal.h"
@@ -168,7 +169,7 @@ static bool regs_use_siar(struct pt_regs *regs)
* they have not been setup using perf_read_regs() and so regs->result
* is something random.
*/
- return ((TRAP(regs) == 0xf00) && regs->result);
+ return ((TRAP(regs) == INTERRUPT_PERFMON) && regs->result);
}
/*
@@ -222,7 +223,7 @@ static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *
if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
*addrp = mfspr(SPRN_SDAR);
- if (is_kernel_addr(mfspr(SPRN_SDAR)) && perf_allow_kernel(&event->attr) != 0)
+ if (is_kernel_addr(mfspr(SPRN_SDAR)) && event->attr.exclude_kernel)
*addrp = 0;
}
@@ -347,7 +348,7 @@ static inline void perf_read_regs(struct pt_regs *regs)
* hypervisor samples as well as samples in the kernel with
* interrupts off hence the userspace check.
*/
- if (TRAP(regs) != 0xf00)
+ if (TRAP(regs) != INTERRUPT_PERFMON)
use_siar = 0;
else if ((ppmu->flags & PPMU_NO_SIAR))
use_siar = 0;
@@ -459,7 +460,7 @@ static __u64 power_pmu_bhrb_to(u64 addr)
sizeof(instr)))
return 0;
- return branch_target((struct ppc_inst *)&instr);
+ return branch_target(&instr);
}
/* Userspace: need copy instruction here then translate it */
@@ -467,7 +468,7 @@ static __u64 power_pmu_bhrb_to(u64 addr)
sizeof(instr)))
return 0;
- target = branch_target((struct ppc_inst *)&instr);
+ target = branch_target(&instr);
if ((!target) || (instr & BRANCH_ABSOLUTE))
return target;
@@ -507,7 +508,7 @@ static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *
* addresses, hence include a check before filtering code
*/
if (!(ppmu->flags & PPMU_ARCH_31) &&
- is_kernel_addr(addr) && perf_allow_kernel(&event->attr) != 0)
+ is_kernel_addr(addr) && event->attr.exclude_kernel)
continue;
/* Branches are read most recent first (ie. mfbhrb 0 is
@@ -1963,6 +1964,17 @@ static int power_pmu_event_init(struct perf_event *event)
return -ENOENT;
}
+ /*
+ * Some fields in the PMU config registers are reserved, as are some
+ * specific values for certain bit fields. For example, MMCRA[61:62]
+ * is Random Sampling Mode (SM), and the value 0b11 for that field is
+ * reserved.
+ * Check for invalid values in attr.config.
+ */
+ if (ppmu->check_attr_config &&
+ ppmu->check_attr_config(event))
+ return -EINVAL;
+
event->hw.config_base = ev;
event->hw.idx = 0;
@@ -2206,9 +2218,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
ppmu->get_mem_data_src)
ppmu->get_mem_data_src(&data.data_src, ppmu->flags, regs);
- if (event->attr.sample_type & PERF_SAMPLE_WEIGHT &&
+ if (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE &&
ppmu->get_mem_weight)
- ppmu->get_mem_weight(&data.weight.full);
+ ppmu->get_mem_weight(&data.weight.full, event->attr.sample_type);
if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
@@ -2242,7 +2254,7 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
bool use_siar = regs_use_siar(regs);
unsigned long siar = mfspr(SPRN_SIAR);
- if (ppmu->flags & PPMU_P10_DD1) {
+ if (ppmu && (ppmu->flags & PPMU_P10_DD1)) {
if (siar)
return siar;
else
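With the check_attr_config hook added above, a raw event whose config encodes a reserved value is now rejected at event init. A hedged user-space sketch of what that looks like through perf_event_open(); the raw_config value is a placeholder, since the exact encoding is PMU-specific:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int open_raw_event(unsigned long long raw_config)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = raw_config;       /* returns -1/EINVAL if this hits a
                                           reserved encoding, e.g. SM = 0b11 */

        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}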
diff --git a/arch/powerpc/perf/generic-compat-pmu.c b/arch/powerpc/perf/generic-compat-pmu.c
index eb8a6aaf4cc1..695975227e60 100644
--- a/arch/powerpc/perf/generic-compat-pmu.c
+++ b/arch/powerpc/perf/generic-compat-pmu.c
@@ -14,45 +14,119 @@
*
* 28 24 20 16 12 8 4 0
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ pmc ] [unit ] [ ] m [ pmcxsel ]
- * | |
- * | *- mark
- * |
- * |
- * *- combine
- *
- * Below uses IBM bit numbering.
- *
- * MMCR1[x:y] = unit (PMCxUNIT)
- * MMCR1[24] = pmc1combine[0]
- * MMCR1[25] = pmc1combine[1]
- * MMCR1[26] = pmc2combine[0]
- * MMCR1[27] = pmc2combine[1]
- * MMCR1[28] = pmc3combine[0]
- * MMCR1[29] = pmc3combine[1]
- * MMCR1[30] = pmc4combine[0]
- * MMCR1[31] = pmc4combine[1]
- *
+ * [ pmc ] [ pmcxsel ]
*/
/*
- * Some power9 event codes.
+ * Event codes defined in ISA v3.0B
*/
#define EVENT(_name, _code) _name = _code,
enum {
-EVENT(PM_CYC, 0x0001e)
-EVENT(PM_INST_CMPL, 0x00002)
+ /* Cycles, alternate code */
+ EVENT(PM_CYC_ALT, 0x100f0)
+ /* One or more instructions completed in a cycle */
+ EVENT(PM_CYC_INST_CMPL, 0x100f2)
+ /* Floating-point instruction completed */
+ EVENT(PM_FLOP_CMPL, 0x100f4)
+ /* Instruction ERAT/L1-TLB miss */
+ EVENT(PM_L1_ITLB_MISS, 0x100f6)
+ /* All instructions completed and none available */
+ EVENT(PM_NO_INST_AVAIL, 0x100f8)
+ /* A load-type instruction completed (ISA v3.0+) */
+ EVENT(PM_LD_CMPL, 0x100fc)
+ /* Instruction completed, alternate code (ISA v3.0+) */
+ EVENT(PM_INST_CMPL_ALT, 0x100fe)
+ /* A store-type instruction completed */
+ EVENT(PM_ST_CMPL, 0x200f0)
+ /* Instruction Dispatched */
+ EVENT(PM_INST_DISP, 0x200f2)
+ /* Run_cycles */
+ EVENT(PM_RUN_CYC, 0x200f4)
+ /* Data ERAT/L1-TLB miss/reload */
+ EVENT(PM_L1_DTLB_RELOAD, 0x200f6)
+ /* Taken branch completed */
+ EVENT(PM_BR_TAKEN_CMPL, 0x200fa)
+ /* Demand iCache Miss */
+ EVENT(PM_L1_ICACHE_MISS, 0x200fc)
+ /* L1 Dcache reload from memory */
+ EVENT(PM_L1_RELOAD_FROM_MEM, 0x200fe)
+ /* L1 Dcache store miss */
+ EVENT(PM_ST_MISS_L1, 0x300f0)
+ /* Alternate code for PM_INST_DISP */
+ EVENT(PM_INST_DISP_ALT, 0x300f2)
+ /* Branch direction or target mispredicted */
+ EVENT(PM_BR_MISPREDICT, 0x300f6)
+ /* Data TLB miss/reload */
+ EVENT(PM_DTLB_MISS, 0x300fc)
+ /* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+ EVENT(PM_DATA_FROM_L3MISS, 0x300fe)
+ /* L1 Dcache load miss */
+ EVENT(PM_LD_MISS_L1, 0x400f0)
+ /* Cycle when instruction(s) dispatched */
+ EVENT(PM_CYC_INST_DISP, 0x400f2)
+ /* Branch or branch target mispredicted */
+ EVENT(PM_BR_MPRED_CMPL, 0x400f6)
+ /* Instructions completed with run latch set */
+ EVENT(PM_RUN_INST_CMPL, 0x400fa)
+ /* Instruction TLB miss/reload */
+ EVENT(PM_ITLB_MISS, 0x400fc)
+ /* Load data not cached */
+ EVENT(PM_LD_NOT_CACHED, 0x400fe)
+ /* Instructions */
+ EVENT(PM_INST_CMPL, 0x500fa)
+ /* Cycles */
+ EVENT(PM_CYC, 0x600f4)
};
#undef EVENT
+/* Table of alternatives, sorted in increasing order of column 0 */
+/* Note that in each row, column 0 must be the smallest */
+static const unsigned int generic_event_alternatives[][MAX_ALT] = {
+ { PM_CYC_ALT, PM_CYC },
+ { PM_INST_CMPL_ALT, PM_INST_CMPL },
+ { PM_INST_DISP, PM_INST_DISP_ALT },
+};
+
+static int generic_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int num_alt = 0;
+
+ num_alt = isa207_get_alternatives(event, alt,
+ ARRAY_SIZE(generic_event_alternatives), flags,
+ generic_event_alternatives);
+
+ return num_alt;
+}
+
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_NO_INST_AVAIL);
+GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
+GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
+
+CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
+CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
+CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
+CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
+CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
+CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
+CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
static struct attribute *generic_compat_events_attr[] = {
GENERIC_EVENT_PTR(PM_CYC),
GENERIC_EVENT_PTR(PM_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_NO_INST_AVAIL),
+ GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
+ GENERIC_EVENT_PTR(PM_LD_MISS_L1),
+ CACHE_EVENT_PTR(PM_LD_MISS_L1),
+ CACHE_EVENT_PTR(PM_ST_MISS_L1),
+ CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
+ CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
+ CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
+ CACHE_EVENT_PTR(PM_DTLB_MISS),
+ CACHE_EVENT_PTR(PM_ITLB_MISS),
NULL
};
@@ -63,17 +137,11 @@ static struct attribute_group generic_compat_pmu_events_group = {
PMU_FORMAT_ATTR(event, "config:0-19");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
-PMU_FORMAT_ATTR(mark, "config:8");
-PMU_FORMAT_ATTR(combine, "config:10-11");
-PMU_FORMAT_ATTR(unit, "config:12-15");
PMU_FORMAT_ATTR(pmc, "config:16-19");
static struct attribute *generic_compat_pmu_format_attr[] = {
&format_attr_event.attr,
&format_attr_pmcxsel.attr,
- &format_attr_mark.attr,
- &format_attr_combine.attr,
- &format_attr_unit.attr,
&format_attr_pmc.attr,
NULL,
};
@@ -92,6 +160,9 @@ static const struct attribute_group *generic_compat_pmu_attr_groups[] = {
static int compat_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_NO_INST_AVAIL,
+ [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
+ [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
@@ -105,11 +176,11 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
@@ -119,7 +190,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(L1I) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
@@ -133,7 +204,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(LL) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
@@ -147,7 +218,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_DTLB_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
@@ -161,7 +232,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ITLB_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
@@ -175,7 +246,7 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(BPU) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
@@ -204,13 +275,30 @@ static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
#undef C
+/*
+ * We set MMCR0[CC5-6RUN] so we can use counters 5 and 6 for
+ * PM_INST_CMPL and PM_CYC.
+ */
+static int generic_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], struct mmcr_regs *mmcr,
+ struct perf_event *pevents[], u32 flags)
+{
+ int ret;
+
+ ret = isa207_compute_mmcr(event, n_ev, hwc, mmcr, pevents, flags);
+ if (!ret)
+ mmcr->mmcr0 |= MMCR0_C56RUN;
+ return ret;
+}
+
static struct power_pmu generic_compat_pmu = {
.name = "GENERIC_COMPAT",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
.test_adder = ISA207_TEST_ADDER,
- .compute_mmcr = isa207_compute_mmcr,
+ .compute_mmcr = generic_compute_mmcr,
.get_constraint = isa207_get_constraint,
+ .get_alternatives = generic_get_alternatives,
.disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(compat_generic_events),
@@ -223,6 +311,16 @@ int init_generic_compat_pmu(void)
{
int rc = 0;
+ /*
+ * From ISA v2.07 on, PMU features are architected;
+ * we require >= v3.0 because (a) that has PM_LD_CMPL and
+ * PM_INST_CMPL_ALT, which v2.07 doesn't have, and
+ * (b) we don't expect any non-IBM Power ISA
+ * implementations that conform to v2.07 but not v3.0.
+ */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return -ENODEV;
+
rc = register_power_pmu(&generic_compat_pmu);
if (rc)
return rc;
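isa207_get_alternatives() walks a table like generic_event_alternatives[] above and, when it finds the event in a row, hands back every code in that row so the scheduler can move the event to another counter. A simplified model of that lookup, assuming the same two-column row layout (the real helper also honours the flags argument):

static int find_alternatives(unsigned int event,
                             const unsigned int table[][2], int rows,
                             unsigned int alt[])
{
        int i, j, n;

        for (i = 0; i < rows; i++)
                for (j = 0; j < 2; j++)
                        if (table[i][j] == event) {
                                /* return the whole row: every equivalent code */
                                for (n = 0; n < 2; n++)
                                        alt[n] = table[i][n];
                                return 2;
                        }
        return 0;
}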
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index e5eb33255066..1816f560a465 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -226,14 +226,14 @@ static struct attribute_group event_long_desc_group = {
static struct kmem_cache *hv_page_cache;
-DEFINE_PER_CPU(int, hv_24x7_txn_flags);
-DEFINE_PER_CPU(int, hv_24x7_txn_err);
+static DEFINE_PER_CPU(int, hv_24x7_txn_flags);
+static DEFINE_PER_CPU(int, hv_24x7_txn_err);
struct hv_24x7_hw {
struct perf_event *events[255];
};
-DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw);
+static DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw);
/*
* request_buffer and result_buffer are not required to be 4k aligned,
@@ -241,8 +241,8 @@ DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw);
* the simplest way to ensure that.
*/
#define H24x7_DATA_BUFFER_SIZE 4096
-DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
-DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
+static DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
+static DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
static unsigned int max_num_requests(int interface_version)
{
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index e4f577da33d8..f92bf5f6b74f 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -21,7 +21,7 @@ PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
PMU_FORMAT_ATTR(thresh_start, "config:36-39");
PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
-struct attribute *isa207_pmu_format_attr[] = {
+static struct attribute *isa207_pmu_format_attr[] = {
&format_attr_event.attr,
&format_attr_pmcxsel.attr,
&format_attr_mark.attr,
@@ -275,17 +275,47 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
sier = mfspr(SPRN_SIER);
val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
- if (val == 1 || val == 2) {
- idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
- sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;
+ if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31)))
+ return;
+
+ idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
+ sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;
+
+ dsrc->val = isa207_find_source(idx, sub_idx);
+ if (val == 7) {
+ u64 mmcra;
+ u32 op_type;
- dsrc->val = isa207_find_source(idx, sub_idx);
+ /*
+ * Type 0b111 denotes either a larx or an stcx instruction. Use the
+ * MMCRA sampling bits [57:59] along with the type value
+ * to determine the exact instruction type. If the sampling
+ * criterion is neither a load nor a store, set the type to the
+ * default, NA.
+ */
+ mmcra = mfspr(SPRN_MMCRA);
+
+ op_type = (mmcra >> MMCRA_SAMP_ELIG_SHIFT) & MMCRA_SAMP_ELIG_MASK;
+ switch (op_type) {
+ case 5:
+ dsrc->val |= P(OP, LOAD);
+ break;
+ case 7:
+ dsrc->val |= P(OP, STORE);
+ break;
+ default:
+ dsrc->val |= P(OP, NA);
+ break;
+ }
+ } else {
dsrc->val |= (val == 1) ? P(OP, LOAD) : P(OP, STORE);
}
}
-void isa207_get_mem_weight(u64 *weight)
+void isa207_get_mem_weight(u64 *weight, u64 type)
{
+ union perf_sample_weight *weight_fields;
+ u64 weight_lat;
u64 mmcra = mfspr(SPRN_MMCRA);
u64 exp = MMCRA_THR_CTR_EXP(mmcra);
u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
@@ -295,10 +325,31 @@ void isa207_get_mem_weight(u64 *weight)
if (cpu_has_feature(CPU_FTR_ARCH_31))
mantissa = P10_MMCRA_THR_CTR_MANT(mmcra);
- if (val == 0 || val == 7)
- *weight = 0;
+ if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31)))
+ weight_lat = 0;
else
- *weight = mantissa << (2 * exp);
+ weight_lat = mantissa << (2 * exp);
+
+ /*
+ * Use 64 bit weight field (full) if sample type is
+ * WEIGHT.
+ *
+ * if sample type is WEIGHT_STRUCT:
+ * - store memory latency in the lower 32 bits.
+ * - For ISA v3.1, use remaining two 16 bit fields of
+ * perf_sample_weight to store cycle counter values
+ * from sier2.
+ */
+ weight_fields = (union perf_sample_weight *)weight;
+ if (type & PERF_SAMPLE_WEIGHT)
+ weight_fields->full = weight_lat;
+ else {
+ weight_fields->var1_dw = (u32)weight_lat;
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ weight_fields->var2_w = P10_SIER2_FINISH_CYC(mfspr(SPRN_SIER2));
+ weight_fields->var3_w = P10_SIER2_DISPATCH_CYC(mfspr(SPRN_SIER2));
+ }
+ }
}
int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1)
@@ -447,8 +498,8 @@ ebb_bhrb:
* EBB events are pinned & exclusive, so this should never actually
* hit, but we leave it as a fallback in case.
*/
- mask |= CNST_EBB_VAL(ebb);
- value |= CNST_EBB_MASK;
+ mask |= CNST_EBB_MASK;
+ value |= CNST_EBB_VAL(ebb);
*maskp = mask;
*valp = value;
@@ -694,3 +745,45 @@ int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags,
return num_alt;
}
+
+int isa3XX_check_attr_config(struct perf_event *ev)
+{
+ u64 val, sample_mode;
+ u64 event = ev->attr.config;
+
+ val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
+ sample_mode = val & 0x3;
+
+ /*
+ * MMCRA[61:62] is Random Sampling Mode (SM).
+ * The value 0b11 is reserved.
+ */
+ if (sample_mode == 0x3)
+ return -EINVAL;
+
+ /*
+ * Check for all reserved values.
+ * Source: Performance Monitoring Unit User Guide
+ */
+ switch (val) {
+ case 0x5:
+ case 0x9:
+ case 0xD:
+ case 0x19:
+ case 0x1D:
+ case 0x1A:
+ case 0x1E:
+ return -EINVAL;
+ }
+
+ /*
+ * MMCRA[48:51]/[52:55] Threshold Start/Stop
+ * Event Selection.
+ * 0b11110000/0b00001111 are reserved.
+ */
+ val = (event >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
+ if (((val & 0xF0) == 0xF0) || ((val & 0xF) == 0xF))
+ return -EINVAL;
+
+ return 0;
+}
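isa207_get_mem_weight() above packs its result according to the sample type: PERF_SAMPLE_WEIGHT consumers get the full 64-bit latency, while PERF_SAMPLE_WEIGHT_STRUCT consumers get the latency in the low 32 bits plus, on ISA v3.1, two 16-bit cycle counts from SIER2. A sketch of the layout, mirroring union perf_sample_weight from the perf uapi (little-endian field order shown; the real header also carries a big-endian variant):

#include <stdint.h>

union sample_weight {
        uint64_t full;                  /* PERF_SAMPLE_WEIGHT */
        struct {                        /* PERF_SAMPLE_WEIGHT_STRUCT */
                uint32_t var1_dw;       /* memory access latency */
                uint16_t var2_w;        /* P10: instruction finish cycles */
                uint16_t var3_w;        /* P10: dispatch cycles */
        };
};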
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index 1af0e8c97ac7..4a2cbc3dc047 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -220,6 +220,7 @@
/* Bits in MMCRA for PowerISA v2.07 */
#define MMCRA_SAMP_MODE_SHIFT 1
#define MMCRA_SAMP_ELIG_SHIFT 4
+#define MMCRA_SAMP_ELIG_MASK 7
#define MMCRA_THR_CTL_SHIFT 8
#define MMCRA_THR_SEL_SHIFT 16
#define MMCRA_THR_CMP_SHIFT 32
@@ -265,6 +266,10 @@
#define ISA207_SIER_DATA_SRC_SHIFT 53
#define ISA207_SIER_DATA_SRC_MASK (0x7ull << ISA207_SIER_DATA_SRC_SHIFT)
+/* Bits in SIER2/SIER3 for Power10 */
+#define P10_SIER2_FINISH_CYC(sier2) (((sier2) >> (63 - 37)) & 0x7fful)
+#define P10_SIER2_DISPATCH_CYC(sier2) (((sier2) >> (63 - 13)) & 0x7fful)
+
#define P(a, b) PERF_MEM_S(a, b)
#define PH(a, b) (P(LVL, HIT) | P(a, b))
#define PM(a, b) (P(LVL, MISS) | P(a, b))
@@ -278,6 +283,8 @@ int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags,
const unsigned int ev_alt[][MAX_ALT]);
void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
struct pt_regs *regs);
-void isa207_get_mem_weight(u64 *weight);
+void isa207_get_mem_weight(u64 *weight, u64 type);
+
+int isa3XX_check_attr_config(struct perf_event *ev);
#endif
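The P10_SIER2_* macros above use IBM (MSB-0) bit numbering, where bit 0 is the most significant bit of the 64-bit SPR; a field whose last bit is IBM bit b is therefore extracted by shifting right by (63 - b). A generic illustration of the same pattern (not from the patch):

/* extract a field from a 64-bit SPR given its last IBM bit and its width */
#define SPR_FIELD(reg, last_ibm_bit, width) \
        (((reg) >> (63 - (last_ibm_bit))) & ((1ull << (width)) - 1))

/* P10_SIER2_FINISH_CYC(sier2) is equivalent to SPR_FIELD(sier2, 37, 11) */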
diff --git a/arch/powerpc/perf/power10-events-list.h b/arch/powerpc/perf/power10-events-list.h
index e45dafe818ed..93be7197d250 100644
--- a/arch/powerpc/perf/power10-events-list.h
+++ b/arch/powerpc/perf/power10-events-list.h
@@ -75,5 +75,5 @@ EVENT(PM_RUN_INST_CMPL_ALT, 0x00002);
* thresh end (TE)
*/
-EVENT(MEM_LOADS, 0x34340401e0);
-EVENT(MEM_STORES, 0x343c0401e0);
+EVENT(MEM_LOADS, 0x35340401e0);
+EVENT(MEM_STORES, 0x353c0401e0);
diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
index a901c1348cad..f9d64c63bb4a 100644
--- a/arch/powerpc/perf/power10-pmu.c
+++ b/arch/powerpc/perf/power10-pmu.c
@@ -106,6 +106,18 @@ static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
return num_alt;
}
+static int power10_check_attr_config(struct perf_event *ev)
+{
+ u64 val;
+ u64 event = ev->attr.config;
+
+ val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
+ if (val == 0x10 || isa3XX_check_attr_config(ev))
+ return -EINVAL;
+
+ return 0;
+}
+
GENERIC_EVENT_ATTR(cpu-cycles, PM_RUN_CYC);
GENERIC_EVENT_ATTR(instructions, PM_RUN_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions, PM_BR_CMPL);
@@ -559,6 +571,7 @@ static struct power_pmu power10_pmu = {
.attr_groups = power10_pmu_attr_groups,
.bhrb_nr = 32,
.capabilities = PERF_PMU_CAP_EXTENDED_REGS,
+ .check_attr_config = power10_check_attr_config,
};
int init_power10_pmu(void)
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 2a57e93a79dc..ff3382140d7e 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -151,6 +151,18 @@ static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
return num_alt;
}
+static int power9_check_attr_config(struct perf_event *ev)
+{
+ u64 val;
+ u64 event = ev->attr.config;
+
+ val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
+ if (val == 0xC || isa3XX_check_attr_config(ev))
+ return -EINVAL;
+
+ return 0;
+}
+
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
@@ -437,6 +449,7 @@ static struct power_pmu power9_pmu = {
.attr_groups = power9_pmu_attr_groups,
.bhrb_nr = 32,
.capabilities = PERF_PMU_CAP_EXTENDED_REGS,
+ .check_attr_config = power9_check_attr_config,
};
int init_power9_pmu(void)
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 7d41e9264510..83975ef50975 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -5,7 +5,7 @@ config PPC_47x
select MPIC
help
This option enables support for the 47x family of processors and is
- not currently compatible with other 44x or 46x varients
+ not currently compatible with other 44x or 46x variants
config BAMBOO
bool "Bamboo"
diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
index 11475c58ea43..afee8b1515a8 100644
--- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
+++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
@@ -181,7 +181,7 @@ sram_code:
udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
mullw r12, r12, r11
mftb r13 /* start */
- addi r12, r13, r12 /* end */
+ add r12, r13, r12 /* end */
1:
mftb r13 /* current */
cmp cr0, r13, r12
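The one-line fix above is a classic PowerPC assembler pitfall: addi takes a signed immediate as its third operand, and in kernel asm the register names are plain numeric defines, so the old line assembled as an add of the constant 12 rather than of register r12. For illustration:

/*
 * addi r12, r13, r12   =>  r12 = r13 + 12    ("r12" expands to the number 12)
 * add  r12, r13, r12   =>  r12 = r13 + r12   (register-register form)
 */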
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 8c0d324f657e..3823df235f25 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -582,6 +582,7 @@ static long mpc52xx_wdt_ioctl(struct file *file, unsigned int cmd,
if (ret)
break;
/* fall through and return the timeout */
+ fallthrough;
case WDIOC_GETTIMEOUT:
/* we need to round here as to avoid e.g. the following
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index 87f524e4b09c..8a7e55acf090 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -73,7 +73,7 @@ smp_86xx_kick_cpu(int nr)
/* Setup fake reset vector to call __secondary_start_mpc86xx. */
target = (unsigned long) __secondary_start_mpc86xx;
- patch_branch((struct ppc_inst *)vector, target, BRANCH_SET_LINK);
+ patch_branch(vector, target, BRANCH_SET_LINK);
/* Kick that CPU */
smp_86xx_release_core(nr);
@@ -83,7 +83,7 @@ smp_86xx_kick_cpu(int nr)
mdelay(1);
/* Restore the exception vector */
- patch_instruction((struct ppc_inst *)vector, ppc_inst(save_vector));
+ patch_instruction(vector, ppc_inst(save_vector));
local_irq_restore(flags);
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 7a5e8f4541e3..e02d29a9d12f 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -20,6 +20,8 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig"
source "arch/powerpc/platforms/44x/Kconfig"
source "arch/powerpc/platforms/40x/Kconfig"
source "arch/powerpc/platforms/amigaone/Kconfig"
+source "arch/powerpc/platforms/book3s/Kconfig"
+source "arch/powerpc/platforms/microwatt/Kconfig"
config KVM_GUEST
bool "KVM Guest support"
@@ -49,6 +51,7 @@ config PPC_NATIVE
config PPC_OF_BOOT_TRAMPOLINE
bool "Support booting from Open Firmware or yaboot"
depends on PPC_BOOK3S_32 || PPC64
+ select RELOCATABLE if PPC64
default y
help
Support from booting from Open Firmware or yaboot using an
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 3ce907523b1e..0e1bb1cedd13 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -40,8 +40,8 @@ config PPC_85xx
config PPC_8xx
bool "Freescale 8xx"
+ select ARCH_SUPPORTS_HUGETLBFS
select FSL_SOC
- select SYS_SUPPORTS_HUGETLBFS
select PPC_HAVE_KUEP
select PPC_HAVE_KUAP
select HAVE_ARCH_VMAP_STACK
@@ -61,6 +61,7 @@ config 44x
select 4xx_SOC
select HAVE_PCI
select PHYS_64BIT
+ select PPC_HAVE_KUEP
endchoice
@@ -95,18 +96,23 @@ config PPC_BOOK3S_64
bool "Server processors"
select PPC_FPU
select PPC_HAVE_PMU_SUPPORT
- select SYS_SUPPORTS_HUGETLBFS
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
+ select ARCH_ENABLE_PMD_SPLIT_PTLOCK
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
+ select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_NUMA_BALANCING
select IRQ_WORK
select PPC_MM_SLICES
+ select PPC_HAVE_KUEP
+ select PPC_HAVE_KUAP
config PPC_BOOK3E_64
bool "Embedded processors"
select PPC_FPU # Make it a choice ?
select PPC_SMP_MUXED_IPI
select PPC_DOORBELL
+ select ZONE_DMA
endchoice
@@ -278,9 +284,9 @@ config FSL_BOOKE
# this is for common code between PPC32 & PPC64 FSL BOOKE
config PPC_FSL_BOOK3E
bool
+ select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64
select FSL_EMB_PERFMON
select PPC_SMP_MUXED_IPI
- select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64
select PPC_DOORBELL
default y if FSL_BOOKE
@@ -306,6 +312,7 @@ config PHYS_64BIT
config ALTIVEC
bool "AltiVec Support"
depends on PPC_BOOK3S_32 || PPC_BOOK3S_64 || (PPC_E500MC && PPC64)
+ select PPC_FPU
help
This option enables kernel support for the Altivec extensions to the
PowerPC processor. The kernel currently supports saving and restoring
@@ -355,16 +362,10 @@ config SPE
If in doubt, say Y here.
-config ARCH_ENABLE_SPLIT_PMD_PTLOCK
- def_bool y
- depends on PPC_BOOK3S_64
-
config PPC_RADIX_MMU
bool "Radix MMU Support"
depends on PPC_BOOK3S_64
select ARCH_HAS_GIGANTIC_PAGE
- select PPC_HAVE_KUEP
- select PPC_HAVE_KUAP
default y
help
Enable support for the Power ISA 3.0 Radix style MMU. Currently this
@@ -390,7 +391,7 @@ config PPC_HAVE_KUEP
config PPC_KUEP
bool "Kernel Userspace Execution Prevention"
depends on PPC_HAVE_KUEP
- default y if !PPC_BOOK3S_32
+ default y
help
Enable support for Kernel Userspace Execution Prevention (KUEP)
@@ -402,7 +403,7 @@ config PPC_HAVE_KUAP
config PPC_KUAP
bool "Kernel Userspace Access Protection"
depends on PPC_HAVE_KUAP
- default y if !PPC_BOOK3S_32
+ default y
help
Enable support for Kernel Userspace Access Protection (KUAP)
@@ -420,19 +421,11 @@ config PPC_PKEY
depends on PPC_BOOK3S_64
depends on PPC_MEM_KEYS || PPC_KUAP || PPC_KUEP
-config ARCH_ENABLE_HUGEPAGE_MIGRATION
- def_bool y
- depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION
-
config PPC_MMU_NOHASH
def_bool y
depends on !PPC_BOOK3S
-config PPC_MMU_NOHASH_32
- def_bool y
- depends on PPC_MMU_NOHASH && PPC32
-
config PPC_BOOK3E_MMU
def_bool y
depends on FSL_BOOKE || PPC_BOOK3E
@@ -481,9 +474,9 @@ config SMP
If you don't know what to do here, say N.
config NR_CPUS
- int "Maximum number of CPUs (2-8192)"
- range 2 8192
- depends on SMP
+ int "Maximum number of CPUs (2-8192)" if SMP
+ range 2 8192 if SMP
+ default "1" if !SMP
default "32" if PPC64
default "4"
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 143d4417f6cc..94470fb27c99 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -22,3 +22,5 @@ obj-$(CONFIG_PPC_CELL) += cell/
obj-$(CONFIG_PPC_PS3) += ps3/
obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/
obj-$(CONFIG_AMIGAONE) += amigaone/
+obj-$(CONFIG_PPC_BOOK3S) += book3s/
+obj-$(CONFIG_PPC_MICROWATT) += microwatt/
diff --git a/arch/powerpc/platforms/book3s/Kconfig b/arch/powerpc/platforms/book3s/Kconfig
new file mode 100644
index 000000000000..34c931592ef0
--- /dev/null
+++ b/arch/powerpc/platforms/book3s/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+config PPC_VAS
+ bool "IBM Virtual Accelerator Switchboard (VAS)"
+ depends on (PPC_POWERNV || PPC_PSERIES) && PPC_64K_PAGES
+ default y
+ help
+ This enables support for IBM Virtual Accelerator Switchboard (VAS).
+
+ VAS devices are found in POWER9-based and later systems; they
+ provide access to accelerator coprocessors such as NX-GZIP and
+ NX-842. This config allows the kernel to use NX-842 accelerators,
+ and it exposes user-mode APIs for the NX-GZIP accelerator on
+ POWER9 PowerNV and POWER10 PowerVM platforms.
+
+ If unsure, say "N".
diff --git a/arch/powerpc/platforms/book3s/Makefile b/arch/powerpc/platforms/book3s/Makefile
new file mode 100644
index 000000000000..e790f1910f61
--- /dev/null
+++ b/arch/powerpc/platforms/book3s/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PPC_VAS) += vas-api.o
diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
new file mode 100644
index 000000000000..30172e52e16b
--- /dev/null
+++ b/arch/powerpc/platforms/book3s/vas-api.c
@@ -0,0 +1,493 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * VAS user space API for its accelerators (Only NX-GZIP is supported now)
+ * Copyright (C) 2019 Haren Myneni, IBM Corp
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/mmu_context.h>
+#include <linux/io.h>
+#include <asm/vas.h>
+#include <uapi/asm/vas-api.h>
+
+/*
+ * The driver creates the device node that can be used as follows:
+ * For NX-GZIP
+ *
+ * fd = open("/dev/crypto/nx-gzip", O_RDWR);
+ * rc = ioctl(fd, VAS_TX_WIN_OPEN, &attr);
+ * paste_addr = mmap(NULL, PAGE_SIZE, prot, MAP_SHARED, fd, 0ULL).
+ * vas_copy(&crb, 0, 1);
+ * vas_paste(paste_addr, 0, 1);
+ * close(fd) or exit process to close window.
+ *
+ * where "vas_copy" and "vas_paste" are defined in copy-paste.h.
+ * copy/paste returns to user space directly, so refer to the NX
+ * hardware documentation for exact copy/paste usage and
+ * completion / error conditions.
+ */
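/*
 * A minimal user-space sketch of the flow described above; an
 * illustration, not reference code. It assumes the uapi definitions
 * (struct vas_tx_win_open_attr, VAS_TX_WIN_OPEN) from <asm/vas-api.h>;
 * CRB construction and the copy/paste wrappers are out of scope here.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <asm/vas-api.h>

static void *open_nx_gzip_window(int *fdp)
{
	struct vas_tx_win_open_attr attr;
	void *paste_addr;
	int fd;

	fd = open("/dev/crypto/nx-gzip", O_RDWR);
	if (fd < 0)
		return NULL;

	memset(&attr, 0, sizeof(attr));
	attr.version = 1;	/* only version 1 is accepted */
	attr.vas_id = -1;	/* assumed: default VAS instance */

	/* Ask the driver to open one send window for this fd */
	if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0) {
		close(fd);
		return NULL;
	}

	/* Map the window's paste address: exactly one page, shared */
	paste_addr = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	if (paste_addr == MAP_FAILED) {
		close(fd);
		return NULL;
	}

	*fdp = fd;	/* keep the fd open; closing it closes the window */
	return paste_addr;
}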
+
+/*
+ * Wrapper object for the nx-gzip device - there is just one instance of
+ * this node for the whole system.
+ */
+static struct coproc_dev {
+ struct cdev cdev;
+ struct device *device;
+ char *name;
+ dev_t devt;
+ struct class *class;
+ enum vas_cop_type cop_type;
+ const struct vas_user_win_ops *vops;
+} coproc_device;
+
+struct coproc_instance {
+ struct coproc_dev *coproc;
+ struct vas_window *txwin;
+};
+
+static char *coproc_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev));
+}
+
+/*
+ * Take reference to pid and mm
+ */
+int get_vas_user_win_ref(struct vas_user_win_ref *task_ref)
+{
+ /*
+ * A window opened by a child thread may not be closed when the
+ * thread exits. So take a reference to its pid and release it
+ * when the window is freed by the parent thread. Holding the pid
+ * reference makes sure the pid is not re-used - this matters only
+ * for multithreaded applications.
+ */
+ task_ref->pid = get_task_pid(current, PIDTYPE_PID);
+ /*
+ * Acquire a reference to the task's mm.
+ */
+ task_ref->mm = get_task_mm(current);
+ if (!task_ref->mm) {
+ put_pid(task_ref->pid);
+ pr_err("VAS: pid(%d): mm_struct is not found\n",
+ current->pid);
+ return -EPERM;
+ }
+
+ /* Keep an mm_count ref (mmgrab), drop the mm_users ref from get_task_mm() */
+ mmgrab(task_ref->mm);
+ mmput(task_ref->mm);
+ /*
+ * A process closes its windows during exit. In a multithreaded
+ * application, a child thread can open a window and exit without
+ * closing it. So take a tgid reference until the window is closed
+ * to make sure the tgid is not reused.
+ */
+ task_ref->tgid = find_get_pid(task_tgid_vnr(current));
+
+ return 0;
+}
+
+/*
+ * On successful return, the caller must release the task reference
+ * with put_task_struct().
+ */
+static bool ref_get_pid_and_task(struct vas_user_win_ref *task_ref,
+ struct task_struct **tskp, struct pid **pidp)
+{
+ struct task_struct *tsk;
+ struct pid *pid;
+
+ pid = task_ref->pid;
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ if (!tsk) {
+ pid = task_ref->tgid;
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ /*
+ * The parent thread (tgid) closes the window when it
+ * exits, so we should not get here.
+ */
+ if (WARN_ON_ONCE(!tsk))
+ return false;
+ }
+
+ /* Return if the task is exiting. */
+ if (tsk->flags & PF_EXITING) {
+ put_task_struct(tsk);
+ return false;
+ }
+
+ *tskp = tsk;
+ *pidp = pid;
+
+ return true;
+}
+
+/*
+ * Update the CSB to indicate a translation error.
+ *
+ * User space will be polling on CSB after the request is issued.
+ * If NX can handle the request without any issues, it updates the
+ * CSB itself. If NX encounters a page fault instead, the kernel
+ * handles the fault and updates the CSB with a translation error.
+ *
+ * If we are unable to update the CSB (copy_to_user() failed due to
+ * an invalid csb_addr), send a signal to the process.
+ */
+void vas_update_csb(struct coprocessor_request_block *crb,
+ struct vas_user_win_ref *task_ref)
+{
+ struct coprocessor_status_block csb;
+ struct kernel_siginfo info;
+ struct task_struct *tsk;
+ void __user *csb_addr;
+ struct pid *pid;
+ int rc;
+
+ /*
+ * NX user space windows cannot be opened for task->mm == NULL,
+ * and faults are not generated for kernel requests.
+ */
+ if (WARN_ON_ONCE(!task_ref->mm))
+ return;
+
+ csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);
+
+ memset(&csb, 0, sizeof(csb));
+ csb.cc = CSB_CC_FAULT_ADDRESS;
+ csb.ce = CSB_CE_TERMINATION;
+ csb.cs = 0;
+ csb.count = 0;
+
+ /*
+ * NX operates on and returns data in BE format, as defined by
+ * the CRB struct. So save fault_storage_addr in BE just as NX
+ * pasted it into the FIFO, and expect user space to convert it
+ * to CPU format.
+ */
+ csb.address = crb->stamp.nx.fault_storage_addr;
+ csb.flags = 0;
+
+ /*
+ * A process closes its send window after all pending NX
+ * requests are completed. In multithreaded applications, a
+ * child thread can open a window and exit without closing it;
+ * some requests may still be pending, or the window may be
+ * used by other threads later. We should handle faults if NX
+ * encounters page faults on these requests: update the CSB
+ * with a translation error and the fault address. If the
+ * csb_addr passed by user space is invalid, send a SEGV signal
+ * to the pid saved in the window. If the child thread is not
+ * running, send the signal to the tgid. The parent thread
+ * (tgid) closes this window upon its exit.
+ *
+ * pid and mm references are taken when the window is opened by
+ * the process (pid). So the tgid is used only when a child
+ * thread opens a window and exits without closing it.
+ */
+
+ if (!ref_get_pid_and_task(task_ref, &tsk, &pid))
+ return;
+
+ kthread_use_mm(task_ref->mm);
+ rc = copy_to_user(csb_addr, &csb, sizeof(csb));
+ /*
+ * User space polls on csb.flags (first byte). So add a
+ * barrier, then copy the first byte with the csb flags update.
+ */
+ if (!rc) {
+ csb.flags = CSB_V;
+ /* Make sure update to csb.flags is visible now */
+ smp_mb();
+ rc = copy_to_user(csb_addr, &csb, sizeof(u8));
+ }
+ kthread_unuse_mm(task_ref->mm);
+ put_task_struct(tsk);
+
+ /* Success */
+ if (!rc)
+ return;
+
+ pr_debug("Invalid CSB address 0x%p signalling pid(%d)\n",
+ csb_addr, pid_vnr(pid));
+
+ clear_siginfo(&info);
+ info.si_signo = SIGSEGV;
+ info.si_errno = EFAULT;
+ info.si_code = SEGV_MAPERR;
+ info.si_addr = csb_addr;
+ /*
+ * The process will be polling on csb.flags after the request
+ * is sent to NX. So the CSB update should generally not fail,
+ * except when an application passes an invalid csb_addr. An
+ * error message is displayed, and it is left to user space
+ * whether to ignore or handle this signal.
+ */
+ rcu_read_lock();
+ rc = kill_pid_info(SIGSEGV, &info, pid);
+ rcu_read_unlock();
+
+ pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
+ pid_vnr(pid), rc);
+}
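/*
 * A rough sketch of the user-space side of the polling contract above,
 * under stated assumptions: the CSB layout mirrors the kernel's
 * coprocessor_status_block (flags/cs/cc/ce, count, then the address),
 * and CSB_V is the valid bit (assumed 0x80) in flags. Illustration only.
 */
#include <stdint.h>

struct nx_csb {			/* assumed user-space mirror of the CSB */
	volatile uint8_t flags;	/* polled first; valid bit set when done */
	uint8_t cs;
	uint8_t cc;		/* completion code, e.g. translation error */
	uint8_t ce;
	uint32_t count;
	uint64_t address;	/* fault address on translation error */
};

static int nx_wait_for_csb(struct nx_csb *csb)
{
	while (!(csb->flags & 0x80))	/* assumed CSB_V == 0x80 */
		;			/* spin; NX or the kernel sets it */
	__asm__ __volatile__("lwsync" : : : "memory");	/* flags before rest */
	return csb->cc;			/* 0 on success */
}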
+
+void vas_dump_crb(struct coprocessor_request_block *crb)
+{
+ struct data_descriptor_entry *dde;
+ struct nx_fault_stamp *nx;
+
+ dde = &crb->source;
+ pr_devel("SrcDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
+ be64_to_cpu(dde->address), be32_to_cpu(dde->length),
+ dde->count, dde->index, dde->flags);
+
+ dde = &crb->target;
+ pr_devel("TgtDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
+ be64_to_cpu(dde->address), be32_to_cpu(dde->length),
+ dde->count, dde->index, dde->flags);
+
+ nx = &crb->stamp.nx;
+ pr_devel("NX Stamp: PSWID 0x%x, FSA 0x%llx, flags 0x%x, FS 0x%x\n",
+ be32_to_cpu(nx->pswid),
+ be64_to_cpu(crb->stamp.nx.fault_storage_addr),
+ nx->flags, nx->fault_status);
+}
+
+static int coproc_open(struct inode *inode, struct file *fp)
+{
+ struct coproc_instance *cp_inst;
+
+ cp_inst = kzalloc(sizeof(*cp_inst), GFP_KERNEL);
+ if (!cp_inst)
+ return -ENOMEM;
+
+ cp_inst->coproc = container_of(inode->i_cdev, struct coproc_dev,
+ cdev);
+ fp->private_data = cp_inst;
+
+ return 0;
+}
+
+static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+{
+ void __user *uptr = (void __user *)arg;
+ struct vas_tx_win_open_attr uattr;
+ struct coproc_instance *cp_inst;
+ struct vas_window *txwin;
+ int rc;
+
+ cp_inst = fp->private_data;
+
+ /*
+ * Only one send window per file descriptor.
+ */
+ if (cp_inst->txwin)
+ return -EEXIST;
+
+ rc = copy_from_user(&uattr, uptr, sizeof(uattr));
+ if (rc) {
+ pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
+ return -EFAULT;
+ }
+
+ if (uattr.version != 1) {
+ pr_err("Invalid window open API version\n");
+ return -EINVAL;
+ }
+
+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->open_win) {
+ pr_err("VAS API is not registered\n");
+ return -EACCES;
+ }
+
+ txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags,
+ cp_inst->coproc->cop_type);
+ if (IS_ERR(txwin)) {
+ pr_err("%s() VAS window open failed, %ld\n", __func__,
+ PTR_ERR(txwin));
+ return PTR_ERR(txwin);
+ }
+
+ cp_inst->txwin = txwin;
+
+ return 0;
+}
+
+static int coproc_release(struct inode *inode, struct file *fp)
+{
+ struct coproc_instance *cp_inst = fp->private_data;
+ int rc;
+
+ if (cp_inst->txwin) {
+ if (cp_inst->coproc->vops &&
+ cp_inst->coproc->vops->close_win) {
+ rc = cp_inst->coproc->vops->close_win(cp_inst->txwin);
+ if (rc)
+ return rc;
+ }
+ cp_inst->txwin = NULL;
+ }
+
+ kfree(cp_inst);
+ fp->private_data = NULL;
+
+ /*
+ * We don't know here if the user has other receive windows
+ * open, so we can't really call clear_thread_tidr().
+ * So, once the process calls set_thread_tidr(), the
+ * TIDR value sticks around until process exits, resulting
+ * in an extra copy in restore_sprs().
+ */
+
+ return 0;
+}
+
+static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ struct coproc_instance *cp_inst = fp->private_data;
+ struct vas_window *txwin;
+ unsigned long pfn;
+ u64 paste_addr;
+ pgprot_t prot;
+ int rc;
+
+ txwin = cp_inst->txwin;
+
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+ pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
+ (vma->vm_end - vma->vm_start), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /* Ensure instance has an open send window */
+ if (!txwin) {
+ pr_err("%s(): No send window open?\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
+ pr_err("%s(): VAS API is not registered\n", __func__);
+ return -EACCES;
+ }
+
+ paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
+ if (!paste_addr) {
+ pr_err("%s(): Window paste address failed\n", __func__);
+ return -EINVAL;
+ }
+
+ pfn = paste_addr >> PAGE_SHIFT;
+
+ /* flags, page_prot from cxl_mmap(), except we want cacheable */
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
+ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
+
+ prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
+
+ rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, prot);
+
+ pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
+ paste_addr, vma->vm_start, rc);
+
+ return rc;
+}
+
+static long coproc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case VAS_TX_WIN_OPEN:
+ return coproc_ioc_tx_win_open(fp, arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct file_operations coproc_fops = {
+ .open = coproc_open,
+ .release = coproc_release,
+ .mmap = coproc_mmap,
+ .unlocked_ioctl = coproc_ioctl,
+};
+
+/*
+ * Only the nx-gzip coprocessor type is supported now, but this API
+ * code can be extended to other coprocessor types later.
+ */
+int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
+ const char *name,
+ const struct vas_user_win_ops *vops)
+{
+ int rc = -EINVAL;
+ dev_t devno;
+
+ rc = alloc_chrdev_region(&coproc_device.devt, 1, 1, name);
+ if (rc) {
+ pr_err("Unable to allocate coproc major number: %i\n", rc);
+ return rc;
+ }
+
+ pr_devel("%s device allocated, dev [%i,%i]\n", name,
+ MAJOR(coproc_device.devt), MINOR(coproc_device.devt));
+
+ coproc_device.class = class_create(mod, name);
+ if (IS_ERR(coproc_device.class)) {
+ rc = PTR_ERR(coproc_device.class);
+ pr_err("Unable to create %s class %d\n", name, rc);
+ goto err_class;
+ }
+ coproc_device.class->devnode = coproc_devnode;
+ coproc_device.cop_type = cop_type;
+ coproc_device.vops = vops;
+
+ coproc_fops.owner = mod;
+ cdev_init(&coproc_device.cdev, &coproc_fops);
+
+ devno = MKDEV(MAJOR(coproc_device.devt), 0);
+ rc = cdev_add(&coproc_device.cdev, devno, 1);
+ if (rc) {
+ pr_err("cdev_add() failed %d\n", rc);
+ goto err_cdev;
+ }
+
+ coproc_device.device = device_create(coproc_device.class, NULL,
+ devno, NULL, name, MINOR(devno));
+ if (IS_ERR(coproc_device.device)) {
+ rc = PTR_ERR(coproc_device.device);
+ pr_err("Unable to create coproc-%d %d\n", MINOR(devno), rc);
+ goto err;
+ }
+
+ pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
+ MINOR(devno));
+
+ return 0;
+
+err:
+ cdev_del(&coproc_device.cdev);
+err_cdev:
+ class_destroy(coproc_device.class);
+err_class:
+ unregister_chrdev_region(coproc_device.devt, 1);
+ return rc;
+}
+
+void vas_unregister_coproc_api(void)
+{
+ dev_t devno;
+
+ cdev_del(&coproc_device.cdev);
+ devno = MKDEV(MAJOR(coproc_device.devt), 0);
+ device_destroy(coproc_device.class, devno);
+
+ class_destroy(coproc_device.class);
+ unregister_chrdev_region(coproc_device.devt, 1);
+}
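/*
 * How a platform hooks into this API, as a hedged sketch: it fills in
 * vas_user_win_ops (open_win/paste_addr/close_win, per struct
 * vas_user_win_ops in asm/vas.h) and registers the chardev. The
 * example_* names are placeholders, not real kernel symbols.
 */
#include <linux/module.h>
#include <asm/vas.h>

static struct vas_window *example_open_win(int vas_id, u64 flags,
					   enum vas_cop_type cop_type)
{
	/* Placeholder: allocate and configure a send window */
	return ERR_PTR(-ENODEV);
}

static u64 example_paste_addr(struct vas_window *win)
{
	return 0;	/* Placeholder: real code returns the paste address */
}

static int example_close_win(struct vas_window *win)
{
	return 0;	/* Placeholder: tear the window down */
}

static const struct vas_user_win_ops example_vops = {
	.open_win	= example_open_win,
	.paste_addr	= example_paste_addr,
	.close_win	= example_close_win,
};

static int __init example_vas_api_init(void)
{
	/* Creates /dev/crypto/nx-gzip backed by the ops above */
	return vas_register_coproc_api(THIS_MODULE, VAS_COP_TYPE_GZIP,
				       "nx-gzip", &example_vops);
}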
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index e7c976bcadff..cb70c5f25bc6 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -35,6 +35,7 @@ config PPC_IBM_CELL_BLADE
config AXON_MSI
bool
depends on PPC_IBM_CELL_BLADE && PCI_MSI
+ select IRQ_DOMAIN_NOMAP
default y
menu "Cell Broadband Engine options"
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 2124831cf57c..fa08699aedeb 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -486,7 +486,8 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
window->table.it_size = size >> window->table.it_page_shift;
window->table.it_ops = &cell_iommu_ops;
- iommu_init_table(&window->table, iommu->nid, 0, 0);
+ if (!iommu_init_table(&window->table, iommu->nid, 0, 0))
+ panic("Failed to initialize iommu table");
pr_debug("\tioid %d\n", window->ioid);
pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
index 35bbd15582af..b207a7f99be5 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -10,6 +10,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/types.h>
#include <linux/export.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
index 93ea41680f54..a1c293f42a1f 100644
--- a/arch/powerpc/platforms/cell/spider-pci.c
+++ b/arch/powerpc/platforms/cell/spider-pci.c
@@ -25,10 +25,9 @@ struct spiderpci_iowa_private {
static void spiderpci_io_flush(struct iowa_bus *bus)
{
struct spiderpci_iowa_private *priv;
- u32 val;
priv = bus->private;
- val = in_be32(priv->regs + SPIDER_PCI_DUMMY_READ);
+ in_be32(priv->regs + SPIDER_PCI_DUMMY_READ);
iosync();
}
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index abdef9bcf432..fe0d8797a00a 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -35,9 +35,9 @@
*/
static void *spu_syscall_table[] = {
+#define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry)
#define __SYSCALL(nr, entry) [nr] = entry,
#include <asm/syscall_table_spu.h>
-#undef __SYSCALL
};
long spu_sys_callback(struct spu_syscall_block *s)
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 60b5583e9eaf..1a587618015c 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -149,8 +149,7 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
return -EIO;
}
- if (!dump_skip(cprm, roundup(cprm->pos - ret + sz, 4) - cprm->pos))
- return -EIO;
+ dump_skip_to(cprm, roundup(cprm->pos - ret + sz, 4));
return 0;
}
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index b83a3670bd74..bed05b644c2c 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -236,10 +236,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
if (!inode)
return -ENOSPC;
- if (dir->i_mode & S_ISGID) {
- inode->i_gid = dir->i_gid;
- inode->i_mode &= S_ISGID;
- }
+ inode_init_owner(&init_user_ns, inode, dir, mode | S_IFDIR);
ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
SPUFS_I(inode)->i_ctx = ctx;
if (!ctx) {
@@ -470,10 +467,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
goto out;
ret = 0;
- if (dir->i_mode & S_ISGID) {
- inode->i_gid = dir->i_gid;
- inode->i_mode &= S_ISGID;
- }
+ inode_init_owner(&init_user_ns, inode, dir, mode | S_IFDIR);
gang = alloc_spu_gang();
SPUFS_I(inode)->i_ctx = NULL;
SPUFS_I(inode)->i_gang = gang;
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index d56b4e3241cd..b41e81b22fdc 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -1657,14 +1657,13 @@ static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
- u32 dummy = 0;
/* Restore, Step 66:
* If CSA.MB_Stat[P]=0 (mailbox empty) then
* read from the PPU_MB register.
*/
if ((csa->prob.mb_stat_R & 0xFF) == 0) {
- dummy = in_be32(&prob->pu_mb_R);
+ in_be32(&prob->pu_mb_R);
eieio();
}
}
@@ -1672,14 +1671,13 @@ static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
- u64 dummy = 0UL;
/* Restore, Step 66:
* If CSA.MB_Stat[I]=0 (mailbox empty) then
* read from the PPUINT_MB register.
*/
if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
- dummy = in_be64(&priv2->puint_mb_R);
+ in_be64(&priv2->puint_mb_R);
eieio();
spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
eieio();
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index 8c421dc78b28..76e6256cb0a7 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -131,8 +131,7 @@ static struct pci_ops rtas_pci_ops =
volatile struct Hydra __iomem *Hydra = NULL;
-int __init
-hydra_init(void)
+static int __init hydra_init(void)
{
struct device_node *np;
struct resource r;
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index c1920961f410..4c6d703a4284 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -71,11 +71,6 @@ config MPC10X_BRIDGE
bool
select PPC_INDIRECT_PCI
-config MV64X60
- bool
- select PPC_INDIRECT_PCI
- select CHECK_CACHE_COHERENCY
-
config GAMECUBE_COMMON
bool
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index d39a9213a3e6..609bda2ad5dd 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index 53065d564161..85521b3e7098 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -251,8 +251,8 @@ static int ppc750_machine_check_exception(struct pt_regs *regs)
/* Are we prepared to handle this fault */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
return 0;
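/*
 * Context for the conversion above: on Book3S-64 this kernel series
 * caches SRR0/SRR1 validity, so regs->nip and regs->msr should no
 * longer be written directly. The accessors look roughly like the
 * sketch below (see asm/ptrace.h for the real definitions; the paca
 * fields are an assumption based on the SRR-caching rework).
 */
static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->nip = ip;

#ifdef CONFIG_PPC_BOOK3S_64
	local_paca->hsrr_valid = 0;	/* cached (H)SRR0/1 no longer match */
	local_paca->srr_valid = 0;
#endif
}

static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
{
	regs->msr = msr;

#ifdef CONFIG_PPC_BOOK3S_64
	local_paca->hsrr_valid = 0;
	local_paca->srr_valid = 0;
#endif
}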
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index 5565647dc879..d8da6a483e59 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -173,8 +173,8 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs)
/* Are we prepared to handle this fault */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
return 0;
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index a20b9576de22..37875e478b3a 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -34,7 +34,7 @@ static struct pci_controller *u3_agp, *u3_ht, *u4_pcie;
static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
{
- for (; node != 0;node = node->sibling) {
+ for (; node; node = node->sibling) {
const int *bus_range;
const unsigned int *class_code;
int len;
diff --git a/arch/powerpc/platforms/microwatt/Kconfig b/arch/powerpc/platforms/microwatt/Kconfig
new file mode 100644
index 000000000000..8f6a81978461
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+config PPC_MICROWATT
+ depends on PPC_BOOK3S_64 && !SMP
+ bool "Microwatt SoC platform"
+ select PPC_XICS
+ select PPC_ICS_NATIVE
+ select PPC_ICP_NATIVE
+ select PPC_NATIVE
+ select PPC_UDBG_16550
+ select ARCH_RANDOM
+ help
+ This option enables support for FPGA-based Microwatt implementations.
+
diff --git a/arch/powerpc/platforms/microwatt/Makefile b/arch/powerpc/platforms/microwatt/Makefile
new file mode 100644
index 000000000000..116d6d3ad3f0
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/Makefile
@@ -0,0 +1 @@
+obj-y += setup.o rng.o
diff --git a/arch/powerpc/platforms/microwatt/rng.c b/arch/powerpc/platforms/microwatt/rng.c
new file mode 100644
index 000000000000..3d8ee6eb7dad
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/rng.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Derived from arch/powerpc/platforms/powernv/rng.c, which is:
+ * Copyright 2013, Michael Ellerman, IBM Corporation.
+ */
+
+#define pr_fmt(fmt) "microwatt-rng: " fmt
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <asm/archrandom.h>
+#include <asm/cputable.h>
+#include <asm/machdep.h>
+
+#define DARN_ERR 0xFFFFFFFFFFFFFFFFul
+
+int microwatt_get_random_darn(unsigned long *v)
+{
+ unsigned long val;
+
+ /* Using DARN with L=1 - 64-bit conditioned random number */
+ asm volatile(PPC_DARN(%0, 1) : "=r"(val));
+
+ if (val == DARN_ERR)
+ return 0;
+
+ *v = val;
+
+ return 1;
+}
+
+static __init int rng_init(void)
+{
+ unsigned long val;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (microwatt_get_random_darn(&val)) {
+ ppc_md.get_random_seed = microwatt_get_random_darn;
+ return 0;
+ }
+ }
+
+ pr_warn("Unable to use DARN for get_random_seed()\n");
+
+ return -EIO;
+}
+machine_subsys_initcall(microwatt, rng_init);
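/*
 * For context, a hedged sketch of how the hook installed by rng_init()
 * is consumed: the generic powerpc archrandom path dispatches through
 * ppc_md.get_random_seed, roughly as below (see asm/archrandom.h in
 * this kernel series for the real definition).
 */
static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
	if (ppc_md.get_random_seed)
		return ppc_md.get_random_seed(v);

	return false;
}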
diff --git a/arch/powerpc/platforms/microwatt/setup.c b/arch/powerpc/platforms/microwatt/setup.c
new file mode 100644
index 000000000000..0b02603bdb74
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/setup.c
@@ -0,0 +1,41 @@
+/*
+ * Microwatt FPGA-based SoC platform setup code.
+ *
+ * Copyright 2020 Paul Mackerras (paulus@ozlabs.org), IBM Corp.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include <asm/machdep.h>
+#include <asm/time.h>
+#include <asm/xics.h>
+#include <asm/udbg.h>
+
+static void __init microwatt_init_IRQ(void)
+{
+ xics_init();
+}
+
+static int __init microwatt_probe(void)
+{
+ return of_machine_is_compatible("microwatt-soc");
+}
+
+static int __init microwatt_populate(void)
+{
+ return of_platform_default_populate(NULL, NULL, NULL);
+}
+machine_arch_initcall(microwatt, microwatt_populate);
+
+define_machine(microwatt) {
+ .name = "microwatt",
+ .probe = microwatt_probe,
+ .init_IRQ = microwatt_init_IRQ,
+ .progress = udbg_progress,
+ .calibrate_decr = generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/pasemi/idle.c b/arch/powerpc/platforms/pasemi/idle.c
index 1c954c90b0f4..9b88e3cded7d 100644
--- a/arch/powerpc/platforms/pasemi/idle.c
+++ b/arch/powerpc/platforms/pasemi/idle.c
@@ -37,7 +37,7 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
*/
if (regs->msr & SRR1_WAKEMASK)
- regs->nip = regs->link;
+ regs_set_return_ip(regs, regs->link);
switch (regs->msr & SRR1_WAKEMASK) {
case SRR1_WAKEDEC:
@@ -58,7 +58,7 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
restore_astate(hard_smp_processor_id());
/* everything handled */
- regs->msr |= MSR_RI;
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
return 1;
}
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index b500a6e47e6b..5be7242fbd86 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -146,7 +146,9 @@ static void iommu_table_iobmap_setup(void)
*/
iommu_table_iobmap.it_blocksize = 4;
iommu_table_iobmap.it_ops = &iommu_table_iobmap_ops;
- iommu_init_table(&iommu_table_iobmap, 0, 0, 0);
+ if (!iommu_init_table(&iommu_table_iobmap, 0, 0, 0))
+ panic("Failed to initialize iommu table");
+
pr_debug(" <- %s\n", __func__);
}
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig
index c02d8c503b29..b97bf12801eb 100644
--- a/arch/powerpc/platforms/powermac/Kconfig
+++ b/arch/powerpc/platforms/powermac/Kconfig
@@ -24,6 +24,7 @@ config PPC_PMAC32_PSURGE
bool "Support for powersurge upgrade cards" if EXPERT
depends on SMP && PPC32 && PPC_PMAC
select PPC_SMP_MUXED_IPI
+ select IRQ_DOMAIN_NOMAP
default y
help
The powersurge cpu boards can be used in the generation
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index 9d4ecd292255..d20ef35e6d9d 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -433,7 +433,7 @@ static void __init btext_welcome(boot_infos_t *bi)
bootx_printf("\nframe buffer at : 0x%x", bi->dispDeviceBase);
bootx_printf(" (phys), 0x%x", bi->logicalDisplayBase);
bootx_printf(" (log)");
- bootx_printf("\nklimit : 0x%x",(unsigned long)klimit);
+ bootx_printf("\nklimit : 0x%x",(unsigned long)_end);
bootx_printf("\nboot_info at : 0x%x", bi);
__asm__ __volatile__ ("mfmsr %0" : "=r" (flags));
bootx_printf("\nMSR : 0x%x", flags);
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index adae2a6712e1..bdfea6d6ab69 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -810,7 +810,7 @@ static int smp_core99_kick_cpu(int nr)
* b __secondary_start_pmac_0 + nr*8
*/
target = (unsigned long) __secondary_start_pmac_0 + nr * 8;
- patch_branch((struct ppc_inst *)vector, target, BRANCH_SET_LINK);
+ patch_branch(vector, target, BRANCH_SET_LINK);
/* Put some life in our friend */
pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0);
@@ -823,7 +823,7 @@ static int smp_core99_kick_cpu(int nr)
mdelay(1);
/* Restore our exception vector */
- patch_instruction((struct ppc_inst *)vector, ppc_inst(save_vector));
+ patch_instruction(vector, ppc_inst(save_vector));
local_irq_restore(flags);
if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 619b093a0657..043eefbbdd28 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -33,20 +33,6 @@ config PPC_MEMTRACE
Enabling this option allows for runtime allocation of memory (RAM)
for hardware tracing.
-config PPC_VAS
- bool "IBM Virtual Accelerator Switchboard (VAS)"
- depends on PPC_POWERNV && PPC_64K_PAGES
- default y
- help
- This enables support for IBM Virtual Accelerator Switchboard (VAS).
-
- VAS allows accelerators in co-processors like NX-GZIP and NX-842
- to be accessible to kernel subsystems and user processes.
-
- VAS adapters are found in POWER9 based systems.
-
- If unsure, say N.
-
config SCOM_DEBUGFS
bool "Expose SCOM controllers via debugfs"
depends on DEBUG_FS
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 2eb6ae150d1f..dc7b37c23b60 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
obj-$(CONFIG_FA_DUMP) += opal-fadump.o
obj-$(CONFIG_PRESERVE_FA_DUMP) += opal-fadump.o
obj-$(CONFIG_OPAL_CORE) += opal-core.o
-obj-$(CONFIG_PCI) += pci.o pci-ioda.o npu-dma.o pci-ioda-tce.o
+obj-$(CONFIG_PCI) += pci.o pci-ioda.o pci-ioda-tce.o
obj-$(CONFIG_PCI_IOV) += pci-sriov.o
obj-$(CONFIG_CXL_BASE) += pci-cxl.o
obj-$(CONFIG_EEH) += eeh-powernv.o
@@ -18,7 +18,7 @@ obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
obj-$(CONFIG_OPAL_PRD) += opal-prd.o
obj-$(CONFIG_PERF_EVENTS) += opal-imc.o
obj-$(CONFIG_PPC_MEMTRACE) += memtrace.o
-obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o vas-fault.o vas-api.o
+obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o vas-fault.o
obj-$(CONFIG_OCXL_BASE) += ocxl.o
obj-$(CONFIG_SCOM_DEBUGFS) += opal-xscom.o
obj-$(CONFIG_PPC_SECURE_BOOT) += opal-secvar.o
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 999997d9e9a9..528a7e0cf83a 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -604,7 +604,7 @@ struct p9_sprs {
u64 uamor;
};
-static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
+static unsigned long power9_idle_stop(unsigned long psscr)
{
int cpu = raw_smp_processor_id();
int first = cpu_first_thread_sibling(cpu);
@@ -620,8 +620,6 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
/* EC=ESL=0 case */
- BUG_ON(!mmu_on);
-
/*
* Wake synchronously. SRESET via xscom may still cause
* a 0x100 powersave wakeup with SRR1 reason!
@@ -803,8 +801,7 @@ core_woken:
__slb_restore_bolted_realmode();
out:
- if (mmu_on)
- mtmsr(MSR_KERNEL);
+ mtmsr(MSR_KERNEL);
return srr1;
}
@@ -895,7 +892,7 @@ struct p10_sprs {
*/
};
-static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
+static unsigned long power10_idle_stop(unsigned long psscr)
{
int cpu = raw_smp_processor_id();
int first = cpu_first_thread_sibling(cpu);
@@ -909,8 +906,6 @@ static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
/* EC=ESL=0 case */
- BUG_ON(!mmu_on);
-
/*
* Wake synchronously. SRESET via xscom may still cause
* a 0x100 powersave wakeup with SRR1 reason!
@@ -991,8 +986,7 @@ core_woken:
__slb_restore_bolted_realmode();
out:
- if (mmu_on)
- mtmsr(MSR_KERNEL);
+ mtmsr(MSR_KERNEL);
return srr1;
}
@@ -1002,40 +996,10 @@ static unsigned long arch300_offline_stop(unsigned long psscr)
{
unsigned long srr1;
-#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- __ppc64_runlatch_off();
if (cpu_has_feature(CPU_FTR_ARCH_31))
- srr1 = power10_idle_stop(psscr, true);
+ srr1 = power10_idle_stop(psscr);
else
- srr1 = power9_idle_stop(psscr, true);
- __ppc64_runlatch_on();
-#else
- /*
- * Tell KVM we're entering idle.
- * This does not have to be done in real mode because the P9 MMU
- * is independent per-thread. Some steppings share radix/hash mode
- * between threads, but in that case KVM has a barrier sync in real
- * mode before and after switching between radix and hash.
- *
- * kvm_start_guest must still be called in real mode though, hence
- * the false argument.
- */
- local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
-
- __ppc64_runlatch_off();
- if (cpu_has_feature(CPU_FTR_ARCH_31))
- srr1 = power10_idle_stop(psscr, false);
- else
- srr1 = power9_idle_stop(psscr, false);
- __ppc64_runlatch_on();
-
- local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
- /* Order setting hwthread_state vs. testing hwthread_req */
- smp_mb();
- if (local_paca->kvm_hstate.hwthread_req)
- srr1 = idle_kvm_start_guest(srr1);
- mtmsr(MSR_KERNEL);
-#endif
+ srr1 = power9_idle_stop(psscr);
return srr1;
}
@@ -1055,9 +1019,9 @@ void arch300_idle_type(unsigned long stop_psscr_val,
__ppc64_runlatch_off();
if (cpu_has_feature(CPU_FTR_ARCH_31))
- srr1 = power10_idle_stop(psscr, true);
+ srr1 = power10_idle_stop(psscr);
else
- srr1 = power9_idle_stop(psscr, true);
+ srr1 = power9_idle_stop(psscr);
__ppc64_runlatch_on();
fini_irq_for_idle_irqsoff();
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index 019669eb21d2..537a4daed614 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -46,10 +46,26 @@ static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
}
+static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct memtrace_entry *ent = filp->private_data;
+
+ if (ent->size < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
+ if (vma->vm_pgoff << PAGE_SHIFT >= ent->size)
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ return remap_pfn_range(vma, vma->vm_start, PHYS_PFN(ent->start) + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
static const struct file_operations memtrace_fops = {
.llseek = default_llseek,
.read = memtrace_read,
.open = simple_open,
+ .mmap = memtrace_mmap,
};
#define FLUSH_CHUNK_SIZE SZ_1G
@@ -88,8 +104,8 @@ static void memtrace_clear_range(unsigned long start_pfn,
* Before we go ahead and use this range as cache inhibited range
* flush the cache.
*/
- flush_dcache_range_chunked(PFN_PHYS(start_pfn),
- PFN_PHYS(start_pfn + nr_pages),
+ flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
+ (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
FLUSH_CHUNK_SIZE);
}
@@ -187,7 +203,7 @@ static int memtrace_init_debugfs(void)
dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);
ent->dir = dir;
- debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops);
+ debugfs_create_file_unsafe("trace", 0600, dir, ent, &memtrace_fops);
debugfs_create_x64("start", 0400, dir, &ent->start);
debugfs_create_x64("size", 0400, dir, &ent->size);
}
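/*
 * A hedged user-space sketch of the new interface: instead of read(),
 * the per-node trace buffer can now be mmap()ed (mapped non-cached by
 * memtrace_mmap(), at most ent->size bytes). The debugfs path is
 * illustrative; <node> stands for the memtrace entry name.
 */
#include <fcntl.h>
#include <sys/mman.h>

static void *map_memtrace_buf(const char *path, size_t size)
{
	/* e.g. "/sys/kernel/debug/powerpc/memtrace/<node>/trace", now 0600 */
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return NULL;

	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}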
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
deleted file mode 100644
index b711dc3262a3..000000000000
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ /dev/null
@@ -1,705 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * This file implements the DMA operations for NVLink devices. The NPU
- * devices all point to the same iommu table as the parent PCI device.
- *
- * Copyright Alistair Popple, IBM Corporation 2015.
- */
-
-#include <linux/mmu_notifier.h>
-#include <linux/mmu_context.h>
-#include <linux/of.h>
-#include <linux/pci.h>
-#include <linux/memblock.h>
-#include <linux/sizes.h>
-
-#include <asm/debugfs.h>
-#include <asm/powernv.h>
-#include <asm/ppc-pci.h>
-#include <asm/opal.h>
-
-#include "pci.h"
-
-static struct pci_dev *get_pci_dev(struct device_node *dn)
-{
- struct pci_dn *pdn = PCI_DN(dn);
- struct pci_dev *pdev;
-
- pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
- pdn->busno, pdn->devfn);
-
- /*
- * pci_get_domain_bus_and_slot() increased the reference count of
- * the PCI device, but callers don't need that actually as the PE
- * already holds a reference to the device. Since callers aren't
- * aware of the reference count change, call pci_dev_put() now to
- * avoid leaks.
- */
- if (pdev)
- pci_dev_put(pdev);
-
- return pdev;
-}
-
-/* Given a NPU device get the associated PCI device. */
-struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
-{
- struct device_node *dn;
- struct pci_dev *gpdev;
-
- if (WARN_ON(!npdev))
- return NULL;
-
- if (WARN_ON(!npdev->dev.of_node))
- return NULL;
-
- /* Get assoicated PCI device */
- dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
- if (!dn)
- return NULL;
-
- gpdev = get_pci_dev(dn);
- of_node_put(dn);
-
- return gpdev;
-}
-EXPORT_SYMBOL(pnv_pci_get_gpu_dev);
-
-/* Given the real PCI device get a linked NPU device. */
-struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
-{
- struct device_node *dn;
- struct pci_dev *npdev;
-
- if (WARN_ON(!gpdev))
- return NULL;
-
- /* Not all PCI devices have device-tree nodes */
- if (!gpdev->dev.of_node)
- return NULL;
-
- /* Get assoicated PCI device */
- dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
- if (!dn)
- return NULL;
-
- npdev = get_pci_dev(dn);
- of_node_put(dn);
-
- return npdev;
-}
-EXPORT_SYMBOL(pnv_pci_get_npu_dev);
-
-#ifdef CONFIG_IOMMU_API
-/*
- * Returns the PE assoicated with the PCI device of the given
- * NPU. Returns the linked pci device if pci_dev != NULL.
- */
-static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
- struct pci_dev **gpdev)
-{
- struct pnv_phb *phb;
- struct pci_controller *hose;
- struct pci_dev *pdev;
- struct pnv_ioda_pe *pe;
- struct pci_dn *pdn;
-
- pdev = pnv_pci_get_gpu_dev(npe->pdev);
- if (!pdev)
- return NULL;
-
- pdn = pci_get_pdn(pdev);
- if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
- return NULL;
-
- hose = pci_bus_to_host(pdev->bus);
- phb = hose->private_data;
- pe = &phb->ioda.pe_array[pdn->pe_number];
-
- if (gpdev)
- *gpdev = pdev;
-
- return pe;
-}
-
-static long pnv_npu_unset_window(struct iommu_table_group *table_group,
- int num);
-
-static long pnv_npu_set_window(struct iommu_table_group *table_group, int num,
- struct iommu_table *tbl)
-{
- struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
- table_group);
- struct pnv_phb *phb = npe->phb;
- int64_t rc;
- const unsigned long size = tbl->it_indirect_levels ?
- tbl->it_level_size : tbl->it_size;
- const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
- const __u64 win_size = tbl->it_size << tbl->it_page_shift;
- int num2 = (num == 0) ? 1 : 0;
-
- /* NPU has just one TVE so if there is another table, remove it first */
- if (npe->table_group.tables[num2])
- pnv_npu_unset_window(&npe->table_group, num2);
-
- pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
- start_addr, start_addr + win_size - 1,
- IOMMU_PAGE_SIZE(tbl));
-
- rc = opal_pci_map_pe_dma_window(phb->opal_id,
- npe->pe_number,
- npe->pe_number,
- tbl->it_indirect_levels + 1,
- __pa(tbl->it_base),
- size << 3,
- IOMMU_PAGE_SIZE(tbl));
- if (rc) {
- pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
- return rc;
- }
- pnv_pci_ioda2_tce_invalidate_entire(phb, false);
-
- /* Add the table to the list so its TCE cache will get invalidated */
- pnv_pci_link_table_and_group(phb->hose->node, num,
- tbl, &npe->table_group);
-
- return 0;
-}
-
-static long pnv_npu_unset_window(struct iommu_table_group *table_group, int num)
-{
- struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
- table_group);
- struct pnv_phb *phb = npe->phb;
- int64_t rc;
-
- if (!npe->table_group.tables[num])
- return 0;
-
- pe_info(npe, "Removing DMA window\n");
-
- rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
- npe->pe_number,
- 0/* levels */, 0/* table address */,
- 0/* table size */, 0/* page size */);
- if (rc) {
- pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
- return rc;
- }
- pnv_pci_ioda2_tce_invalidate_entire(phb, false);
-
- pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
- &npe->table_group);
-
- return 0;
-}
-
-/* Switch ownership from platform code to external user (e.g. VFIO) */
-static void pnv_npu_take_ownership(struct iommu_table_group *table_group)
-{
- struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
- table_group);
- struct pnv_phb *phb = npe->phb;
- int64_t rc;
- struct pci_dev *gpdev = NULL;
-
- /*
- * Note: NPU has just a single TVE in the hardware which means that
- * while used by the kernel, it can have either 32bit window or
- * DMA bypass but never both. So we deconfigure 32bit window only
- * if it was enabled at the moment of ownership change.
- */
- if (npe->table_group.tables[0]) {
- pnv_npu_unset_window(&npe->table_group, 0);
- return;
- }
-
- /* Disable bypass */
- rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
- npe->pe_number, npe->pe_number,
- 0 /* bypass base */, 0);
- if (rc) {
- pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
- return;
- }
- pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
-
- get_gpu_pci_dev_and_pe(npe, &gpdev);
- if (gpdev)
- pnv_npu2_unmap_lpar_dev(gpdev);
-}
-
-static void pnv_npu_release_ownership(struct iommu_table_group *table_group)
-{
- struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
- table_group);
- struct pci_dev *gpdev = NULL;
-
- get_gpu_pci_dev_and_pe(npe, &gpdev);
- if (gpdev)
- pnv_npu2_map_lpar_dev(gpdev, 0, MSR_DR | MSR_PR | MSR_HV);
-}
-
-static struct iommu_table_group_ops pnv_pci_npu_ops = {
- .set_window = pnv_npu_set_window,
- .unset_window = pnv_npu_unset_window,
- .take_ownership = pnv_npu_take_ownership,
- .release_ownership = pnv_npu_release_ownership,
-};
-#endif /* !CONFIG_IOMMU_API */
-
-/*
- * NPU2 ATS
- */
-/* Maximum possible number of ATSD MMIO registers per NPU */
-#define NV_NMMU_ATSD_REGS 8
-#define NV_NPU_MAX_PE_NUM 16
-
-/*
- * A compound NPU IOMMU group which might consist of 1 GPU + 2xNPUs (POWER8) or
- * up to 3 x (GPU + 2xNPUs) (POWER9).
- */
-struct npu_comp {
- struct iommu_table_group table_group;
- int pe_num;
- struct pnv_ioda_pe *pe[NV_NPU_MAX_PE_NUM];
-};
-
-/* An NPU descriptor, valid for POWER9 only */
-struct npu {
- int index;
- struct npu_comp npucomp;
-};
-
-#ifdef CONFIG_IOMMU_API
-static long pnv_npu_peers_create_table_userspace(
- struct iommu_table_group *table_group,
- int num, __u32 page_shift, __u64 window_size, __u32 levels,
- struct iommu_table **ptbl)
-{
- struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
- table_group);
-
- if (!npucomp->pe_num || !npucomp->pe[0] ||
- !npucomp->pe[0]->table_group.ops ||
- !npucomp->pe[0]->table_group.ops->create_table)
- return -EFAULT;
-
- return npucomp->pe[0]->table_group.ops->create_table(
- &npucomp->pe[0]->table_group, num, page_shift,
- window_size, levels, ptbl);
-}
-
-static long pnv_npu_peers_set_window(struct iommu_table_group *table_group,
- int num, struct iommu_table *tbl)
-{
- int i, j;
- long ret = 0;
- struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
- table_group);
-
- for (i = 0; i < npucomp->pe_num; ++i) {
- struct pnv_ioda_pe *pe = npucomp->pe[i];
-
- if (!pe->table_group.ops->set_window)
- continue;
-
- ret = pe->table_group.ops->set_window(&pe->table_group,
- num, tbl);
- if (ret)
- break;
- }
-
- if (ret) {
- for (j = 0; j < i; ++j) {
- struct pnv_ioda_pe *pe = npucomp->pe[j];
-
- if (!pe->table_group.ops->unset_window)
- continue;
-
- ret = pe->table_group.ops->unset_window(
- &pe->table_group, num);
- if (ret)
- break;
- }
- } else {
- table_group->tables[num] = iommu_tce_table_get(tbl);
- }
-
- return ret;
-}
-
-static long pnv_npu_peers_unset_window(struct iommu_table_group *table_group,
- int num)
-{
- int i, j;
- long ret = 0;
- struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
- table_group);
-
- for (i = 0; i < npucomp->pe_num; ++i) {
- struct pnv_ioda_pe *pe = npucomp->pe[i];
-
- WARN_ON(npucomp->table_group.tables[num] !=
- table_group->tables[num]);
- if (!npucomp->table_group.tables[num])
- continue;
-
- if (!pe->table_group.ops->unset_window)
- continue;
-
- ret = pe->table_group.ops->unset_window(&pe->table_group, num);
- if (ret)
- break;
- }
-
- if (ret) {
- for (j = 0; j < i; ++j) {
- struct pnv_ioda_pe *pe = npucomp->pe[j];
-
- if (!npucomp->table_group.tables[num])
- continue;
-
- if (!pe->table_group.ops->set_window)
- continue;
-
- ret = pe->table_group.ops->set_window(&pe->table_group,
- num, table_group->tables[num]);
- if (ret)
- break;
- }
- } else if (table_group->tables[num]) {
- iommu_tce_table_put(table_group->tables[num]);
- table_group->tables[num] = NULL;
- }
-
- return ret;
-}
-
-static void pnv_npu_peers_take_ownership(struct iommu_table_group *table_group)
-{
- int i;
- struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
- table_group);
-
- for (i = 0; i < npucomp->pe_num; ++i) {
- struct pnv_ioda_pe *pe = npucomp->pe[i];
-
- if (!pe->table_group.ops ||
- !pe->table_group.ops->take_ownership)
- continue;
- pe->table_group.ops->take_ownership(&pe->table_group);
- }
-}
-
-static void pnv_npu_peers_release_ownership(
- struct iommu_table_group *table_group)
-{
- int i;
- struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
- table_group);
-
- for (i = 0; i < npucomp->pe_num; ++i) {
- struct pnv_ioda_pe *pe = npucomp->pe[i];
-
- if (!pe->table_group.ops ||
- !pe->table_group.ops->release_ownership)
- continue;
- pe->table_group.ops->release_ownership(&pe->table_group);
- }
-}
-
-static struct iommu_table_group_ops pnv_npu_peers_ops = {
- .get_table_size = pnv_pci_ioda2_get_table_size,
- .create_table = pnv_npu_peers_create_table_userspace,
- .set_window = pnv_npu_peers_set_window,
- .unset_window = pnv_npu_peers_unset_window,
- .take_ownership = pnv_npu_peers_take_ownership,
- .release_ownership = pnv_npu_peers_release_ownership,
-};
-
-static void pnv_comp_attach_table_group(struct npu_comp *npucomp,
- struct pnv_ioda_pe *pe)
-{
- if (WARN_ON(npucomp->pe_num == NV_NPU_MAX_PE_NUM))
- return;
-
- npucomp->pe[npucomp->pe_num] = pe;
- ++npucomp->pe_num;
-}
-
-static struct iommu_table_group *
- pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
-{
- struct iommu_table_group *compound_group;
- struct npu_comp *npucomp;
- struct pci_dev *gpdev = NULL;
- struct pci_controller *hose;
- struct pci_dev *npdev = NULL;
-
- list_for_each_entry(gpdev, &pe->pbus->devices, bus_list) {
- npdev = pnv_pci_get_npu_dev(gpdev, 0);
- if (npdev)
- break;
- }
-
- if (!npdev)
- /* It is not an NPU attached device, skip */
- return NULL;
-
- hose = pci_bus_to_host(npdev->bus);
-
- if (hose->npu) {
- /* P9 case: compound group is per-NPU (all gpus, all links) */
- npucomp = &hose->npu->npucomp;
- } else {
- /* P8 case: Compound group is per-GPU (1 gpu, 2 links) */
- npucomp = pe->npucomp = kzalloc(sizeof(*npucomp), GFP_KERNEL);
- }
-
- compound_group = &npucomp->table_group;
- if (!compound_group->group) {
- compound_group->ops = &pnv_npu_peers_ops;
- iommu_register_group(compound_group, hose->global_number,
- pe->pe_number);
-
- /* Steal capabilities from a GPU PE */
- compound_group->max_dynamic_windows_supported =
- pe->table_group.max_dynamic_windows_supported;
- compound_group->tce32_start = pe->table_group.tce32_start;
- compound_group->tce32_size = pe->table_group.tce32_size;
- compound_group->max_levels = pe->table_group.max_levels;
- if (!compound_group->pgsizes)
- compound_group->pgsizes = pe->table_group.pgsizes;
- }
-
- /*
- * The gpu would have been added to the iommu group that's created
- * for the PE. Pull it out now.
- */
- iommu_del_device(&gpdev->dev);
-
- /*
- * I'm not sure this is strictly required, but it's probably a good idea
- * since the table_group for the PE is going to be attached to the
- * compound table group. If we leave the PE's iommu group active then
- * we might have the same table_group being modifiable via two sepeate
- * iommu groups.
- */
- iommu_group_put(pe->table_group.group);
-
- /* now put the GPU into the compound group */
- pnv_comp_attach_table_group(npucomp, pe);
- iommu_add_device(compound_group, &gpdev->dev);
-
- return compound_group;
-}
-
-static struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
-{
- struct iommu_table_group *table_group;
- struct npu_comp *npucomp;
- struct pci_dev *gpdev = NULL;
- struct pci_dev *npdev;
- struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(pe, &gpdev);
-
- WARN_ON(!(pe->flags & PNV_IODA_PE_DEV));
- if (!gpe)
- return NULL;
-
- /*
- * IODA2 bridges get this set up from pci_controller_ops::setup_bridge
- * but NPU bridges do not have this hook defined so we do it here.
- * We do not setup other table group parameters as they won't be used
- * anyway - NVLink bridges are subordinate PEs.
- */
- pe->table_group.ops = &pnv_pci_npu_ops;
-
- table_group = iommu_group_get_iommudata(
- iommu_group_get(&gpdev->dev));
-
- /*
- * On P9 NPU PHB and PCI PHB support different page sizes,
- * keep only matching. We expect here that NVLink bridge PE pgsizes is
- * initialized by the caller.
- */
- table_group->pgsizes &= pe->table_group.pgsizes;
- npucomp = container_of(table_group, struct npu_comp, table_group);
- pnv_comp_attach_table_group(npucomp, pe);
-
- list_for_each_entry(npdev, &pe->phb->hose->bus->devices, bus_list) {
- struct pci_dev *gpdevtmp = pnv_pci_get_gpu_dev(npdev);
-
- if (gpdevtmp != gpdev)
- continue;
-
- iommu_add_device(table_group, &npdev->dev);
- }
-
- return table_group;
-}
-
-void pnv_pci_npu_setup_iommu_groups(void)
-{
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct pnv_ioda_pe *pe;
-
- /*
- * For non-nvlink devices the IOMMU group is registered when the PE is
- * configured and devices are added to the group when the per-device
- * DMA setup is run. That's done in hose->ops.dma_dev_setup() which is
- * only initialise for "normal" IODA PHBs.
- *
- * For NVLink devices we need to ensure the NVLinks and the GPU end up
- * in the same IOMMU group, so that's handled here.
- */
- list_for_each_entry(hose, &hose_list, list_node) {
- phb = hose->private_data;
-
- if (phb->type == PNV_PHB_IODA2)
- list_for_each_entry(pe, &phb->ioda.pe_list, list)
- pnv_try_setup_npu_table_group(pe);
- }
-
- /*
- * Now we have all PHBs discovered, time to add NPU devices to
- * the corresponding IOMMU groups.
- */
- list_for_each_entry(hose, &hose_list, list_node) {
- unsigned long pgsizes;
-
- phb = hose->private_data;
-
- if (phb->type != PNV_PHB_NPU_NVLINK)
- continue;
-
- pgsizes = pnv_ioda_parse_tce_sizes(phb);
- list_for_each_entry(pe, &phb->ioda.pe_list, list) {
- /*
- * IODA2 bridges get this set up from
- * pci_controller_ops::setup_bridge but NPU bridges
- * do not have this hook defined so we do it here.
- */
- pe->table_group.pgsizes = pgsizes;
- pnv_npu_compound_attach(pe);
- }
- }
-}
-#endif /* CONFIG_IOMMU_API */
-
-int pnv_npu2_init(struct pci_controller *hose)
-{
- static int npu_index;
- struct npu *npu;
- int ret;
-
- npu = kzalloc(sizeof(*npu), GFP_KERNEL);
- if (!npu)
- return -ENOMEM;
-
- npu_index++;
- if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
- ret = -ENOSPC;
- goto fail_exit;
- }
- npu->index = npu_index;
- hose->npu = npu;
-
- return 0;
-
-fail_exit:
- kfree(npu);
- return ret;
-}
-
-int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
- unsigned long msr)
-{
- int ret;
- struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
- struct pci_controller *hose;
- struct pnv_phb *nphb;
-
- if (!npdev)
- return -ENODEV;
-
- hose = pci_bus_to_host(npdev->bus);
- if (hose->npu == NULL) {
- dev_info_once(&npdev->dev, "Nvlink1 does not support contexts");
- return 0;
- }
-
- nphb = hose->private_data;
-
- dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=%u\n",
- nphb->opal_id, lparid);
- /*
- * Currently we only support radix and non-zero LPCR only makes sense
- * for hash tables so skiboot expects the LPCR parameter to be a zero.
- */
- ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), lparid,
- 0 /* LPCR bits */);
- if (ret) {
- dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
- return ret;
- }
-
- dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n",
- nphb->opal_id, msr);
- ret = opal_npu_init_context(nphb->opal_id, 0/*__unused*/, msr,
- pci_dev_id(gpdev));
- if (ret < 0)
- dev_err(&gpdev->dev, "Failed to init context: %d\n", ret);
- else
- ret = 0;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pnv_npu2_map_lpar_dev);
-
-void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr)
-{
- struct pci_dev *gpdev;
-
- list_for_each_entry(gpdev, &gpe->pbus->devices, bus_list)
- pnv_npu2_map_lpar_dev(gpdev, 0, msr);
-}
-
-int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
-{
- int ret;
- struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
- struct pci_controller *hose;
- struct pnv_phb *nphb;
-
- if (!npdev)
- return -ENODEV;
-
- hose = pci_bus_to_host(npdev->bus);
- if (hose->npu == NULL) {
- dev_info_once(&npdev->dev, "Nvlink1 does not support contexts");
- return 0;
- }
-
- nphb = hose->private_data;
-
- dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n",
- nphb->opal_id);
- ret = opal_npu_destroy_context(nphb->opal_id, 0/*__unused*/,
- pci_dev_id(gpdev));
- if (ret < 0) {
- dev_err(&gpdev->dev, "Failed to destroy context: %d\n", ret);
- return ret;
- }
-
- /* Set LPID to 0 anyway, just to be safe */
- dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=0\n", nphb->opal_id);
- ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), 0 /*LPID*/,
- 0 /* LPCR bits */);
- if (ret)
- dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pnv_npu2_unmap_lpar_dev);
diff --git a/arch/powerpc/platforms/powernv/opal-call.c b/arch/powerpc/platforms/powernv/opal-call.c
index 5cd0f52d258f..f812c74c61e5 100644
--- a/arch/powerpc/platforms/powernv/opal-call.c
+++ b/arch/powerpc/platforms/powernv/opal-call.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/jump_label.h>
+#include <asm/interrupt.h>
#include <asm/opal-api.h>
#include <asm/trace.h>
#include <asm/asm-prototypes.h>
@@ -100,6 +101,9 @@ static int64_t opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
bool mmu = (msr & (MSR_IR|MSR_DR));
int64_t ret;
+ /* OPAL call / firmware may use SRR and/or HSRR */
+ srr_regs_clobbered();
+
msr &= ~MSR_EE;
if (unlikely(!mmu))
@@ -267,8 +271,6 @@ OPAL_CALL(opal_xive_get_queue_state, OPAL_XIVE_GET_QUEUE_STATE);
OPAL_CALL(opal_xive_set_queue_state, OPAL_XIVE_SET_QUEUE_STATE);
OPAL_CALL(opal_xive_get_vp_state, OPAL_XIVE_GET_VP_STATE);
OPAL_CALL(opal_signal_system_reset, OPAL_SIGNAL_SYSTEM_RESET);
-OPAL_CALL(opal_npu_init_context, OPAL_NPU_INIT_CONTEXT);
-OPAL_CALL(opal_npu_destroy_context, OPAL_NPU_DESTROY_CONTEXT);
OPAL_CALL(opal_npu_map_lpar, OPAL_NPU_MAP_LPAR);
OPAL_CALL(opal_imc_counters_init, OPAL_IMC_COUNTERS_INIT);
OPAL_CALL(opal_imc_counters_start, OPAL_IMC_COUNTERS_START);
diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c
index 0d9ba70f7251..5b9736bbc2aa 100644
--- a/arch/powerpc/platforms/powernv/opal-core.c
+++ b/arch/powerpc/platforms/powernv/opal-core.c
@@ -71,7 +71,7 @@ static LIST_HEAD(opalcore_list);
static struct opalcore_config *oc_conf;
static const struct opal_mpipl_fadump *opalc_metadata;
static const struct opal_mpipl_fadump *opalc_cpu_metadata;
-struct kobject *mpipl_kobj;
+static struct kobject *mpipl_kobj;
/*
* Set crashing CPU's signal to SIGUSR1. if the kernel is triggered
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
index deddaebf8c14..a191f4c60ce7 100644
--- a/arch/powerpc/platforms/powernv/opal-prd.c
+++ b/arch/powerpc/platforms/powernv/opal-prd.c
@@ -105,7 +105,6 @@ static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
{
size_t addr, size;
pgprot_t page_prot;
- int rc;
pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
vma->vm_start, vma->vm_end, vma->vm_pgoff,
@@ -121,10 +120,8 @@ static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
size, vma->vm_page_prot);
- rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
+ return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
page_prot);
-
- return rc;
}
static bool opal_msg_queue_empty(void)
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 303d7c775740..2835376e61a4 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -773,7 +773,7 @@ bool opal_mce_check_early_recovery(struct pt_regs *regs)
* Setup regs->nip to rfi into fixup address.
*/
if (recover_addr)
- regs->nip = recover_addr;
+ regs_set_return_ip(regs, recover_addr);
out:
return !!recover_addr;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index f0f901683a2f..7de464679292 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -47,8 +47,7 @@
#define PNV_IODA1_M64_SEGS 8 /* Segments per M64 BAR */
#define PNV_IODA1_DMA32_SEGSIZE 0x10000000
-static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_NVLINK",
- "NPU_OCAPI" };
+static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_OCAPI" };
static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
static void pnv_pci_configure_bus(struct pci_bus *bus);
@@ -192,8 +191,6 @@ void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
unsigned int pe_num = pe->pe_number;
WARN_ON(pe->pdev);
- WARN_ON(pe->npucomp); /* NPUs for nvlink are not supposed to be freed */
- kfree(pe->npucomp);
memset(pe, 0, sizeof(struct pnv_ioda_pe));
mutex_lock(&phb->ioda.pe_alloc_mutex);
@@ -875,7 +872,7 @@ int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
* Release from all parents PELT-V. NPUs don't have a PELTV
* table
*/
- if (phb->type != PNV_PHB_NPU_NVLINK && phb->type != PNV_PHB_NPU_OCAPI)
+ if (phb->type != PNV_PHB_NPU_OCAPI)
pnv_ioda_unset_peltv(phb, pe, parent);
rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
@@ -946,7 +943,7 @@ int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
* Configure PELTV. NPUs don't have a PELTV table so skip
* configuration on them.
*/
- if (phb->type != PNV_PHB_NPU_NVLINK && phb->type != PNV_PHB_NPU_OCAPI)
+ if (phb->type != PNV_PHB_NPU_OCAPI)
pnv_ioda_set_peltv(phb, pe, true);
/* Setup reverse map */
@@ -1002,8 +999,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
/* NOTE: We don't get a reference for the pointer in the PE
* data structure, both the device and PE structures should be
- * destroyed at the same time. However, removing nvlink
- * devices will need some work.
+ * destroyed at the same time.
*
* At some point we want to remove the PDN completely anyways
*/
@@ -1099,113 +1095,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
return pe;
}
-static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
-{
- int pe_num, found_pe = false, rc;
- long rid;
- struct pnv_ioda_pe *pe;
- struct pci_dev *gpu_pdev;
- struct pci_dn *npu_pdn;
- struct pnv_phb *phb = pci_bus_to_pnvhb(npu_pdev->bus);
-
- /*
- * Intentionally leak a reference on the npu device (for
- * nvlink only; this is not an opencapi path) to make sure it
- * never goes away, as it's been the case all along and some
- * work is needed otherwise.
- */
- pci_dev_get(npu_pdev);
-
- /*
- * Due to a hardware errata PE#0 on the NPU is reserved for
- * error handling. This means we only have three PEs remaining
- * which need to be assigned to four links, implying some
- * links must share PEs.
- *
- * To achieve this we assign PEs such that NPUs linking the
- * same GPU get assigned the same PE.
- */
- gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev);
- for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
- pe = &phb->ioda.pe_array[pe_num];
- if (!pe->pdev)
- continue;
-
- if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) {
- /*
- * This device has the same peer GPU so should
- * be assigned the same PE as the existing
- * peer NPU.
- */
- dev_info(&npu_pdev->dev,
- "Associating to existing PE %x\n", pe_num);
- npu_pdn = pci_get_pdn(npu_pdev);
- rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
- npu_pdn->pe_number = pe_num;
- phb->ioda.pe_rmap[rid] = pe->pe_number;
- pe->device_count++;
-
- /* Map the PE to this link */
- rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
- OpalPciBusAll,
- OPAL_COMPARE_RID_DEVICE_NUMBER,
- OPAL_COMPARE_RID_FUNCTION_NUMBER,
- OPAL_MAP_PE);
- WARN_ON(rc != OPAL_SUCCESS);
- found_pe = true;
- break;
- }
- }
-
- if (!found_pe)
- /*
- * Could not find an existing PE so allocate a new
- * one.
- */
- return pnv_ioda_setup_dev_PE(npu_pdev);
- else
- return pe;
-}
-
-static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
-{
- struct pci_dev *pdev;
-
- list_for_each_entry(pdev, &bus->devices, bus_list)
- pnv_ioda_setup_npu_PE(pdev);
-}
-
-static void pnv_pci_ioda_setup_nvlink(void)
-{
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct pnv_ioda_pe *pe;
-
- list_for_each_entry(hose, &hose_list, list_node) {
- phb = hose->private_data;
- if (phb->type == PNV_PHB_NPU_NVLINK) {
- /* PE#0 is needed for error reporting */
- pnv_ioda_reserve_pe(phb, 0);
- pnv_ioda_setup_npu_PEs(hose->bus);
- if (phb->model == PNV_PHB_MODEL_NPU2)
- WARN_ON_ONCE(pnv_npu2_init(hose));
- }
- }
- list_for_each_entry(hose, &hose_list, list_node) {
- phb = hose->private_data;
- if (phb->type != PNV_PHB_IODA2)
- continue;
-
- list_for_each_entry(pe, &phb->ioda.pe_list, list)
- pnv_npu2_map_lpar(pe, MSR_DR | MSR_PR | MSR_HV);
- }
-
-#ifdef CONFIG_IOMMU_API
- /* setup iommu groups so we can do nvlink pass-thru */
- pnv_pci_npu_setup_iommu_groups();
-#endif
-}
-
static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
struct pnv_ioda_pe *pe);
@@ -1468,18 +1357,6 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
#define PHB3_TCE_KILL_INVAL_PE PPC_BIT(1)
#define PHB3_TCE_KILL_INVAL_ONE PPC_BIT(2)
-static void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
-{
- __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm);
- const unsigned long val = PHB3_TCE_KILL_INVAL_ALL;
-
- mb(); /* Ensure previous TCE table stores are visible */
- if (rm)
- __raw_rm_writeq_be(val, invalidate);
- else
- __raw_writeq_be(val, invalidate);
-}
-
static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
/* 01xb - invalidate TCEs that match the specified PE# */
@@ -1539,20 +1416,6 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
struct pnv_phb *phb = pe->phb;
unsigned int shift = tbl->it_page_shift;
- /*
- * NVLink1 can use the TCE kill register directly as
- * it's the same as PHB3. NVLink2 is different and
- * should go via the OPAL call.
- */
- if (phb->model == PNV_PHB_MODEL_NPU) {
- /*
- * The NVLink hardware does not support TCE kill
- * per TCE entry so we have to invalidate
- * the entire cache for it.
- */
- pnv_pci_phb3_tce_invalidate_entire(phb, rm);
- continue;
- }
if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
pnv_pci_phb3_tce_invalidate(pe, rm, shift,
index, npages);
@@ -1564,14 +1427,6 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
}
}
-void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
-{
- if (phb->model == PNV_PHB_MODEL_NPU || phb->model == PNV_PHB_MODEL_PHB3)
- pnv_pci_phb3_tce_invalidate_entire(phb, rm);
- else
- opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL, 0, 0, 0, 0);
-}
-
static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
@@ -1762,7 +1617,8 @@ found:
tbl->it_ops = &pnv_ioda1_iommu_ops;
pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
- iommu_init_table(tbl, phb->hose->node, 0, 0);
+ if (!iommu_init_table(tbl, phb->hose->node, 0, 0))
+ panic("Failed to initialize iommu table");
pe->dma_setup_done = true;
return;
@@ -1930,16 +1786,16 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift;
res_end = min(window_size, SZ_4G) >> tbl->it_page_shift;
}
- iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end);
- rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
+ if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end))
+ rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
+ else
+ rc = -ENOMEM;
if (rc) {
- pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
- rc);
+ pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", rc);
iommu_tce_table_put(tbl);
- return rc;
+ tbl = NULL; /* This clears iommu_table_base below */
}
-
if (!pnv_iommu_bypass_disabled)
pnv_pci_ioda2_set_bypass(pe, true);
@@ -2450,7 +2306,6 @@ static void pnv_pci_enable_bridges(void)
static void pnv_pci_ioda_fixup(void)
{
- pnv_pci_ioda_setup_nvlink();
pnv_pci_ioda_create_dbgfs();
pnv_pci_enable_bridges();
@@ -2823,15 +2678,6 @@ static void pnv_pci_release_device(struct pci_dev *pdev)
pnv_ioda_release_pe(pe);
}
-static void pnv_npu_disable_device(struct pci_dev *pdev)
-{
- struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
- struct eeh_pe *eehpe = edev ? edev->pe : NULL;
-
- if (eehpe && eeh_ops && eeh_ops->reset)
- eeh_ops->reset(eehpe, EEH_RESET_HOT);
-}
-
static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
@@ -2873,16 +2719,6 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
.shutdown = pnv_pci_ioda_shutdown,
};
-static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
- .setup_msi_irqs = pnv_setup_msi_irqs,
- .teardown_msi_irqs = pnv_teardown_msi_irqs,
- .enable_device_hook = pnv_pci_enable_device_hook,
- .window_alignment = pnv_pci_window_alignment,
- .reset_secondary_bus = pnv_pci_reset_secondary_bus,
- .shutdown = pnv_pci_ioda_shutdown,
- .disable_device = pnv_npu_disable_device,
-};
-
static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = {
.enable_device_hook = pnv_ocapi_enable_device_hook,
.release_device = pnv_pci_release_device,
@@ -2956,10 +2792,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb->model = PNV_PHB_MODEL_P7IOC;
else if (of_device_is_compatible(np, "ibm,power8-pciex"))
phb->model = PNV_PHB_MODEL_PHB3;
- else if (of_device_is_compatible(np, "ibm,power8-npu-pciex"))
- phb->model = PNV_PHB_MODEL_NPU;
- else if (of_device_is_compatible(np, "ibm,power9-npu-pciex"))
- phb->model = PNV_PHB_MODEL_NPU2;
else
phb->model = PNV_PHB_MODEL_UNKNOWN;
@@ -3117,9 +2949,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
switch (phb->type) {
- case PNV_PHB_NPU_NVLINK:
- hose->controller_ops = pnv_npu_ioda_controller_ops;
- break;
case PNV_PHB_NPU_OCAPI:
hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops;
break;
@@ -3172,11 +3001,6 @@ void __init pnv_pci_init_ioda2_phb(struct device_node *np)
pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}
-void __init pnv_pci_init_npu_phb(struct device_node *np)
-{
- pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_NVLINK);
-}
-
void __init pnv_pci_init_npu2_opencapi_phb(struct device_node *np)
{
pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_OCAPI);
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 9b9bca169275..6bb3c52633fb 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -711,7 +711,7 @@ int pnv_pci_cfg_write(struct pci_dn *pdn,
return PCIBIOS_SUCCESSFUL;
}
-#if CONFIG_EEH
+#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
struct eeh_dev *edev = NULL;
@@ -926,17 +926,6 @@ void __init pnv_pci_init(void)
for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
pnv_pci_init_ioda2_phb(np);
- /* Look for NPU PHBs */
- for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
- pnv_pci_init_npu_phb(np);
-
- /*
- * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
- * the exception of TCE kill which requires an OPAL call.
- */
- for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
- pnv_pci_init_npu_phb(np);
-
/* Look for NPU2 OpenCAPI PHBs */
for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
pnv_pci_init_npu2_opencapi_phb(np);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 36d22920f5a3..c8d4f222a86f 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -10,10 +10,9 @@
struct pci_dn;
enum pnv_phb_type {
- PNV_PHB_IODA1 = 0,
- PNV_PHB_IODA2 = 1,
- PNV_PHB_NPU_NVLINK = 2,
- PNV_PHB_NPU_OCAPI = 3,
+ PNV_PHB_IODA1,
+ PNV_PHB_IODA2,
+ PNV_PHB_NPU_OCAPI,
};
/* Precise PHB model for error management */
@@ -21,8 +20,6 @@ enum pnv_phb_model {
PNV_PHB_MODEL_UNKNOWN,
PNV_PHB_MODEL_P7IOC,
PNV_PHB_MODEL_PHB3,
- PNV_PHB_MODEL_NPU,
- PNV_PHB_MODEL_NPU2,
};
#define PNV_PCI_DIAG_BUF_SIZE 8192
@@ -81,7 +78,6 @@ struct pnv_ioda_pe {
/* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
struct iommu_table_group table_group;
- struct npu_comp *npucomp;
/* 64-bit TCE bypass region */
bool tce_bypass_enabled;
@@ -289,9 +285,7 @@ extern struct iommu_table *pnv_pci_table_alloc(int nid);
extern void pnv_pci_init_ioda_hub(struct device_node *np);
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
-extern void pnv_pci_init_npu_phb(struct device_node *np);
extern void pnv_pci_init_npu2_opencapi_phb(struct device_node *np);
-extern void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr);
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
@@ -314,11 +308,6 @@ extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
#define pe_info(pe, fmt, ...) \
pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)
-/* Nvlink functions */
-extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass);
-extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
-extern void pnv_pci_npu_setup_iommu_groups(void);
-
/* pci-ioda-tce.c */
#define POWERNV_IOMMU_DEFAULT_LEVELS 2
#define POWERNV_IOMMU_MAX_LEVELS 5
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index aadf932c4e61..a8db3f153063 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -157,7 +157,7 @@ static void __init pnv_check_guarded_cores(void)
for_each_node_by_type(dn, "cpu") {
if (of_property_match_string(dn, "status", "bad") >= 0)
bad_count++;
- };
+ }
if (bad_count) {
printk(" _ _______________\n");
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 73207b53dc2b..7e98b00ea2e8 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -169,6 +169,16 @@ static void update_hid_in_slw(u64 hid0)
}
}
+static inline void update_power8_hid0(unsigned long hid0)
+{
+ /*
+ * The HID0 update on Power8 should at the very least be
+ * preceded by a SYNC instruction followed by an ISYNC
+ * instruction
+	 * instruction.
+ asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
+}
+
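/*
 * Editor's sketch (assumption, not part of the patch): the split/unsplit
 * paths below are the callers, funnelling every HID0 write through this
 * helper so the sync/mtspr/isync ordering can never be skipped. With the
 * HID0 bit names from asm/reg.h, unsplit_core() would do roughly:
 *
 *	hid0 = mfspr(SPRN_HID0);
 *	hid0 &= ~HID0_POWER8_DYNLPARDIS;
 *	update_power8_hid0(hid0);
 *	update_hid_in_slw(hid0);
 */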
static void unsplit_core(void)
{
u64 hid0, mask;
diff --git a/arch/powerpc/platforms/powernv/vas-api.c b/arch/powerpc/platforms/powernv/vas-api.c
deleted file mode 100644
index 98ed5d8c5441..000000000000
--- a/arch/powerpc/platforms/powernv/vas-api.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * VAS user space API for its accelerators (Only NX-GZIP is supported now)
- * Copyright (C) 2019 Haren Myneni, IBM Corp
- */
-
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/cdev.h>
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <asm/vas.h>
-#include <uapi/asm/vas-api.h>
-#include "vas.h"
-
-/*
- * The driver creates the device node that can be used as follows:
- * For NX-GZIP
- *
- * fd = open("/dev/crypto/nx-gzip", O_RDWR);
- * rc = ioctl(fd, VAS_TX_WIN_OPEN, &attr);
- * paste_addr = mmap(NULL, PAGE_SIZE, prot, MAP_SHARED, fd, 0ULL).
- * vas_copy(&crb, 0, 1);
- * vas_paste(paste_addr, 0, 1);
- * close(fd) or exit process to close window.
- *
- * where "vas_copy" and "vas_paste" are defined in copy-paste.h.
- * copy/paste returns to the user space directly. So refer NX hardware
- * documententation for exact copy/paste usage and completion / error
- * conditions.
- */
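/*
 * Editor's sketch of the flow documented above as a user-space fragment.
 * Only uattr.version (checked to be 1 below) and the mmap offset of 0 are
 * taken from this file; the vas_id value, the 4K mapping length and the
 * err(3) error handling are assumptions for illustration:
 *
 *	struct vas_tx_win_open_attr attr = {
 *		.version = 1,
 *		.vas_id = -1,		// assumed: pick a default instance
 *	};
 *	int fd = open("/dev/crypto/nx-gzip", O_RDWR);
 *
 *	if (fd < 0 || ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0)
 *		err(1, "cannot open GZIP send window");
 *
 *	void *paste_addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				MAP_SHARED, fd, 0ULL);
 *	if (paste_addr == MAP_FAILED)
 *		err(1, "cannot map paste address");
 *
 *	// build a CRB, submit it with vas_copy()/vas_paste() as above,
 *	// then close(fd) (or exit the process) to close the window
 */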
-
-/*
- * Wrapper object for the nx-gzip device - there is just one instance of
- * this node for the whole system.
- */
-static struct coproc_dev {
- struct cdev cdev;
- struct device *device;
- char *name;
- dev_t devt;
- struct class *class;
- enum vas_cop_type cop_type;
-} coproc_device;
-
-struct coproc_instance {
- struct coproc_dev *coproc;
- struct vas_window *txwin;
-};
-
-static char *coproc_devnode(struct device *dev, umode_t *mode)
-{
- return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev));
-}
-
-static int coproc_open(struct inode *inode, struct file *fp)
-{
- struct coproc_instance *cp_inst;
-
- cp_inst = kzalloc(sizeof(*cp_inst), GFP_KERNEL);
- if (!cp_inst)
- return -ENOMEM;
-
- cp_inst->coproc = container_of(inode->i_cdev, struct coproc_dev,
- cdev);
- fp->private_data = cp_inst;
-
- return 0;
-}
-
-static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
-{
- void __user *uptr = (void __user *)arg;
- struct vas_tx_win_attr txattr = {};
- struct vas_tx_win_open_attr uattr;
- struct coproc_instance *cp_inst;
- struct vas_window *txwin;
- int rc, vasid;
-
- cp_inst = fp->private_data;
-
- /*
- * One window for file descriptor
- */
- if (cp_inst->txwin)
- return -EEXIST;
-
- rc = copy_from_user(&uattr, uptr, sizeof(uattr));
- if (rc) {
- pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
- return -EFAULT;
- }
-
- if (uattr.version != 1) {
- pr_err("Invalid version\n");
- return -EINVAL;
- }
-
- vasid = uattr.vas_id;
-
- vas_init_tx_win_attr(&txattr, cp_inst->coproc->cop_type);
-
- txattr.lpid = mfspr(SPRN_LPID);
- txattr.pidr = mfspr(SPRN_PID);
- txattr.user_win = true;
- txattr.rsvd_txbuf_count = false;
- txattr.pswid = false;
-
- pr_devel("Pid %d: Opening txwin, PIDR %ld\n", txattr.pidr,
- mfspr(SPRN_PID));
-
- txwin = vas_tx_win_open(vasid, cp_inst->coproc->cop_type, &txattr);
- if (IS_ERR(txwin)) {
- pr_err("%s() vas_tx_win_open() failed, %ld\n", __func__,
- PTR_ERR(txwin));
- return PTR_ERR(txwin);
- }
-
- cp_inst->txwin = txwin;
-
- return 0;
-}
-
-static int coproc_release(struct inode *inode, struct file *fp)
-{
- struct coproc_instance *cp_inst = fp->private_data;
-
- if (cp_inst->txwin) {
- vas_win_close(cp_inst->txwin);
- cp_inst->txwin = NULL;
- }
-
- kfree(cp_inst);
- fp->private_data = NULL;
-
- /*
- * We don't know here if user has other receive windows
- * open, so we can't really call clear_thread_tidr().
- * So, once the process calls set_thread_tidr(), the
- * TIDR value sticks around until process exits, resulting
- * in an extra copy in restore_sprs().
- */
-
- return 0;
-}
-
-static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
-{
- struct coproc_instance *cp_inst = fp->private_data;
- struct vas_window *txwin;
- unsigned long pfn;
- u64 paste_addr;
- pgprot_t prot;
- int rc;
-
- txwin = cp_inst->txwin;
-
- if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
- pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
- (vma->vm_end - vma->vm_start), PAGE_SIZE);
- return -EINVAL;
- }
-
- /* Ensure instance has an open send window */
- if (!txwin) {
- pr_err("%s(): No send window open?\n", __func__);
- return -EINVAL;
- }
-
- vas_win_paste_addr(txwin, &paste_addr, NULL);
- pfn = paste_addr >> PAGE_SHIFT;
-
- /* flags, page_prot from cxl_mmap(), except we want cachable */
- vma->vm_flags |= VM_IO | VM_PFNMAP;
- vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
-
- prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
-
- rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
- vma->vm_end - vma->vm_start, prot);
-
- pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
- paste_addr, vma->vm_start, rc);
-
- return rc;
-}
-
-static long coproc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case VAS_TX_WIN_OPEN:
- return coproc_ioc_tx_win_open(fp, arg);
- default:
- return -EINVAL;
- }
-}
-
-static struct file_operations coproc_fops = {
- .open = coproc_open,
- .release = coproc_release,
- .mmap = coproc_mmap,
- .unlocked_ioctl = coproc_ioctl,
-};
-
-/*
- * Supporting only nx-gzip coprocessor type now, but this API code
- * extended to other coprocessor types later.
- */
-int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
- const char *name)
-{
- int rc = -EINVAL;
- dev_t devno;
-
- rc = alloc_chrdev_region(&coproc_device.devt, 1, 1, name);
- if (rc) {
- pr_err("Unable to allocate coproc major number: %i\n", rc);
- return rc;
- }
-
- pr_devel("%s device allocated, dev [%i,%i]\n", name,
- MAJOR(coproc_device.devt), MINOR(coproc_device.devt));
-
- coproc_device.class = class_create(mod, name);
- if (IS_ERR(coproc_device.class)) {
- rc = PTR_ERR(coproc_device.class);
- pr_err("Unable to create %s class %d\n", name, rc);
- goto err_class;
- }
- coproc_device.class->devnode = coproc_devnode;
- coproc_device.cop_type = cop_type;
-
- coproc_fops.owner = mod;
- cdev_init(&coproc_device.cdev, &coproc_fops);
-
- devno = MKDEV(MAJOR(coproc_device.devt), 0);
- rc = cdev_add(&coproc_device.cdev, devno, 1);
- if (rc) {
- pr_err("cdev_add() failed %d\n", rc);
- goto err_cdev;
- }
-
- coproc_device.device = device_create(coproc_device.class, NULL,
- devno, NULL, name, MINOR(devno));
- if (IS_ERR(coproc_device.device)) {
- rc = PTR_ERR(coproc_device.device);
- pr_err("Unable to create coproc-%d %d\n", MINOR(devno), rc);
- goto err;
- }
-
- pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
- MINOR(devno));
-
- return 0;
-
-err:
- cdev_del(&coproc_device.cdev);
-err_cdev:
- class_destroy(coproc_device.class);
-err_class:
- unregister_chrdev_region(coproc_device.devt, 1);
- return rc;
-}
-EXPORT_SYMBOL_GPL(vas_register_coproc_api);
-
-void vas_unregister_coproc_api(void)
-{
- dev_t devno;
-
- cdev_del(&coproc_device.cdev);
- devno = MKDEV(MAJOR(coproc_device.devt), 0);
- device_destroy(coproc_device.class, devno);
-
- class_destroy(coproc_device.class);
- unregister_chrdev_region(coproc_device.devt, 1);
-}
-EXPORT_SYMBOL_GPL(vas_unregister_coproc_api);
diff --git a/arch/powerpc/platforms/powernv/vas-debug.c b/arch/powerpc/platforms/powernv/vas-debug.c
index 41fa90d2f4ab..3ce89a4b54be 100644
--- a/arch/powerpc/platforms/powernv/vas-debug.c
+++ b/arch/powerpc/platforms/powernv/vas-debug.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <asm/vas.h>
#include "vas.h"
static struct dentry *vas_debugfs;
@@ -28,7 +29,7 @@ static char *cop_to_str(int cop)
static int info_show(struct seq_file *s, void *private)
{
- struct vas_window *window = s->private;
+ struct pnv_vas_window *window = s->private;
mutex_lock(&vas_mutex);
@@ -36,9 +37,9 @@ static int info_show(struct seq_file *s, void *private)
if (!window->hvwc_map)
goto unlock;
- seq_printf(s, "Type: %s, %s\n", cop_to_str(window->cop),
+ seq_printf(s, "Type: %s, %s\n", cop_to_str(window->vas_win.cop),
window->tx_win ? "Send" : "Receive");
- seq_printf(s, "Pid : %d\n", vas_window_pid(window));
+ seq_printf(s, "Pid : %d\n", vas_window_pid(&window->vas_win));
unlock:
mutex_unlock(&vas_mutex);
@@ -47,7 +48,7 @@ unlock:
DEFINE_SHOW_ATTRIBUTE(info);
-static inline void print_reg(struct seq_file *s, struct vas_window *win,
+static inline void print_reg(struct seq_file *s, struct pnv_vas_window *win,
char *name, u32 reg)
{
seq_printf(s, "0x%016llx %s\n", read_hvwc_reg(win, name, reg), name);
@@ -55,7 +56,7 @@ static inline void print_reg(struct seq_file *s, struct vas_window *win,
static int hvwc_show(struct seq_file *s, void *private)
{
- struct vas_window *window = s->private;
+ struct pnv_vas_window *window = s->private;
mutex_lock(&vas_mutex);
@@ -103,8 +104,10 @@ unlock:
DEFINE_SHOW_ATTRIBUTE(hvwc);
-void vas_window_free_dbgdir(struct vas_window *window)
+void vas_window_free_dbgdir(struct pnv_vas_window *pnv_win)
{
+ struct vas_window *window = &pnv_win->vas_win;
+
if (window->dbgdir) {
debugfs_remove_recursive(window->dbgdir);
kfree(window->dbgname);
@@ -113,21 +116,21 @@ void vas_window_free_dbgdir(struct vas_window *window)
}
}
-void vas_window_init_dbgdir(struct vas_window *window)
+void vas_window_init_dbgdir(struct pnv_vas_window *window)
{
struct dentry *d;
if (!window->vinst->dbgdir)
return;
- window->dbgname = kzalloc(16, GFP_KERNEL);
- if (!window->dbgname)
+ window->vas_win.dbgname = kzalloc(16, GFP_KERNEL);
+ if (!window->vas_win.dbgname)
return;
- snprintf(window->dbgname, 16, "w%d", window->winid);
+ snprintf(window->vas_win.dbgname, 16, "w%d", window->vas_win.winid);
- d = debugfs_create_dir(window->dbgname, window->vinst->dbgdir);
- window->dbgdir = d;
+ d = debugfs_create_dir(window->vas_win.dbgname, window->vinst->dbgdir);
+ window->vas_win.dbgdir = d;
debugfs_create_file("info", 0444, d, window, &info_fops);
debugfs_create_file("hvwc", 0444, d, window, &hvwc_fops);
diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
index 3d21fce254b7..a7aabc18039e 100644
--- a/arch/powerpc/platforms/powernv/vas-fault.c
+++ b/arch/powerpc/platforms/powernv/vas-fault.c
@@ -26,150 +26,6 @@
*/
#define VAS_FAULT_WIN_FIFO_SIZE (4 << 20)
-static void dump_crb(struct coprocessor_request_block *crb)
-{
- struct data_descriptor_entry *dde;
- struct nx_fault_stamp *nx;
-
- dde = &crb->source;
- pr_devel("SrcDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
- be64_to_cpu(dde->address), be32_to_cpu(dde->length),
- dde->count, dde->index, dde->flags);
-
- dde = &crb->target;
- pr_devel("TgtDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
- be64_to_cpu(dde->address), be32_to_cpu(dde->length),
- dde->count, dde->index, dde->flags);
-
- nx = &crb->stamp.nx;
- pr_devel("NX Stamp: PSWID 0x%x, FSA 0x%llx, flags 0x%x, FS 0x%x\n",
- be32_to_cpu(nx->pswid),
- be64_to_cpu(crb->stamp.nx.fault_storage_addr),
- nx->flags, nx->fault_status);
-}
-
-/*
- * Update the CSB to indicate a translation error.
- *
- * User space will be polling on CSB after the request is issued.
- * If NX can handle the request without any issues, it updates CSB.
- * Whereas if NX encounters page fault, the kernel will handle the
- * fault and update CSB with translation error.
- *
- * If we are unable to update the CSB means copy_to_user failed due to
- * invalid csb_addr, send a signal to the process.
- */
-static void update_csb(struct vas_window *window,
- struct coprocessor_request_block *crb)
-{
- struct coprocessor_status_block csb;
- struct kernel_siginfo info;
- struct task_struct *tsk;
- void __user *csb_addr;
- struct pid *pid;
- int rc;
-
- /*
- * NX user space windows can not be opened for task->mm=NULL
- * and faults will not be generated for kernel requests.
- */
- if (WARN_ON_ONCE(!window->mm || !window->user_win))
- return;
-
- csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);
-
- memset(&csb, 0, sizeof(csb));
- csb.cc = CSB_CC_FAULT_ADDRESS;
- csb.ce = CSB_CE_TERMINATION;
- csb.cs = 0;
- csb.count = 0;
-
- /*
- * NX operates and returns in BE format as defined CRB struct.
- * So saves fault_storage_addr in BE as NX pastes in FIFO and
- * expects user space to convert to CPU format.
- */
- csb.address = crb->stamp.nx.fault_storage_addr;
- csb.flags = 0;
-
- pid = window->pid;
- tsk = get_pid_task(pid, PIDTYPE_PID);
- /*
- * Process closes send window after all pending NX requests are
- * completed. In multi-thread applications, a child thread can
- * open a window and can exit without closing it. May be some
- * requests are pending or this window can be used by other
- * threads later. We should handle faults if NX encounters
- * pages faults on these requests. Update CSB with translation
- * error and fault address. If csb_addr passed by user space is
- * invalid, send SEGV signal to pid saved in window. If the
- * child thread is not running, send the signal to tgid.
- * Parent thread (tgid) will close this window upon its exit.
- *
- * pid and mm references are taken when window is opened by
- * process (pid). So tgid is used only when child thread opens
- * a window and exits without closing it.
- */
- if (!tsk) {
- pid = window->tgid;
- tsk = get_pid_task(pid, PIDTYPE_PID);
- /*
- * Parent thread (tgid) will be closing window when it
- * exits. So should not get here.
- */
- if (WARN_ON_ONCE(!tsk))
- return;
- }
-
- /* Return if the task is exiting. */
- if (tsk->flags & PF_EXITING) {
- put_task_struct(tsk);
- return;
- }
-
- kthread_use_mm(window->mm);
- rc = copy_to_user(csb_addr, &csb, sizeof(csb));
- /*
- * User space polls on csb.flags (first byte). So add barrier
- * then copy first byte with csb flags update.
- */
- if (!rc) {
- csb.flags = CSB_V;
- /* Make sure update to csb.flags is visible now */
- smp_mb();
- rc = copy_to_user(csb_addr, &csb, sizeof(u8));
- }
- kthread_unuse_mm(window->mm);
- put_task_struct(tsk);
-
- /* Success */
- if (!rc)
- return;
-
- pr_debug("Invalid CSB address 0x%p signalling pid(%d)\n",
- csb_addr, pid_vnr(pid));
-
- clear_siginfo(&info);
- info.si_signo = SIGSEGV;
- info.si_errno = EFAULT;
- info.si_code = SEGV_MAPERR;
- info.si_addr = csb_addr;
-
- /*
- * process will be polling on csb.flags after request is sent to
- * NX. So generally CSB update should not fail except when an
- * application passes invalid csb_addr. So an error message will
- * be displayed and leave it to user space whether to ignore or
- * handle this signal.
- */
- rcu_read_lock();
- rc = kill_pid_info(SIGSEGV, &info, pid);
- rcu_read_unlock();
-
- pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
- pid_vnr(pid), rc);
-}
-
static void dump_fifo(struct vas_instance *vinst, void *entry)
{
unsigned long *end = vinst->fault_fifo + vinst->fault_fifo_size;
@@ -212,7 +68,7 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
struct vas_instance *vinst = data;
struct coprocessor_request_block *crb, *entry;
struct coprocessor_request_block buf;
- struct vas_window *window;
+ struct pnv_vas_window *window;
unsigned long flags;
void *fifo;
@@ -272,7 +128,7 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
vinst->vas_id, vinst->fault_fifo, fifo,
vinst->fault_crbs);
- dump_crb(crb);
+ vas_dump_crb(crb);
window = vas_pswid_to_window(vinst,
be32_to_cpu(crb->stamp.nx.pswid));
@@ -293,7 +149,14 @@ irqreturn_t vas_fault_thread_fn(int irq, void *data)
WARN_ON_ONCE(1);
} else {
- update_csb(window, crb);
+ /*
+ * NX sees faults only with user space windows.
+ */
+ if (window->user_win)
+ vas_update_csb(crb, &window->vas_win.task_ref);
+ else
+ WARN_ON_ONCE(!window->user_win);
+
/*
* Return credit for send window after processing
* fault CRB.
@@ -336,6 +199,7 @@ irqreturn_t vas_fault_handler(int irq, void *dev_id)
int vas_setup_fault_window(struct vas_instance *vinst)
{
struct vas_rx_win_attr attr;
+ struct vas_window *win;
vinst->fault_fifo_size = VAS_FAULT_WIN_FIFO_SIZE;
vinst->fault_fifo = kzalloc(vinst->fault_fifo_size, GFP_KERNEL);
@@ -364,18 +228,17 @@ int vas_setup_fault_window(struct vas_instance *vinst)
attr.lnotify_pid = mfspr(SPRN_PID);
attr.lnotify_tid = mfspr(SPRN_PID);
- vinst->fault_win = vas_rx_win_open(vinst->vas_id, VAS_COP_TYPE_FAULT,
- &attr);
-
- if (IS_ERR(vinst->fault_win)) {
- pr_err("VAS: Error %ld opening FaultWin\n",
- PTR_ERR(vinst->fault_win));
+ win = vas_rx_win_open(vinst->vas_id, VAS_COP_TYPE_FAULT, &attr);
+ if (IS_ERR(win)) {
+ pr_err("VAS: Error %ld opening FaultWin\n", PTR_ERR(win));
kfree(vinst->fault_fifo);
- return PTR_ERR(vinst->fault_win);
+ return PTR_ERR(win);
}
+ vinst->fault_win = container_of(win, struct pnv_vas_window, vas_win);
+
pr_devel("VAS: Created FaultWin %d, LPID/PID/TID [%d/%d/%d]\n",
- vinst->fault_win->winid, attr.lnotify_lpid,
+ vinst->fault_win->vas_win.winid, attr.lnotify_lpid,
attr.lnotify_pid, attr.lnotify_tid);
return 0;
diff --git a/arch/powerpc/platforms/powernv/vas-trace.h b/arch/powerpc/platforms/powernv/vas-trace.h
index a449b9f0c12e..ca2e08f2ddc0 100644
--- a/arch/powerpc/platforms/powernv/vas-trace.h
+++ b/arch/powerpc/platforms/powernv/vas-trace.h
@@ -80,7 +80,7 @@ TRACE_EVENT( vas_tx_win_open,
TRACE_EVENT( vas_paste_crb,
TP_PROTO(struct task_struct *tsk,
- struct vas_window *win),
+ struct pnv_vas_window *win),
TP_ARGS(tsk, win),
@@ -96,7 +96,7 @@ TRACE_EVENT( vas_paste_crb,
TP_fast_assign(
__entry->pid = tsk->pid;
__entry->vasid = win->vinst->vas_id;
- __entry->winid = win->winid;
+ __entry->winid = win->vas_win.winid;
__entry->paste_kaddr = (unsigned long)win->paste_kaddr
),
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index 5f5fe63a3d1c..0f8d39fbf2b2 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -16,6 +16,7 @@
#include <linux/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/ppc-opcode.h>
+#include <asm/vas.h>
#include "vas.h"
#include "copy-paste.h"
@@ -26,14 +27,14 @@
* Compute the paste address region for the window @window using the
* ->paste_base_addr and ->paste_win_id_shift we got from device tree.
*/
-void vas_win_paste_addr(struct vas_window *window, u64 *addr, int *len)
+void vas_win_paste_addr(struct pnv_vas_window *window, u64 *addr, int *len)
{
int winid;
u64 base, shift;
base = window->vinst->paste_base_addr;
shift = window->vinst->paste_win_id_shift;
- winid = window->winid;
+ winid = window->vas_win.winid;
*addr = base + (winid << shift);
if (len)
@@ -42,23 +43,23 @@ void vas_win_paste_addr(struct vas_window *window, u64 *addr, int *len)
pr_debug("Txwin #%d: Paste addr 0x%llx\n", winid, *addr);
}
-static inline void get_hvwc_mmio_bar(struct vas_window *window,
+static inline void get_hvwc_mmio_bar(struct pnv_vas_window *window,
u64 *start, int *len)
{
u64 pbaddr;
pbaddr = window->vinst->hvwc_bar_start;
- *start = pbaddr + window->winid * VAS_HVWC_SIZE;
+ *start = pbaddr + window->vas_win.winid * VAS_HVWC_SIZE;
*len = VAS_HVWC_SIZE;
}
-static inline void get_uwc_mmio_bar(struct vas_window *window,
+static inline void get_uwc_mmio_bar(struct pnv_vas_window *window,
u64 *start, int *len)
{
u64 pbaddr;
pbaddr = window->vinst->uwc_bar_start;
- *start = pbaddr + window->winid * VAS_UWC_SIZE;
+ *start = pbaddr + window->vas_win.winid * VAS_UWC_SIZE;
*len = VAS_UWC_SIZE;
}
@@ -67,7 +68,7 @@ static inline void get_uwc_mmio_bar(struct vas_window *window,
* space. Unlike MMIO regions (map_mmio_region() below), paste region must
* be mapped cache-able and is only applicable to send windows.
*/
-static void *map_paste_region(struct vas_window *txwin)
+static void *map_paste_region(struct pnv_vas_window *txwin)
{
int len;
void *map;
@@ -75,7 +76,7 @@ static void *map_paste_region(struct vas_window *txwin)
u64 start;
name = kasprintf(GFP_KERNEL, "window-v%d-w%d", txwin->vinst->vas_id,
- txwin->winid);
+ txwin->vas_win.winid);
if (!name)
goto free_name;
@@ -132,7 +133,7 @@ static void unmap_region(void *addr, u64 start, int len)
/*
* Unmap the paste address region for a window.
*/
-static void unmap_paste_region(struct vas_window *window)
+static void unmap_paste_region(struct pnv_vas_window *window)
{
int len;
u64 busaddr_start;
@@ -153,7 +154,7 @@ static void unmap_paste_region(struct vas_window *window)
* path, just minimize the time we hold the mutex for now. We can add
* a per-instance mutex later if necessary.
*/
-static void unmap_winctx_mmio_bars(struct vas_window *window)
+static void unmap_winctx_mmio_bars(struct pnv_vas_window *window)
{
int len;
void *uwc_map;
@@ -186,7 +187,7 @@ static void unmap_winctx_mmio_bars(struct vas_window *window)
* OS/User Window Context (UWC) MMIO Base Address Region for the given window.
* Map these bus addresses and save the mapped kernel addresses in @window.
*/
-static int map_winctx_mmio_bars(struct vas_window *window)
+static int map_winctx_mmio_bars(struct pnv_vas_window *window)
{
int len;
u64 start;
@@ -214,7 +215,7 @@ static int map_winctx_mmio_bars(struct vas_window *window)
* registers are not sequential. And, we can only write to offsets
* with valid registers.
*/
-static void reset_window_regs(struct vas_window *window)
+static void reset_window_regs(struct pnv_vas_window *window)
{
write_hvwc_reg(window, VREG(LPID), 0ULL);
write_hvwc_reg(window, VREG(PID), 0ULL);
@@ -270,7 +271,7 @@ static void reset_window_regs(struct vas_window *window)
* want to add fields to vas_winctx and move the initialization to
* init_vas_winctx_regs().
*/
-static void init_xlate_regs(struct vas_window *window, bool user_win)
+static void init_xlate_regs(struct pnv_vas_window *window, bool user_win)
{
u64 lpcr, val;
@@ -335,7 +336,7 @@ static void init_xlate_regs(struct vas_window *window, bool user_win)
*
* TODO: Reserved (aka dedicated) send buffers are not supported yet.
*/
-static void init_rsvd_tx_buf_count(struct vas_window *txwin,
+static void init_rsvd_tx_buf_count(struct pnv_vas_window *txwin,
struct vas_winctx *winctx)
{
write_hvwc_reg(txwin, VREG(TX_RSVD_BUF_COUNT), 0ULL);
@@ -357,7 +358,7 @@ static void init_rsvd_tx_buf_count(struct vas_window *txwin,
* as a one-time task? That could work for NX but what about other
* receivers? Let the receivers tell us the rx-fifo buffers for now.
*/
-static void init_winctx_regs(struct vas_window *window,
+static void init_winctx_regs(struct pnv_vas_window *window,
struct vas_winctx *winctx)
{
u64 val;
@@ -519,10 +520,10 @@ static int vas_assign_window_id(struct ida *ida)
return winid;
}
-static void vas_window_free(struct vas_window *window)
+static void vas_window_free(struct pnv_vas_window *window)
{
- int winid = window->winid;
struct vas_instance *vinst = window->vinst;
+ int winid = window->vas_win.winid;
unmap_winctx_mmio_bars(window);
@@ -533,10 +534,10 @@ static void vas_window_free(struct vas_window *window)
vas_release_window_id(&vinst->ida, winid);
}
-static struct vas_window *vas_window_alloc(struct vas_instance *vinst)
+static struct pnv_vas_window *vas_window_alloc(struct vas_instance *vinst)
{
int winid;
- struct vas_window *window;
+ struct pnv_vas_window *window;
winid = vas_assign_window_id(&vinst->ida);
if (winid < 0)
@@ -547,7 +548,7 @@ static struct vas_window *vas_window_alloc(struct vas_instance *vinst)
goto out_free;
window->vinst = vinst;
- window->winid = winid;
+ window->vas_win.winid = winid;
if (map_winctx_mmio_bars(window))
goto out_free;
@@ -562,7 +563,7 @@ out_free:
return ERR_PTR(-ENOMEM);
}
-static void put_rx_win(struct vas_window *rxwin)
+static void put_rx_win(struct pnv_vas_window *rxwin)
{
/* Better not be a send window! */
WARN_ON_ONCE(rxwin->tx_win);
@@ -578,10 +579,11 @@ static void put_rx_win(struct vas_window *rxwin)
*
* NOTE: We access ->windows[] table and assume that vinst->mutex is held.
*/
-static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
+static struct pnv_vas_window *get_user_rxwin(struct vas_instance *vinst,
+ u32 pswid)
{
int vasid, winid;
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
decode_pswid(pswid, &vasid, &winid);
@@ -590,7 +592,7 @@ static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
rxwin = vinst->windows[winid];
- if (!rxwin || rxwin->tx_win || rxwin->cop != VAS_COP_TYPE_FTW)
+ if (!rxwin || rxwin->tx_win || rxwin->vas_win.cop != VAS_COP_TYPE_FTW)
return ERR_PTR(-EINVAL);
return rxwin;
@@ -602,10 +604,10 @@ static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
*
* See also function header of set_vinst_win().
*/
-static struct vas_window *get_vinst_rxwin(struct vas_instance *vinst,
+static struct pnv_vas_window *get_vinst_rxwin(struct vas_instance *vinst,
enum vas_cop_type cop, u32 pswid)
{
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
mutex_lock(&vinst->mutex);
@@ -638,9 +640,9 @@ static struct vas_window *get_vinst_rxwin(struct vas_instance *vinst,
* window, we also save the window in the ->rxwin[] table.
*/
static void set_vinst_win(struct vas_instance *vinst,
- struct vas_window *window)
+ struct pnv_vas_window *window)
{
- int id = window->winid;
+ int id = window->vas_win.winid;
mutex_lock(&vinst->mutex);
@@ -649,8 +651,8 @@ static void set_vinst_win(struct vas_instance *vinst,
* unless its a user (FTW) window.
*/
if (!window->user_win && !window->tx_win) {
- WARN_ON_ONCE(vinst->rxwin[window->cop]);
- vinst->rxwin[window->cop] = window;
+ WARN_ON_ONCE(vinst->rxwin[window->vas_win.cop]);
+ vinst->rxwin[window->vas_win.cop] = window;
}
WARN_ON_ONCE(vinst->windows[id] != NULL);
@@ -663,16 +665,16 @@ static void set_vinst_win(struct vas_instance *vinst,
* Clear this window from the table(s) of windows for this VAS instance.
* See also function header of set_vinst_win().
*/
-static void clear_vinst_win(struct vas_window *window)
+static void clear_vinst_win(struct pnv_vas_window *window)
{
- int id = window->winid;
+ int id = window->vas_win.winid;
struct vas_instance *vinst = window->vinst;
mutex_lock(&vinst->mutex);
if (!window->user_win && !window->tx_win) {
- WARN_ON_ONCE(!vinst->rxwin[window->cop]);
- vinst->rxwin[window->cop] = NULL;
+ WARN_ON_ONCE(!vinst->rxwin[window->vas_win.cop]);
+ vinst->rxwin[window->vas_win.cop] = NULL;
}
WARN_ON_ONCE(vinst->windows[id] != window);
@@ -681,7 +683,7 @@ static void clear_vinst_win(struct vas_window *window)
mutex_unlock(&vinst->mutex);
}
-static void init_winctx_for_rxwin(struct vas_window *rxwin,
+static void init_winctx_for_rxwin(struct pnv_vas_window *rxwin,
struct vas_rx_win_attr *rxattr,
struct vas_winctx *winctx)
{
@@ -702,7 +704,7 @@ static void init_winctx_for_rxwin(struct vas_window *rxwin,
winctx->rx_fifo = rxattr->rx_fifo;
winctx->rx_fifo_size = rxattr->rx_fifo_size;
- winctx->wcreds_max = rxwin->wcreds_max;
+ winctx->wcreds_max = rxwin->vas_win.wcreds_max;
winctx->pin_win = rxattr->pin_win;
winctx->nx_win = rxattr->nx_win;
@@ -851,7 +853,7 @@ EXPORT_SYMBOL_GPL(vas_init_rx_win_attr);
struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
struct vas_rx_win_attr *rxattr)
{
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
struct vas_winctx winctx;
struct vas_instance *vinst;
@@ -870,21 +872,21 @@ struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
rxwin = vas_window_alloc(vinst);
if (IS_ERR(rxwin)) {
pr_devel("Unable to allocate memory for Rx window\n");
- return rxwin;
+ return (struct vas_window *)rxwin;
}
rxwin->tx_win = false;
rxwin->nx_win = rxattr->nx_win;
rxwin->user_win = rxattr->user_win;
- rxwin->cop = cop;
- rxwin->wcreds_max = rxattr->wcreds_max;
+ rxwin->vas_win.cop = cop;
+ rxwin->vas_win.wcreds_max = rxattr->wcreds_max;
init_winctx_for_rxwin(rxwin, rxattr, &winctx);
init_winctx_regs(rxwin, &winctx);
set_vinst_win(vinst, rxwin);
- return rxwin;
+ return &rxwin->vas_win;
}
EXPORT_SYMBOL_GPL(vas_rx_win_open);
@@ -905,7 +907,7 @@ void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, enum vas_cop_type cop)
}
EXPORT_SYMBOL_GPL(vas_init_tx_win_attr);
-static void init_winctx_for_txwin(struct vas_window *txwin,
+static void init_winctx_for_txwin(struct pnv_vas_window *txwin,
struct vas_tx_win_attr *txattr,
struct vas_winctx *winctx)
{
@@ -926,7 +928,7 @@ static void init_winctx_for_txwin(struct vas_window *txwin,
*/
memset(winctx, 0, sizeof(struct vas_winctx));
- winctx->wcreds_max = txwin->wcreds_max;
+ winctx->wcreds_max = txwin->vas_win.wcreds_max;
winctx->user_win = txattr->user_win;
winctx->nx_win = txwin->rxwin->nx_win;
@@ -946,13 +948,13 @@ static void init_winctx_for_txwin(struct vas_window *txwin,
winctx->lpid = txattr->lpid;
winctx->pidr = txattr->pidr;
- winctx->rx_win_id = txwin->rxwin->winid;
+ winctx->rx_win_id = txwin->rxwin->vas_win.winid;
/*
* IRQ and fault window setup is successful. Set fault window
	 * for the send window so that it is ready to handle faults.
*/
if (txwin->vinst->virq)
- winctx->fault_win_id = txwin->vinst->fault_win->winid;
+ winctx->fault_win_id = txwin->vinst->fault_win->vas_win.winid;
winctx->dma_type = VAS_DMA_TYPE_INJECT;
winctx->tc_mode = txattr->tc_mode;
@@ -962,7 +964,8 @@ static void init_winctx_for_txwin(struct vas_window *txwin,
winctx->irq_port = txwin->vinst->irq_port;
winctx->pswid = txattr->pswid ? txattr->pswid :
- encode_pswid(txwin->vinst->vas_id, txwin->winid);
+ encode_pswid(txwin->vinst->vas_id,
+ txwin->vas_win.winid);
}
static bool tx_win_args_valid(enum vas_cop_type cop,
@@ -993,8 +996,8 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
struct vas_tx_win_attr *attr)
{
int rc;
- struct vas_window *txwin;
- struct vas_window *rxwin;
+ struct pnv_vas_window *txwin;
+ struct pnv_vas_window *rxwin;
struct vas_winctx winctx;
struct vas_instance *vinst;
@@ -1020,7 +1023,7 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
rxwin = get_vinst_rxwin(vinst, cop, attr->pswid);
if (IS_ERR(rxwin)) {
pr_devel("No RxWin for vasid %d, cop %d\n", vasid, cop);
- return rxwin;
+ return (struct vas_window *)rxwin;
}
txwin = vas_window_alloc(vinst);
@@ -1029,12 +1032,12 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
goto put_rxwin;
}
- txwin->cop = cop;
+ txwin->vas_win.cop = cop;
txwin->tx_win = 1;
txwin->rxwin = rxwin;
txwin->nx_win = txwin->rxwin->nx_win;
txwin->user_win = attr->user_win;
- txwin->wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT;
+ txwin->vas_win.wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT;
init_winctx_for_txwin(txwin, attr, &winctx);
@@ -1064,56 +1067,16 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
rc = -ENODEV;
goto free_window;
}
-
- /*
- * Window opened by a child thread may not be closed when
- * it exits. So take reference to its pid and release it
- * when the window is free by parent thread.
- * Acquire a reference to the task's pid to make sure
- * pid will not be re-used - needed only for multithread
- * applications.
- */
- txwin->pid = get_task_pid(current, PIDTYPE_PID);
- /*
- * Acquire a reference to the task's mm.
- */
- txwin->mm = get_task_mm(current);
-
- if (!txwin->mm) {
- put_pid(txwin->pid);
- pr_err("VAS: pid(%d): mm_struct is not found\n",
- current->pid);
- rc = -EPERM;
+ rc = get_vas_user_win_ref(&txwin->vas_win.task_ref);
+ if (rc)
goto free_window;
- }
- mmgrab(txwin->mm);
- mmput(txwin->mm);
- mm_context_add_vas_window(txwin->mm);
- /*
- * Process closes window during exit. In the case of
- * multithread application, the child thread can open
- * window and can exit without closing it. Expects parent
- * thread to use and close the window. So do not need
- * to take pid reference for parent thread.
- */
- txwin->tgid = find_get_pid(task_tgid_vnr(current));
- /*
- * Even a process that has no foreign real address mapping can
- * use an unpaired COPY instruction (to no real effect). Issue
- * CP_ABORT to clear any pending COPY and prevent a covert
- * channel.
- *
- * __switch_to() will issue CP_ABORT on future context switches
- * if process / thread has any open VAS window (Use
- * current->mm->context.vas_windows).
- */
- asm volatile(PPC_CP_ABORT);
+ vas_user_win_add_mm_context(&txwin->vas_win.task_ref);
}
set_vinst_win(vinst, txwin);
- return txwin;
+ return &txwin->vas_win;
free_window:
vas_window_free(txwin);
@@ -1132,12 +1095,14 @@ int vas_copy_crb(void *crb, int offset)
EXPORT_SYMBOL_GPL(vas_copy_crb);
#define RMA_LSMP_REPORT_ENABLE PPC_BIT(53)
-int vas_paste_crb(struct vas_window *txwin, int offset, bool re)
+int vas_paste_crb(struct vas_window *vwin, int offset, bool re)
{
+ struct pnv_vas_window *txwin;
int rc;
void *addr;
uint64_t val;
+ txwin = container_of(vwin, struct pnv_vas_window, vas_win);
trace_vas_paste_crb(current, txwin);
/*
@@ -1167,7 +1132,7 @@ int vas_paste_crb(struct vas_window *txwin, int offset, bool re)
else
rc = -EINVAL;
- pr_debug("Txwin #%d: Msg count %llu\n", txwin->winid,
+ pr_debug("Txwin #%d: Msg count %llu\n", txwin->vas_win.winid,
read_hvwc_reg(txwin, VREG(LRFIFO_PUSH)));
return rc;
@@ -1187,7 +1152,7 @@ EXPORT_SYMBOL_GPL(vas_paste_crb);
* user space. (NX-842 driver waits for CSB and Fast thread-wakeup
* doesn't use credit checking).
*/
-static void poll_window_credits(struct vas_window *window)
+static void poll_window_credits(struct pnv_vas_window *window)
{
u64 val;
int creds, mode;
@@ -1217,7 +1182,7 @@ retry:
* and issue CRB Kill to stop all pending requests. Need only
* if there is a bug in NX or fault handling in kernel.
*/
- if (creds < window->wcreds_max) {
+ if (creds < window->vas_win.wcreds_max) {
val = 0;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(10));
@@ -1228,7 +1193,8 @@ retry:
*/
if (!(count % 1000))
pr_warn_ratelimited("VAS: pid %d stuck. Waiting for credits returned for Window(%d). creds %d, Retries %d\n",
- vas_window_pid(window), window->winid,
+ vas_window_pid(&window->vas_win),
+ window->vas_win.winid,
creds, count);
goto retry;
@@ -1240,7 +1206,7 @@ retry:
* short time to queue a CRB, so window should not be busy for too long.
* Trying 5ms intervals.
*/
-static void poll_window_busy_state(struct vas_window *window)
+static void poll_window_busy_state(struct pnv_vas_window *window)
{
int busy;
u64 val;
@@ -1260,7 +1226,8 @@ retry:
*/
if (!(count % 1000))
pr_warn_ratelimited("VAS: pid %d stuck. Window (ID=%d) is in busy state. Retries %d\n",
- vas_window_pid(window), window->winid, count);
+ vas_window_pid(&window->vas_win),
+ window->vas_win.winid, count);
goto retry;
}
@@ -1282,7 +1249,7 @@ retry:
* casting out becomes necessary we should consider offloading the
* job to a worker thread, so the window close can proceed quickly.
*/
-static void poll_window_castout(struct vas_window *window)
+static void poll_window_castout(struct pnv_vas_window *window)
{
/* stub for now */
}
@@ -1291,7 +1258,7 @@ static void poll_window_castout(struct vas_window *window)
* Unpin and close a window so no new requests are accepted and the
* hardware can evict this window from cache if necessary.
*/
-static void unpin_close_window(struct vas_window *window)
+static void unpin_close_window(struct pnv_vas_window *window)
{
u64 val;
@@ -1313,11 +1280,15 @@ static void unpin_close_window(struct vas_window *window)
*
* Besides the hardware, kernel has some bookkeeping of course.
*/
-int vas_win_close(struct vas_window *window)
+int vas_win_close(struct vas_window *vwin)
{
- if (!window)
+ struct pnv_vas_window *window;
+
+ if (!vwin)
return 0;
+ window = container_of(vwin, struct pnv_vas_window, vas_win);
+
if (!window->tx_win && atomic_read(&window->num_txwins) != 0) {
pr_devel("Attempting to close an active Rx window!\n");
WARN_ON_ONCE(1);
@@ -1339,12 +1310,8 @@ int vas_win_close(struct vas_window *window)
/* if send window, drop reference to matching receive window */
if (window->tx_win) {
if (window->user_win) {
- /* Drop references to pid and mm */
- put_pid(window->pid);
- if (window->mm) {
- mm_context_remove_vas_window(window->mm);
- mmdrop(window->mm);
- }
+ put_vas_user_win_ref(&vwin->task_ref);
+ mm_context_remove_vas_window(vwin->task_ref.mm);
}
put_rx_win(window->rxwin);
}
@@ -1377,7 +1344,7 @@ EXPORT_SYMBOL_GPL(vas_win_close);
 * - The kernel will return credit on fault window after reading entry
* from fault FIFO.
*/
-void vas_return_credit(struct vas_window *window, bool tx)
+void vas_return_credit(struct pnv_vas_window *window, bool tx)
{
uint64_t val;
@@ -1391,10 +1358,10 @@ void vas_return_credit(struct vas_window *window, bool tx)
}
}
-struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
+struct pnv_vas_window *vas_pswid_to_window(struct vas_instance *vinst,
uint32_t pswid)
{
- struct vas_window *window;
+ struct pnv_vas_window *window;
int winid;
if (!pswid) {
@@ -1431,13 +1398,74 @@ struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
* by NX).
*/
if (!window->tx_win || !window->user_win || !window->nx_win ||
- window->cop == VAS_COP_TYPE_FAULT ||
- window->cop == VAS_COP_TYPE_FTW) {
+ window->vas_win.cop == VAS_COP_TYPE_FAULT ||
+ window->vas_win.cop == VAS_COP_TYPE_FTW) {
pr_err("PSWID decode: id %d, tx %d, user %d, nx %d, cop %d\n",
winid, window->tx_win, window->user_win,
- window->nx_win, window->cop);
+ window->nx_win, window->vas_win.cop);
WARN_ON(1);
}
return window;
}
+
+static struct vas_window *vas_user_win_open(int vas_id, u64 flags,
+ enum vas_cop_type cop_type)
+{
+ struct vas_tx_win_attr txattr = {};
+
+ vas_init_tx_win_attr(&txattr, cop_type);
+
+ txattr.lpid = mfspr(SPRN_LPID);
+ txattr.pidr = mfspr(SPRN_PID);
+ txattr.user_win = true;
+ txattr.rsvd_txbuf_count = false;
+ txattr.pswid = false;
+
+ pr_devel("Pid %d: Opening txwin, PIDR %ld\n", txattr.pidr,
+ mfspr(SPRN_PID));
+
+ return vas_tx_win_open(vas_id, cop_type, &txattr);
+}
+
+static u64 vas_user_win_paste_addr(struct vas_window *txwin)
+{
+ struct pnv_vas_window *win;
+ u64 paste_addr;
+
+ win = container_of(txwin, struct pnv_vas_window, vas_win);
+ vas_win_paste_addr(win, &paste_addr, NULL);
+
+ return paste_addr;
+}
+
+static int vas_user_win_close(struct vas_window *txwin)
+{
+ vas_win_close(txwin);
+
+ return 0;
+}
+
+static const struct vas_user_win_ops vops = {
+ .open_win = vas_user_win_open,
+ .paste_addr = vas_user_win_paste_addr,
+ .close_win = vas_user_win_close,
+};
+
+/*
+ * Supporting only the nx-gzip coprocessor type for now, but this API
+ * code can be extended to other coprocessor types later.
+ */
+int vas_register_api_powernv(struct module *mod, enum vas_cop_type cop_type,
+ const char *name)
+{
+	return vas_register_coproc_api(mod, cop_type, name, &vops);
+}
+EXPORT_SYMBOL_GPL(vas_register_api_powernv);
+
+void vas_unregister_api_powernv(void)
+{
+ vas_unregister_coproc_api();
+}
+EXPORT_SYMBOL_GPL(vas_unregister_api_powernv);
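For context, a coprocessor driver is expected to pair these two exports around its module lifetime; a hedged sketch of the nx-gzip wiring (the function names are invented for illustration, VAS_COP_TYPE_GZIP is the enum vas_cop_type value from asm/vas.h):

static int __init nx_gzip_mod_init(void)
{
	/* creates /dev/crypto/nx-gzip on top of the vops above */
	return vas_register_api_powernv(THIS_MODULE, VAS_COP_TYPE_GZIP,
					"nx-gzip");
}

static void __exit nx_gzip_mod_exit(void)
{
	vas_unregister_api_powernv();
}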
diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h
index c7db3190baca..8bb08e395de0 100644
--- a/arch/powerpc/platforms/powernv/vas.h
+++ b/arch/powerpc/platforms/powernv/vas.h
@@ -334,11 +334,11 @@ struct vas_instance {
int fifo_in_progress; /* To wake up thread or return IRQ_HANDLED */
spinlock_t fault_lock; /* Protects fifo_in_progress update */
void *fault_fifo;
- struct vas_window *fault_win; /* Fault window */
+ struct pnv_vas_window *fault_win; /* Fault window */
struct mutex mutex;
- struct vas_window *rxwin[VAS_COP_TYPE_MAX];
- struct vas_window *windows[VAS_WINDOWS_PER_CHIP];
+ struct pnv_vas_window *rxwin[VAS_COP_TYPE_MAX];
+ struct pnv_vas_window *windows[VAS_WINDOWS_PER_CHIP];
char *name;
char *dbgname;
@@ -346,32 +346,24 @@ struct vas_instance {
};
/*
- * In-kernel state a VAS window. One per window.
+ * In-kernel state of a VAS window on PowerNV. One per window.
*/
-struct vas_window {
+struct pnv_vas_window {
+ struct vas_window vas_win;
/* Fields common to send and receive windows */
struct vas_instance *vinst;
- int winid;
bool tx_win; /* True if send window */
bool nx_win; /* True if NX window */
bool user_win; /* True if user space window */
void *hvwc_map; /* HV window context */
void *uwc_map; /* OS/User window context */
- struct pid *pid; /* Linux process id of owner */
- struct pid *tgid; /* Thread group ID of owner */
- struct mm_struct *mm; /* Linux process mm_struct */
- int wcreds_max; /* Window credits */
-
- char *dbgname;
- struct dentry *dbgdir;
/* Fields applicable only to send windows */
void *paste_kaddr;
char *paste_addr_name;
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
- /* Feilds applicable only to receive windows */
- enum vas_cop_type cop;
+ /* Fields applicable only to receive windows */
atomic_t num_txwins;
};
@@ -430,32 +422,32 @@ extern struct mutex vas_mutex;
extern struct vas_instance *find_vas_instance(int vasid);
extern void vas_init_dbgdir(void);
extern void vas_instance_init_dbgdir(struct vas_instance *vinst);
-extern void vas_window_init_dbgdir(struct vas_window *win);
-extern void vas_window_free_dbgdir(struct vas_window *win);
+extern void vas_window_init_dbgdir(struct pnv_vas_window *win);
+extern void vas_window_free_dbgdir(struct pnv_vas_window *win);
extern int vas_setup_fault_window(struct vas_instance *vinst);
extern irqreturn_t vas_fault_thread_fn(int irq, void *data);
extern irqreturn_t vas_fault_handler(int irq, void *dev_id);
-extern void vas_return_credit(struct vas_window *window, bool tx);
-extern struct vas_window *vas_pswid_to_window(struct vas_instance *vinst,
+extern void vas_return_credit(struct pnv_vas_window *window, bool tx);
+extern struct pnv_vas_window *vas_pswid_to_window(struct vas_instance *vinst,
uint32_t pswid);
-extern void vas_win_paste_addr(struct vas_window *window, u64 *addr,
- int *len);
+extern void vas_win_paste_addr(struct pnv_vas_window *window, u64 *addr,
+ int *len);
static inline int vas_window_pid(struct vas_window *window)
{
- return pid_vnr(window->pid);
+ return pid_vnr(window->task_ref.pid);
}
-static inline void vas_log_write(struct vas_window *win, char *name,
+static inline void vas_log_write(struct pnv_vas_window *win, char *name,
void *regptr, u64 val)
{
if (val)
pr_debug("%swin #%d: %s reg %p, val 0x%016llx\n",
- win->tx_win ? "Tx" : "Rx", win->winid, name,
- regptr, val);
+ win->tx_win ? "Tx" : "Rx", win->vas_win.winid,
+ name, regptr, val);
}
-static inline void write_uwc_reg(struct vas_window *win, char *name,
+static inline void write_uwc_reg(struct pnv_vas_window *win, char *name,
s32 reg, u64 val)
{
void *regptr;
@@ -466,7 +458,7 @@ static inline void write_uwc_reg(struct vas_window *win, char *name,
out_be64(regptr, val);
}
-static inline void write_hvwc_reg(struct vas_window *win, char *name,
+static inline void write_hvwc_reg(struct pnv_vas_window *win, char *name,
s32 reg, u64 val)
{
void *regptr;
@@ -477,7 +469,7 @@ static inline void write_hvwc_reg(struct vas_window *win, char *name,
out_be64(regptr, val);
}
-static inline u64 read_hvwc_reg(struct vas_window *win,
+static inline u64 read_hvwc_reg(struct pnv_vas_window *win,
char *name __maybe_unused, s32 reg)
{
return in_be64(win->hvwc_map+reg);
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index e32406e918d0..a4048b8c8c50 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -7,6 +7,7 @@ config PPC_PS3
select USB_OHCI_BIG_ENDIAN_MMIO
select USB_EHCI_BIG_ENDIAN_MMIO
select HAVE_PCI
+ select IRQ_DOMAIN_NOMAP
help
This option enables support for the Sony PS3 game console
and other platforms using the PS3 hypervisor. Enabling this
@@ -85,6 +86,15 @@ config PS3_SYS_MANAGER
This support is required for PS3 system control. In
general, all users will say Y or M.
+config PS3_VERBOSE_RESULT
+ bool "PS3 Verbose LV1 hypercall results" if PS3_ADVANCED
+ depends on PPC_PS3
+ help
+ Enables more verbose log messages for LV1 hypercall results.
+
+ If in doubt, say N here and reduce the size of the kernel by a
+ small amount.
+
config PS3_REPOSITORY_WRITE
bool "PS3 Repository write support" if PS3_ADVANCED
depends on PPC_PS3
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 78f2339ed5cb..49871427f599 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
@@ -45,7 +46,7 @@
* implementation equates HV plug value to Linux virq value, constrains each
* interrupt to have a system wide unique plug number, and limits the range
* of the plug values to map into the first dword of the bitmaps. This
- * gives a usable range of plug values of {NUM_ISA_INTERRUPTS..63}. Note
+ * gives a usable range of plug values of {NR_IRQS_LEGACY..63}. Note
* that there is no constraint on how many in this set an individual thread
* can acquire.
*
@@ -721,7 +722,7 @@ static unsigned int ps3_get_irq(void)
}
#if defined(DEBUG)
- if (unlikely(plug < NUM_ISA_INTERRUPTS || plug > PS3_PLUG_MAX)) {
+ if (unlikely(plug < NR_IRQS_LEGACY || plug > PS3_PLUG_MAX)) {
dump_bmp(&per_cpu(ps3_private, 0));
dump_bmp(&per_cpu(ps3_private, 1));
BUG();
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index d094321964fb..a81eac35d900 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -6,6 +6,7 @@
* Copyright 2006 Sony Corp.
*/
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/memblock.h>
@@ -1118,6 +1119,7 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev,
enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
unsigned long lpar_addr;
+ int result;
lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;
@@ -1129,6 +1131,16 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev,
r->offset -= map.r1.offset;
r->len = len ? len : ALIGN(map.total, 1 << r->page_size);
+ dev->core.dma_mask = &r->dma_mask;
+
+ result = dma_set_mask_and_coherent(&dev->core, DMA_BIT_MASK(32));
+
+ if (result < 0) {
+ dev_err(&dev->core, "%s:%d: dma_set_mask_and_coherent failed: %d\n",
+ __func__, __LINE__, result);
+ return result;
+ }
+
switch (dev->dev_type) {
case PS3_DEVICE_TYPE_SB:
r->region_ops = (USE_DYNAMIC_DMA)
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index e9ae5dd03593..3de9145c20bc 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -36,6 +36,7 @@ DEFINE_MUTEX(ps3_gpu_mutex);
EXPORT_SYMBOL_GPL(ps3_gpu_mutex);
static union ps3_firmware_version ps3_firmware_version;
+static char ps3_firmware_version_str[16];
void ps3_get_firmware_version(union ps3_firmware_version *v)
{
@@ -182,6 +183,40 @@ static int ps3_set_dabr(unsigned long dabr, unsigned long dabrx)
return lv1_set_dabr(dabr, dabrx) ? -1 : 0;
}
+static ssize_t ps3_fw_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", ps3_firmware_version_str);
+}
+
+static int __init ps3_setup_sysfs(void)
+{
+ static struct kobj_attribute attr = __ATTR(fw-version, S_IRUGO,
+ ps3_fw_version_show, NULL);
+ static struct kobject *kobj;
+ int result;
+
+ kobj = kobject_create_and_add("ps3", firmware_kobj);
+
+ if (!kobj) {
+ pr_warn("%s:%d: kobject_create_and_add failed.\n", __func__,
+ __LINE__);
+ return -ENOMEM;
+ }
+
+ result = sysfs_create_file(kobj, &attr.attr);
+
+ if (result) {
+ pr_warn("%s:%d: sysfs_create_file failed.\n", __func__,
+ __LINE__);
+ kobject_put(kobj);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+core_initcall(ps3_setup_sysfs);
+
static void __init ps3_setup_arch(void)
{
u64 tmp;
@@ -190,9 +225,11 @@ static void __init ps3_setup_arch(void)
lv1_get_version_info(&ps3_firmware_version.raw, &tmp);
- printk(KERN_INFO "PS3 firmware version %u.%u.%u\n",
- ps3_firmware_version.major, ps3_firmware_version.minor,
- ps3_firmware_version.rev);
+ snprintf(ps3_firmware_version_str, sizeof(ps3_firmware_version_str),
+ "%u.%u.%u", ps3_firmware_version.major,
+ ps3_firmware_version.minor, ps3_firmware_version.rev);
+
+ printk(KERN_INFO "PS3 firmware version %s\n", ps3_firmware_version_str);
ps3_spu_set_platform();
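With the "ps3" kobject parented to firmware_kobj, the version string should surface as a sysfs attribute. A small userspace sketch that reads it back; the /sys/firmware/ps3/fw-version path is inferred from the kobject and attribute names above:

#include <stdio.h>

int main(void)
{
	char buf[32];
	/* Path derived from the "ps3" kobject under firmware_kobj. */
	FILE *f = fopen("/sys/firmware/ps3/fw-version", "r");

	if (!f) {
		perror("fw-version");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("PS3 firmware: %s\n", buf);
	fclose(f);
	return 0;
}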
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index b431f41c6cb5..1a5665875165 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -64,9 +64,10 @@ static int ps3_open_hv_device_sb(struct ps3_system_bus_device *dev)
result = lv1_open_device(dev->bus_id, dev->dev_id, 0);
if (result) {
- pr_debug("%s:%d: lv1_open_device failed: %s\n", __func__,
- __LINE__, ps3_result(result));
- result = -EPERM;
+ pr_warn("%s:%d: lv1_open_device dev=%u.%u(%s) failed: %s\n",
+ __func__, __LINE__, dev->match_id, dev->match_sub_id,
+ dev_name(&dev->core), ps3_result(result));
+ result = -EPERM;
}
done:
@@ -120,7 +121,7 @@ static int ps3_open_hv_device_gpu(struct ps3_system_bus_device *dev)
result = lv1_gpu_open(0);
if (result) {
- pr_debug("%s:%d: lv1_gpu_open failed: %s\n", __func__,
+ pr_warn("%s:%d: lv1_gpu_open failed: %s\n", __func__,
__LINE__, ps3_result(result));
result = -EPERM;
}
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index c8a2b0b05ac0..4cda0ef87be0 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_PPC_SVM) += svm.o
obj-$(CONFIG_FA_DUMP) += rtas-fadump.o
obj-$(CONFIG_SUSPEND) += suspend.o
+obj-$(CONFIG_PPC_VAS) += vas.o
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 233503fcf8f0..b1f01ac0c29e 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -289,8 +289,7 @@ int dlpar_acquire_drc(u32 drc_index)
{
int dr_status, rc;
- rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
- DR_ENTITY_SENSE, drc_index);
+ rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
if (rc || dr_status != DR_ENTITY_UNUSABLE)
return -1;
@@ -311,8 +310,7 @@ int dlpar_release_drc(u32 drc_index)
{
int dr_status, rc;
- rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
- DR_ENTITY_SENSE, drc_index);
+ rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
if (rc || dr_status != DR_ENTITY_PRESENT)
return -1;
@@ -329,6 +327,19 @@ int dlpar_release_drc(u32 drc_index)
return 0;
}
+int dlpar_unisolate_drc(u32 drc_index)
+{
+ int dr_status, rc;
+
+ rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
+ if (rc || dr_status != DR_ENTITY_PRESENT)
+ return -1;
+
+ rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
+
+ return 0;
+}
+
int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
int rc;
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 12cbffd3c2e3..7e970f81d8ff 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -47,9 +47,6 @@ static void rtas_stop_self(void)
BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
- printk("cpu %u (hwid %u) Ready to die...\n",
- smp_processor_id(), hard_smp_processor_id());
-
rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
panic("Alas, I survived.\n");
@@ -271,6 +268,19 @@ static int dlpar_offline_cpu(struct device_node *dn)
if (!cpu_online(cpu))
break;
+ /*
+ * device_offline() will return -EBUSY (via cpu_down()) if there
+ * is only one CPU left. Check it here to fail earlier and with a
+ * more informative error message, while also retaining the
+ * cpu_add_remove_lock to be sure that no CPUs are being
+ * onlined/offlined during this check.
+ */
+ if (num_online_cpus() == 1) {
+ pr_warn("Unable to remove last online CPU %pOFn\n", dn);
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+
cpu_maps_update_done();
rc = device_offline(get_cpu_device(cpu));
if (rc)
@@ -283,6 +293,7 @@ static int dlpar_offline_cpu(struct device_node *dn)
thread);
}
}
+out_unlock:
cpu_maps_update_done();
out:
@@ -802,8 +813,16 @@ int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
case PSERIES_HP_ELOG_ACTION_REMOVE:
if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
rc = dlpar_cpu_remove_by_count(count);
- else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
+ else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
rc = dlpar_cpu_remove_by_index(drc_index);
+ /*
+ * Setting the isolation state of an UNISOLATED/CONFIGURED
+ * device to UNISOLATE is a no-op, but the hypervisor can
+ * use it as a hint that the CPU removal failed.
+ */
+ if (rc)
+ dlpar_unisolate_drc(drc_index);
+ }
else
rc = -EINVAL;
break;
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 8377f1f7c78e..377d852f5a9a 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -348,7 +348,8 @@ static int pseries_remove_mem_node(struct device_node *np)
static bool lmb_is_removable(struct drmem_lmb *lmb)
{
- if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
+ if ((lmb->flags & DRCONF_MEM_RESERVED) ||
+ !(lmb->flags & DRCONF_MEM_ASSIGNED))
return false;
#ifdef CONFIG_FA_DUMP
@@ -401,7 +402,7 @@ static int dlpar_remove_lmb(struct drmem_lmb *lmb)
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
struct drmem_lmb *lmb;
- int lmbs_removed = 0;
+ int lmbs_reserved = 0;
int lmbs_available = 0;
int rc;
@@ -435,12 +436,12 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
*/
drmem_mark_lmb_reserved(lmb);
- lmbs_removed++;
- if (lmbs_removed == lmbs_to_remove)
+ lmbs_reserved++;
+ if (lmbs_reserved == lmbs_to_remove)
break;
}
- if (lmbs_removed != lmbs_to_remove) {
+ if (lmbs_reserved != lmbs_to_remove) {
pr_err("Memory hot-remove failed, adding LMB's back\n");
for_each_drmem_lmb(lmb) {
@@ -453,6 +454,10 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
+
+ lmbs_reserved--;
+ if (lmbs_reserved == 0)
+ break;
}
rc = -EINVAL;
@@ -466,6 +471,10 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
lmb->base_addr);
drmem_remove_lmb_reservation(lmb);
+
+ lmbs_reserved--;
+ if (lmbs_reserved == 0)
+ break;
}
rc = 0;
}
@@ -508,7 +517,6 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
struct drmem_lmb *lmb, *start_lmb, *end_lmb;
- int lmbs_available = 0;
int rc;
pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
@@ -521,18 +529,29 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
if (rc)
return -EINVAL;
- /* Validate that there are enough LMBs to satisfy the request */
+ /*
+ * Validate that all LMBs in range are not reserved. Note that it
+ * is ok if they are !ASSIGNED since our goal here is to remove the
+ * LMB range, regardless of whether some LMBs were already removed
+ * for any other reason.
+ *
+ * This is in contrast to what is done in remove_by_count(), where we
+ * check for both RESERVED and !ASSIGNED (via lmb_is_removable()),
+ * because we want to remove a fixed number of LMBs in that function.
+ */
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
- if (lmb->flags & DRCONF_MEM_RESERVED)
- break;
-
- lmbs_available++;
+ if (lmb->flags & DRCONF_MEM_RESERVED) {
+ pr_err("Memory at %llx (drc index %x) is reserved\n",
+ lmb->base_addr, lmb->drc_index);
+ return -EINVAL;
+ }
}
- if (lmbs_available < lmbs_to_remove)
- return -EINVAL;
-
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
+ /*
+ * dlpar_remove_lmb() will error out if the LMB is already
+ * !ASSIGNED, but this case is a no-op for us.
+ */
if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
continue;
@@ -551,6 +570,13 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
if (!drmem_lmb_reserved(lmb))
continue;
+ /*
+ * Setting the isolation state of an UNISOLATED/CONFIGURED
+ * device to UNISOLATE is a no-op, but the hypervisor can
+ * use it as a hint that the LMB removal failed.
+ */
+ dlpar_unisolate_drc(lmb->drc_index);
+
rc = dlpar_add_lmb(lmb);
if (rc)
pr_err("Failed to add LMB, drc index %x\n",
@@ -585,10 +611,6 @@ static inline int pseries_remove_mem_node(struct device_node *np)
{
return 0;
}
-static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
-{
- return -EOPNOTSUPP;
-}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
return -EOPNOTSUPP;
@@ -651,7 +673,7 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
struct drmem_lmb *lmb;
int lmbs_available = 0;
- int lmbs_added = 0;
+ int lmbs_reserved = 0;
int rc;
pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
@@ -661,6 +683,9 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
/* Validate that there are enough LMBs to satisfy the request */
for_each_drmem_lmb(lmb) {
+ if (lmb->flags & DRCONF_MEM_RESERVED)
+ continue;
+
if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
lmbs_available++;
@@ -689,13 +714,12 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
* requested LMBs cannot be added.
*/
drmem_mark_lmb_reserved(lmb);
-
- lmbs_added++;
- if (lmbs_added == lmbs_to_add)
+ lmbs_reserved++;
+ if (lmbs_reserved == lmbs_to_add)
break;
}
- if (lmbs_added != lmbs_to_add) {
+ if (lmbs_reserved != lmbs_to_add) {
pr_err("Memory hot-add failed, removing any added LMBs\n");
for_each_drmem_lmb(lmb) {
@@ -710,6 +734,10 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
dlpar_release_drc(lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
+ lmbs_reserved--;
+
+ if (lmbs_reserved == 0)
+ break;
}
rc = -EINVAL;
} else {
@@ -720,6 +748,10 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
pr_debug("Memory at %llx (drc index %x) was hot-added\n",
lmb->base_addr, lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
+ lmbs_reserved--;
+
+ if (lmbs_reserved == 0)
+ break;
}
rc = 0;
}
@@ -764,7 +796,6 @@ static int dlpar_memory_add_by_index(u32 drc_index)
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
struct drmem_lmb *lmb, *start_lmb, *end_lmb;
- int lmbs_available = 0;
int rc;
pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
@@ -779,15 +810,14 @@ static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
/* Validate that the LMBs in this range are not reserved */
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
- if (lmb->flags & DRCONF_MEM_RESERVED)
- break;
-
- lmbs_available++;
+ /* Fail immediately if the whole range can't be hot-added */
+ if (lmb->flags & DRCONF_MEM_RESERVED) {
+ pr_err("Memory at %llx (drc index %x) is reserved\n",
+ lmb->base_addr, lmb->drc_index);
+ return -EINVAL;
+ }
}
- if (lmbs_available < lmbs_to_add)
- return -EINVAL;
-
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
if (lmb->flags & DRCONF_MEM_ASSIGNED)
continue;
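Both the add and remove paths above use the same reservation bookkeeping: count LMBs as they are marked reserved, and on a shortfall walk the list again, releasing marks and breaking out as soon as the count reaches zero rather than scanning every LMB. A reduced, runnable sketch of that pattern with illustrative types:

#include <stdbool.h>
#include <stdio.h>

#define NR_LMBS 8

struct lmb {
	bool assigned;
	bool reserved;
};

/* Try to reserve 'want' unassigned LMBs; roll back on shortfall. */
static int reserve_lmbs(struct lmb *lmbs, int want)
{
	int reserved = 0;

	for (int i = 0; i < NR_LMBS && reserved < want; i++) {
		if (lmbs[i].assigned || lmbs[i].reserved)
			continue;
		lmbs[i].reserved = true;
		reserved++;
	}

	if (reserved == want)
		return 0;

	/* Rollback: release exactly 'reserved' marks, then stop early. */
	for (int i = 0; i < NR_LMBS && reserved > 0; i++) {
		if (!lmbs[i].reserved)
			continue;
		lmbs[i].reserved = false;
		reserved--;
	}
	return -1;
}

int main(void)
{
	struct lmb lmbs[NR_LMBS] = { [0].assigned = true, [3].assigned = true };

	printf("reserve 4: %s\n", reserve_lmbs(lmbs, 4) ? "failed" : "ok");
	printf("reserve 7: %s\n", reserve_lmbs(lmbs, 7) ? "failed" : "ok");
	return 0;
}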
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 2136e42833af..ab9fc6506861 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -102,6 +102,20 @@ END_FTR_SECTION(0, 1); \
#define HCALL_BRANCH(LABEL)
#endif
+_GLOBAL_TOC(plpar_hcall_norets_notrace)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+ HVSC /* invoke the hypervisor */
+
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+ blr /* return r3 = status */
+
_GLOBAL_TOC(plpar_hcall_norets)
HMT_MEDIUM
@@ -110,6 +124,9 @@ _GLOBAL_TOC(plpar_hcall_norets)
HCALL_BRANCH(plpar_hcall_norets_trace)
HVSC /* invoke the hypervisor */
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
blr /* return r3 = status */
@@ -119,6 +136,10 @@ plpar_hcall_norets_trace:
HCALL_INST_PRECALL(R4)
HVSC
HCALL_INST_POSTCALL_NORETS
+
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
blr
@@ -149,6 +170,9 @@ _GLOBAL_TOC(plpar_hcall)
std r6, 16(r12)
std r7, 24(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -178,6 +202,9 @@ plpar_hcall_trace:
HCALL_INST_POSTCALL(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -213,6 +240,9 @@ _GLOBAL(plpar_hcall_raw)
std r6, 16(r12)
std r7, 24(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -252,6 +282,9 @@ _GLOBAL_TOC(plpar_hcall9)
std r11,56(r12)
std r0, 64(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -290,6 +323,9 @@ plpar_hcall9_trace:
HCALL_INST_POSTCALL(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -329,6 +365,9 @@ _GLOBAL(plpar_hcall9_raw)
std r11,56(r12)
std r0, 64(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index 2c59b4986ea5..3a50612a78db 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -26,7 +26,7 @@ struct hcall_stats {
};
#define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1)
-DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats);
+static DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats);
/*
* Routines for displaying the statistics in debugfs
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index a15ab33646b3..c6c79ef55e13 100644
--- a/arch/powerpc/platforms/pseries/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -42,6 +42,7 @@
#include <linux/kobject.h>
#include <linux/dma-map-ops.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stat.h>
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 9fc5217f0c8e..0c55b991f665 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -638,7 +638,8 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
iommu_table_setparms(pci->phb, dn, tbl);
tbl->it_ops = &iommu_table_pseries_ops;
- iommu_init_table(tbl, pci->phb->node, 0, 0);
+ if (!iommu_init_table(tbl, pci->phb->node, 0, 0))
+ panic("Failed to initialize iommu table");
/* Divide the rest (1.75GB) among the children */
pci->phb->dma_window_size = 0x80000000ul;
@@ -720,7 +721,8 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
ppci->table_group, dma_window);
tbl->it_ops = &iommu_table_lpar_multi_ops;
- iommu_init_table(tbl, ppci->phb->node, 0, 0);
+ if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
+ panic("Failed to initialize iommu table");
iommu_register_group(ppci->table_group,
pci_domain_nr(bus), 0);
pr_debug(" created table: %p\n", ppci->table_group);
@@ -749,7 +751,9 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
tbl = PCI_DN(dn)->table_group->tables[0];
iommu_table_setparms(phb, dn, tbl);
tbl->it_ops = &iommu_table_pseries_ops;
- iommu_init_table(tbl, phb->node, 0, 0);
+ if (!iommu_init_table(tbl, phb->node, 0, 0))
+ panic("Failed to initialize iommu table");
+
set_iommu_table_base(&dev->dev, tbl);
return;
}
@@ -1099,6 +1103,33 @@ static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
ret);
}
+/* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */
+static int iommu_get_page_shift(u32 query_page_size)
+{
+ /* Supported IO page-sizes according to LoPAR */
+ const int shift[] = {
+ __builtin_ctzll(SZ_4K), __builtin_ctzll(SZ_64K), __builtin_ctzll(SZ_16M),
+ __builtin_ctzll(SZ_32M), __builtin_ctzll(SZ_64M), __builtin_ctzll(SZ_128M),
+ __builtin_ctzll(SZ_256M), __builtin_ctzll(SZ_16G)
+ };
+
+ int i = ARRAY_SIZE(shift) - 1;
+
+ /*
+ * On LoPAR, ibm,query-pe-dma-window outputs "IO Page Sizes" using a bit field:
+ * - bit 31 means 4k pages are supported,
+ * - bit 30 means 64k pages are supported, and so on.
+ * Larger page sizes map more memory with the same number of TCEs, so start probing from the largest.
+ */
+ for (; i >= 0 ; i--) {
+ if (query_page_size & (1 << i))
+ return shift[i];
+ }
+
+ /* No valid page size found. */
+ return 0;
+}
+
/*
* If the PE supports dynamic dma windows, and there is space for a table
* that can map all pages in a linear offset, then setup such a table,
@@ -1206,13 +1237,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
goto out_failed;
}
}
- if (query.page_size & 4) {
- page_shift = 24; /* 16MB */
- } else if (query.page_size & 2) {
- page_shift = 16; /* 64kB */
- } else if (query.page_size & 1) {
- page_shift = 12; /* 4kB */
- } else {
+
+ page_shift = iommu_get_page_shift(query.page_size);
+ if (!page_shift) {
dev_dbg(&dev->dev, "no supported direct page size in mask %x",
query.page_size);
goto out_failed;
@@ -1229,7 +1256,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
if (pmem_present) {
if (query.largest_available_block >=
(1ULL << (MAX_PHYSMEM_BITS - page_shift)))
- len = MAX_PHYSMEM_BITS - page_shift;
+ len = MAX_PHYSMEM_BITS;
else
dev_info(&dev->dev, "Skipping ibm,pmemory");
}
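iommu_get_page_shift() above probes the "IO Page Sizes" bit field from the largest supported size down. The same logic in a standalone sketch, with the SZ_* shifts spelled out since the kernel macros are not available here:

#include <stdio.h>

/* Page shifts for 4K, 64K, 16M, 32M, 64M, 128M, 256M, 16G. */
static const int shift[] = { 12, 16, 24, 25, 26, 27, 28, 34 };

/* Bit 0 (LoPAR bit 31) = 4K, bit 1 = 64K, and so on upward. */
static int get_page_shift(unsigned int query_page_size)
{
	for (int i = sizeof(shift) / sizeof(shift[0]) - 1; i >= 0; i--)
		if (query_page_size & (1u << i))
			return shift[i];
	return 0; /* no supported size */
}

int main(void)
{
	/* 4K + 64K + 16M supported -> picks 16M (shift 24). */
	printf("shift = %d\n", get_page_shift(0x7));
	/* Only 4K supported -> shift 12. */
	printf("shift = %d\n", get_page_shift(0x1));
	/* Nothing supported -> 0. */
	printf("shift = %d\n", get_page_shift(0x0));
	return 0;
}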
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 764170fdb0f7..dab356e3ff87 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -887,7 +887,8 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
want_v = hpte_encode_avpn(vpn, psize, ssize);
- flags = (newpp & 7) | H_AVPN;
+ flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
+ flags |= (newpp & HPTE_R_KEY_HI) >> 48;
if (mmu_has_feature(MMU_FTR_KERNEL_RO))
/* Move pp0 into bit 8 (IBM 55) */
flags |= (newpp & HPTE_R_PP0) >> 55;
@@ -976,11 +977,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
BUG_ON(slot == -1);
- flags = newpp & 7;
+ flags = newpp & (HPTE_R_PP | HPTE_R_N);
if (mmu_has_feature(MMU_FTR_KERNEL_RO))
/* Move pp0 into bit 8 (IBM 55) */
flags |= (newpp & HPTE_R_PP0) >> 55;
+ flags |= ((newpp & HPTE_R_KEY_HI) >> 48) | (newpp & HPTE_R_KEY_LO);
+
lpar_rc = plpar_pte_protect(flags, slot, 0);
BUG_ON(lpar_rc != H_SUCCESS);
@@ -1629,7 +1632,7 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
}
msleep(delay);
rc = plpar_resize_hpt_prepare(0, shift);
- };
+ }
switch (rc) {
case H_SUCCESS:
@@ -1826,30 +1829,28 @@ void hcall_tracepoint_unregfunc(void)
#endif
/*
- * Since the tracing code might execute hcalls we need to guard against
- * recursion. One example of this are spinlocks calling H_YIELD on
- * shared processor partitions.
+ * Keep track of hcall tracing depth and prevent recursion. Warn if any is
+ * detected because it may indicate a problem. This will not catch all
+ * problems with tracing code making hcalls, because the tracing might have
+ * been invoked from a non-hcall, so the first hcall could recurse into it
+ * without warning here, but this is better than nothing.
+ *
+ * Hcalls with specific problems being traced should use the _notrace
+ * plpar_hcall variants.
*/
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
-void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
+notrace void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
unsigned long flags;
unsigned int *depth;
- /*
- * We cannot call tracepoints inside RCU idle regions which
- * means we must not trace H_CEDE.
- */
- if (opcode == H_CEDE)
- return;
-
local_irq_save(flags);
depth = this_cpu_ptr(&hcall_trace_depth);
- if (*depth)
+ if (WARN_ON_ONCE(*depth))
goto out;
(*depth)++;
@@ -1861,19 +1862,16 @@ out:
local_irq_restore(flags);
}
-void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
+notrace void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
{
unsigned long flags;
unsigned int *depth;
- if (opcode == H_CEDE)
- return;
-
local_irq_save(flags);
depth = this_cpu_ptr(&hcall_trace_depth);
- if (*depth)
+ if (*depth) /* Don't warn again on the way out */
goto out;
(*depth)++;
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index e278390ab28d..f71eac74ea92 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -537,6 +537,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
parse_em_data(m);
maxmem_data(m);
+ seq_printf(m, "security_flavor=%u\n", pseries_security_flavor);
+
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index ea4d6a660e0d..e83e0891272d 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -452,12 +452,28 @@ static int do_suspend(void)
return ret;
}
+/**
+ * struct pseries_suspend_info - State shared between CPUs for join/suspend.
+ * @counter: Threads are to increment this upon resuming from suspend
+ * or if an error is received from H_JOIN. The thread which performs
+ * the first increment (i.e. sets it to 1) is responsible for
+ * waking the other threads.
+ * @done: False if join/suspend is in progress. True if the operation is
+ * complete (successful or not).
+ */
+struct pseries_suspend_info {
+ atomic_t counter;
+ bool done;
+};
+
static int do_join(void *arg)
{
- atomic_t *counter = arg;
+ struct pseries_suspend_info *info = arg;
+ atomic_t *counter = &info->counter;
long hvrc;
int ret;
+retry:
/* Must ensure MSR.EE off for H_JOIN. */
hard_irq_disable();
hvrc = plpar_hcall_norets(H_JOIN);
@@ -473,8 +489,20 @@ static int do_join(void *arg)
case H_SUCCESS:
/*
* The suspend is complete and this cpu has received a
- * prod.
+ * prod, or we've received a stray prod from unrelated
+ * code (e.g. paravirt spinlocks) and we need to join
+ * again.
+ *
+ * This barrier orders the return from H_JOIN above vs
+ * the load of info->done. It pairs with the barrier
+ * in the wakeup/prod path below.
*/
+ smp_mb();
+ if (READ_ONCE(info->done) == false) {
+ pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
+ smp_processor_id());
+ goto retry;
+ }
ret = 0;
break;
case H_BAD_MODE:
@@ -488,6 +516,13 @@ static int do_join(void *arg)
if (atomic_inc_return(counter) == 1) {
pr_info("CPU %u waking all threads\n", smp_processor_id());
+ WRITE_ONCE(info->done, true);
+ /*
+ * This barrier orders the store to info->done vs subsequent
+ * H_PRODs to wake the other CPUs. It pairs with the barrier
+ * in the H_SUCCESS case above.
+ */
+ smp_mb();
prod_others();
}
/*
@@ -535,11 +570,16 @@ static int pseries_suspend(u64 handle)
int ret;
while (true) {
- atomic_t counter = ATOMIC_INIT(0);
+ struct pseries_suspend_info info;
unsigned long vasi_state;
int vasi_err;
- ret = stop_machine(do_join, &counter, cpu_online_mask);
+ info = (struct pseries_suspend_info) {
+ .counter = ATOMIC_INIT(0),
+ .done = false,
+ };
+
+ ret = stop_machine(do_join, &info, cpu_online_mask);
if (ret == 0)
break;
/*
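The join/suspend changes above make each thread re-check info->done after returning from H_JOIN, since a stray prod can wake it early, and make the first thread to increment the counter publish done before prodding the rest. A compressed pthreads sketch of that wake-and-recheck ordering (a condition variable stands in for H_JOIN/H_PROD; names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t prod = PTHREAD_COND_INITIALIZER;
static bool done; /* mirrors pseries_suspend_info.done */

static void *waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	/* A stray wakeup with !done means "join again", as in do_join(). */
	while (!done)
		pthread_cond_wait(&prod, &lock);
	pthread_mutex_unlock(&lock);
	printf("thread %ld: real wakeup, resuming\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[2];

	for (long i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, waiter, (void *)i);
	sleep(1);

	/* Publish done before waking, like the store/smp_mb()/prod order. */
	pthread_mutex_lock(&lock);
	done = true;
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&prod);

	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Build with -pthread.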
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index b3ac2455faad..637300330507 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -4,6 +4,7 @@
* Copyright 2006-2007 Michael Ellerman, IBM Corp.
*/
+#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/msi.h>
@@ -458,8 +459,28 @@ again:
return hwirq;
}
- virq = irq_create_mapping_affinity(NULL, hwirq,
- entry->affinity);
+ /*
+ * Depending on the number of online CPUs in the original
+ * kernel, it is likely for CPU #0 to be offline in a kdump
+ * kernel. The associated IRQs in the affinity mappings
+ * provided by irq_create_affinity_masks() are thus not
+ * started by irq_startup(), as per-design for managed IRQs.
+ * This can be a problem with multi-queue block devices driven
+ * by blk-mq: such a non-started IRQ is very likely paired
+ * with the single queue enforced by blk-mq during kdump (see
+ * blk_mq_alloc_tag_set()). This causes the device to remain
+ * silent and likely hangs the guest at some point.
+ *
+ * We don't really care about fine-grained affinity when doing
+ * kdump: simply ignore the pre-computed affinity
+ * masks in this case and let the default mask with all CPUs
+ * be used when creating the IRQ mappings.
+ */
+ if (is_kdump_kernel())
+ virq = irq_create_mapping(NULL, hwirq);
+ else
+ virq = irq_create_mapping_affinity(NULL, hwirq,
+ entry->affinity);
if (!virq) {
pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index 835163f54244..f48e87ac89c9 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -18,6 +18,7 @@
#include <asm/plpar_wrappers.h>
#include <asm/papr_pdsm.h>
#include <asm/mce.h>
+#include <asm/unaligned.h>
#define BIND_ANY_ADDR (~0ul)
@@ -93,6 +94,7 @@ struct papr_scm_priv {
uint64_t block_size;
int metadata_size;
bool is_volatile;
+ bool hcall_flush_required;
uint64_t bound_addr;
@@ -113,10 +115,45 @@ struct papr_scm_priv {
/* Health information for the dimm */
u64 health_bitmap;
+ /* Holds the last known dirty shutdown counter value */
+ u64 dirty_shutdown_counter;
+
/* length of the stat buffer as expected by phyp */
size_t stat_buffer_len;
};
+static int papr_scm_pmem_flush(struct nd_region *nd_region,
+ struct bio *bio __maybe_unused)
+{
+ struct papr_scm_priv *p = nd_region_provider_data(nd_region);
+ unsigned long ret_buf[PLPAR_HCALL_BUFSIZE], token = 0;
+ long rc;
+
+ dev_dbg(&p->pdev->dev, "flush drc 0x%x", p->drc_index);
+
+ do {
+ rc = plpar_hcall(H_SCM_FLUSH, ret_buf, p->drc_index, token);
+ token = ret_buf[0];
+
+ /* Check if we are stalled for some time */
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ } else if (rc == H_BUSY) {
+ cond_resched();
+ }
+ } while (rc == H_BUSY);
+
+ if (rc) {
+ dev_err(&p->pdev->dev, "flush error: %ld", rc);
+ rc = -EIO;
+ } else {
+ dev_dbg(&p->pdev->dev, "flush drc 0x%x complete", p->drc_index);
+ }
+
+ return rc;
+}
+
static LIST_HEAD(papr_nd_regions);
static DEFINE_MUTEX(papr_ndr_lock);
@@ -227,7 +264,7 @@ err_out:
* Query the Dimm performance stats from PHYP and copy them (if returned) to
* provided struct papr_scm_perf_stats instance 'stats' that can hold atleast
* (num_stats + header) bytes.
- * - If buff_stats == NULL the return value is the size in byes of the buffer
+ * - If buff_stats == NULL the return value is the size in bytes of the buffer
* needed to hold all supported performance-statistics.
* - If buff_stats != NULL and num_stats == 0 then we copy all known
* performance-statistics to 'buff_stat' and expect to be large enough to
@@ -277,6 +314,13 @@ static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
dev_err(&p->pdev->dev,
"Unknown performance stats, Err:0x%016lX\n", ret[0]);
return -ENOENT;
+ } else if (rc == H_AUTHORITY) {
+ dev_info(&p->pdev->dev,
+ "Permission denied while accessing performance stats");
+ return -EPERM;
+ } else if (rc == H_UNSUPPORTED) {
+ dev_dbg(&p->pdev->dev, "Performance stats unsupported\n");
+ return -EOPNOTSUPP;
} else if (rc != H_SUCCESS) {
dev_err(&p->pdev->dev,
"Failed to query performance stats, Err:%lld\n", rc);
@@ -563,6 +607,16 @@ free_stats:
return rc;
}
+/* Add the dirty-shutdown-counter value to the pdsm */
+static int papr_pdsm_dsc(struct papr_scm_priv *p,
+ union nd_pdsm_payload *payload)
+{
+ payload->health.extension_flags |= PDSM_DIMM_DSC_VALID;
+ payload->health.dimm_dsc = p->dirty_shutdown_counter;
+
+ return sizeof(struct nd_papr_pdsm_health);
+}
+
/* Fetch the DIMM health info and populate it in provided package. */
static int papr_pdsm_health(struct papr_scm_priv *p,
union nd_pdsm_payload *payload)
@@ -606,6 +660,8 @@ static int papr_pdsm_health(struct papr_scm_priv *p,
/* Populate the fuel gauge meter in the payload */
papr_pdsm_fuel_gauge(p, payload);
+ /* Populate the dirty-shutdown-counter field */
+ papr_pdsm_dsc(p, payload);
rc = sizeof(struct nd_papr_pdsm_health);
@@ -867,15 +923,41 @@ static ssize_t flags_show(struct device *dev,
}
DEVICE_ATTR_RO(flags);
+static ssize_t dirty_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *dimm = to_nvdimm(dev);
+ struct papr_scm_priv *p = nvdimm_provider_data(dimm);
+
+ return sysfs_emit(buf, "%llu\n", p->dirty_shutdown_counter);
+}
+DEVICE_ATTR_RO(dirty_shutdown);
+
+static umode_t papr_nd_attribute_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct papr_scm_priv *p = nvdimm_provider_data(nvdimm);
+
+ /* Hide the perf_stats attribute if perf-stats are unavailable */
+ if (attr == &dev_attr_perf_stats.attr && p->stat_buffer_len == 0)
+ return 0;
+
+ return attr->mode;
+}
+
/* papr_scm specific dimm attributes */
static struct attribute *papr_nd_attributes[] = {
&dev_attr_flags.attr,
&dev_attr_perf_stats.attr,
+ &dev_attr_dirty_shutdown.attr,
NULL,
};
static struct attribute_group papr_nd_attribute_group = {
.name = "papr",
+ .is_visible = papr_nd_attribute_visible,
.attrs = papr_nd_attributes,
};
@@ -891,7 +973,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
struct nd_region_desc ndr_desc;
unsigned long dimm_flags;
int target_nid, online_nid;
- ssize_t stat_size;
p->bus_desc.ndctl = papr_scm_ndctl;
p->bus_desc.module = THIS_MODULE;
@@ -914,6 +995,15 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
dimm_flags = 0;
set_bit(NDD_LABELING, &dimm_flags);
+ /*
+ * Check if the nvdimm is unarmed. No locking needed as we are still
+ * initializing. Ignore error encountered if any.
+ */
+ __drc_pmem_query_health(p);
+
+ if (p->health_bitmap & PAPR_PMEM_UNARMED_MASK)
+ set_bit(NDD_UNARMED, &dimm_flags);
+
p->nvdimm = nvdimm_create(p->bus, p, papr_nd_attr_groups,
dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
if (!p->nvdimm) {
@@ -943,6 +1033,11 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
ndr_desc.num_mappings = 1;
ndr_desc.nd_set = &p->nd_set;
+ if (p->hcall_flush_required) {
+ set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
+ ndr_desc.flush = papr_scm_pmem_flush;
+ }
+
if (p->is_volatile)
p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
else {
@@ -962,16 +1057,6 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
list_add_tail(&p->region_list, &papr_nd_regions);
mutex_unlock(&papr_ndr_lock);
- /* Try retriving the stat buffer and see if its supported */
- stat_size = drc_pmem_query_stats(p, NULL, 0);
- if (stat_size > 0) {
- p->stat_buffer_len = stat_size;
- dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
- p->stat_buffer_len);
- } else {
- dev_info(&p->pdev->dev, "Dimm performance stats unavailable\n");
- }
-
return 0;
err: nvdimm_bus_unregister(p->bus);
@@ -1047,8 +1132,10 @@ static int papr_scm_probe(struct platform_device *pdev)
u32 drc_index, metadata_size;
u64 blocks, block_size;
struct papr_scm_priv *p;
+ u8 uuid_raw[UUID_SIZE];
const char *uuid_str;
- u64 uuid[2];
+ ssize_t stat_size;
+ uuid_t uuid;
int rc;
/* check we have all the required DT properties */
@@ -1088,18 +1175,30 @@ static int papr_scm_probe(struct platform_device *pdev)
p->block_size = block_size;
p->blocks = blocks;
p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
+ p->hcall_flush_required = of_property_read_bool(dn, "ibm,hcall-flush-required");
+
+ if (of_property_read_u64(dn, "ibm,persistence-failed-count",
+ &p->dirty_shutdown_counter))
+ p->dirty_shutdown_counter = 0;
/* We just need to ensure that set cookies are unique across */
- uuid_parse(uuid_str, (uuid_t *) uuid);
+ uuid_parse(uuid_str, &uuid);
+
/*
- * cookie1 and cookie2 are not really little endian
- * we store a little endian representation of the
- * uuid str so that we can compare this with the label
- * area cookie irrespective of the endian config with which
- * the kernel is built.
+ * The cookie1 and cookie2 are not really little endian.
+ * We store a raw buffer representation of the
+ * uuid string so that we can compare this with the label
+ * area cookie irrespective of the endian configuration
+ * with which the kernel is built.
+ *
+ * Historically we stored the cookie in the below format.
+ * for a uuid string 72511b67-0b3b-42fd-8d1d-5be3cae8bcaa
+ * cookie1 was 0xfd423b0b671b5172
+ * cookie2 was 0xaabce8cae35b1d8d
*/
- p->nd_set.cookie1 = cpu_to_le64(uuid[0]);
- p->nd_set.cookie2 = cpu_to_le64(uuid[1]);
+ export_uuid(uuid_raw, &uuid);
+ p->nd_set.cookie1 = get_unaligned_le64(&uuid_raw[0]);
+ p->nd_set.cookie2 = get_unaligned_le64(&uuid_raw[8]);
/* might be zero */
p->metadata_size = metadata_size;
@@ -1124,6 +1223,14 @@ static int papr_scm_probe(struct platform_device *pdev)
p->res.name = pdev->name;
p->res.flags = IORESOURCE_MEM;
+ /* Try retrieving the stat buffer and see if it's supported */
+ stat_size = drc_pmem_query_stats(p, NULL, 0);
+ if (stat_size > 0) {
+ p->stat_buffer_len = stat_size;
+ dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
+ p->stat_buffer_len);
+ }
+
rc = papr_scm_nvdimm_init(p);
if (rc)
goto err2;
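The cookie derivation above can be sanity-checked against the historical values quoted in the comment. A standalone sketch that open-codes the export_uuid() byte order and get_unaligned_le64(), since the kernel helpers are unavailable in userspace:

#include <stdint.h>
#include <stdio.h>

/* Read a little-endian u64 from an arbitrarily aligned buffer. */
static uint64_t get_unaligned_le64(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	/* Raw bytes of 72511b67-0b3b-42fd-8d1d-5be3cae8bcaa, in string
	 * order (what export_uuid() produces for a uuid_t). */
	const uint8_t raw[16] = {
		0x72, 0x51, 0x1b, 0x67, 0x0b, 0x3b, 0x42, 0xfd,
		0x8d, 0x1d, 0x5b, 0xe3, 0xca, 0xe8, 0xbc, 0xaa,
	};

	printf("cookie1 = 0x%016llx\n",
	       (unsigned long long)get_unaligned_le64(&raw[0]));
	printf("cookie2 = 0x%016llx\n",
	       (unsigned long long)get_unaligned_le64(&raw[8]));
	return 0;
}

It prints cookie1 = 0xfd423b0b671b5172 and cookie2 = 0xaabce8cae35b1d8d, matching the values in the comment.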
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 1bffbd1c9a94..3b6800f774c2 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -224,8 +224,6 @@ static void __init pSeries_request_regions(void)
void __init pSeries_final_fixup(void)
{
- struct pci_controller *hose;
-
pSeries_request_regions();
eeh_show_enabled();
@@ -234,27 +232,6 @@ void __init pSeries_final_fixup(void)
ppc_md.pcibios_sriov_enable = pseries_pcibios_sriov_enable;
ppc_md.pcibios_sriov_disable = pseries_pcibios_sriov_disable;
#endif
- list_for_each_entry(hose, &hose_list, list_node) {
- struct device_node *dn = hose->dn, *nvdn;
-
- while (1) {
- dn = of_find_all_nodes(dn);
- if (!dn)
- break;
- nvdn = of_parse_phandle(dn, "ibm,nvlink", 0);
- if (!nvdn)
- continue;
- if (!of_device_is_compatible(nvdn, "ibm,npu-link"))
- continue;
- if (!of_device_is_compatible(nvdn->parent,
- "ibm,power9-npu"))
- continue;
-#ifdef CONFIG_PPC_POWERNV
- WARN_ON_ONCE(pnv_npu2_init(hose));
-#endif
- break;
- }
- }
}
/*
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index f9ae17e8a0f4..a8f9140a24fa 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -50,6 +50,7 @@ EXPORT_SYMBOL_GPL(init_phb_dynamic);
int remove_phb_dynamic(struct pci_controller *phb)
{
struct pci_bus *b = phb->bus;
+ struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge);
struct resource *res;
int rc, i;
@@ -76,7 +77,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
/* Remove the PCI bus and unregister the bridge device from sysfs */
phb->bus = NULL;
pci_remove_bus(b);
- device_unregister(b->bridge);
+ host_bridge->bus = NULL;
+ device_unregister(&host_bridge->dev);
/* Now release the IO resource */
if (res->flags & IORESOURCE_IO)
diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c
index e1dc5d3254df..439ac72c2470 100644
--- a/arch/powerpc/platforms/pseries/pmem.c
+++ b/arch/powerpc/platforms/pseries/pmem.c
@@ -139,7 +139,7 @@ int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog)
return rc;
}
-const struct of_device_id drc_pmem_match[] = {
+static const struct of_device_id drc_pmem_match[] = {
{ .type = "ibm,persistent-memory", },
{}
};
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 4fe48c04c6c2..1f051a786fb3 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -43,9 +43,6 @@ extern void pSeries_final_fixup(void);
/* Poweron flag used for enabling auto ups restart */
extern unsigned long rtas_poweron_auto;
-/* Provided by HVC VIO */
-extern void hvc_vio_init_early(void);
-
/* Dynamic logical Partitioning/Mobility */
extern void dlpar_free_cc_nodes(struct device_node *);
extern void dlpar_free_cc_property(struct property *);
@@ -55,6 +52,7 @@ extern int dlpar_attach_node(struct device_node *, struct device_node *);
extern int dlpar_detach_node(struct device_node *);
extern int dlpar_acquire_drc(u32 drc_index);
extern int dlpar_release_drc(u32 drc_index);
+extern int dlpar_unisolate_drc(u32 drc_index);
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog);
int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_errlog);
@@ -111,6 +109,7 @@ static inline unsigned long cmo_get_page_size(void)
int dlpar_workqueue_init(void);
+extern u32 pseries_security_flavor;
void pseries_setup_security_mitigations(void);
void pseries_lpar_read_hblkrm_characteristics(void);
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index f8b390a9d9fb..167f2e1b8d39 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -487,8 +487,8 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
if ((be64_to_cpu(regs->msr) &
(MSR_LE|MSR_RI|MSR_DR|MSR_IR|MSR_ME|MSR_PR|
MSR_ILE|MSR_HV|MSR_SF)) == (MSR_DR|MSR_SF)) {
- regs->nip = be64_to_cpu((__be64)regs->nip);
- regs->msr = 0;
+ regs_set_return_ip(regs, be64_to_cpu((__be64)regs->nip));
+ regs_set_return_msr(regs, 0);
}
#endif
@@ -593,8 +593,6 @@ static int mce_handle_err_virtmode(struct pt_regs *regs,
mce_err.severity = MCE_SEV_SEVERE;
else if (severity == RTAS_SEVERITY_ERROR)
mce_err.severity = MCE_SEV_SEVERE;
- else if (severity == RTAS_SEVERITY_FATAL)
- mce_err.severity = MCE_SEV_FATAL;
else
mce_err.severity = MCE_SEV_FATAL;
@@ -699,7 +697,7 @@ static int mce_handle_err_virtmode(struct pt_regs *regs,
mce_err.error_type = MCE_ERROR_TYPE_DCACHE;
break;
case MC_ERROR_TYPE_I_CACHE:
- mce_err.error_type = MCE_ERROR_TYPE_DCACHE;
+ mce_err.error_type = MCE_ERROR_TYPE_ICACHE;
break;
case MC_ERROR_TYPE_UNKNOWN:
default:
diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.c b/arch/powerpc/platforms/pseries/rtas-fadump.c
index 81343908ed33..f8f73b47b107 100644
--- a/arch/powerpc/platforms/pseries/rtas-fadump.c
+++ b/arch/powerpc/platforms/pseries/rtas-fadump.c
@@ -247,7 +247,7 @@ static inline int rtas_fadump_gpr_index(u64 id)
return i;
}
-void rtas_fadump_set_regval(struct pt_regs *regs, u64 reg_id, u64 reg_val)
+static void rtas_fadump_set_regval(struct pt_regs *regs, u64 reg_id, u64 reg_val)
{
int i;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 46e1540abc22..631a0d57b6cd 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -71,6 +71,7 @@
#include <asm/swiotlb.h>
#include <asm/svm.h>
#include <asm/dtl.h>
+#include <asm/hvconsole.h>
#include "pseries.h"
#include "../../../../drivers/pci/pci.h"
@@ -85,6 +86,7 @@ EXPORT_SYMBOL(CMO_PageSize);
int fwnmi_active; /* TRUE if an FWNMI handler is present */
int ibm_nmi_interlock_token;
+u32 pseries_security_flavor;
static void pSeries_show_cpuinfo(struct seq_file *m)
{
@@ -534,13 +536,28 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
/*
* The features below are enabled by default, so we instead look to see
* if firmware has *disabled* them, and clear them if so.
+ * H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if
+ * H_CPU_BEHAV_FAVOUR_SECURITY is.
*/
if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
+ else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
+ pseries_security_flavor = 1;
+ else
+ pseries_security_flavor = 2;
if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
+ if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY)
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
+
+ if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS)
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
+
+ if (result->behaviour & H_CPU_BEHAV_NO_STF_BARRIER)
+ security_ftr_clear(SEC_FTR_STF_BARRIER);
+
if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
}
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index c70b4be9f0a5..096629f54576 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -211,7 +211,9 @@ static __init void pSeries_smp_probe(void)
if (!cpu_has_feature(CPU_FTR_SMT))
return;
- if (check_kvm_guest()) {
+ check_kvm_guest();
+
+ if (is_kvm_guest()) {
/*
* KVM emulates doorbells by disabling FSCR[MSGP] so msgsndp
* faults to the hypervisor which then reads the instruction
diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
index 7b739cc7a8a9..1d829e257996 100644
--- a/arch/powerpc/platforms/pseries/svm.c
+++ b/arch/powerpc/platforms/pseries/svm.c
@@ -55,9 +55,9 @@ void __init svm_swiotlb_init(void)
if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false))
return;
- if (io_tlb_start)
- memblock_free_early(io_tlb_start,
- PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+
+ memblock_free_early(__pa(vstart),
+ PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
panic("SVM: Cannot allocate SWIOTLB buffer");
}
diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
new file mode 100644
index 000000000000..b5c1cf1bc64d
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/vas.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2020-21 IBM Corp.
+ */
+
+#define pr_fmt(fmt) "vas: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <asm/machdep.h>
+#include <asm/hvcall.h>
+#include <asm/plpar_wrappers.h>
+#include <asm/vas.h>
+#include "vas.h"
+
+#define VAS_INVALID_WIN_ADDRESS 0xFFFFFFFFFFFFFFFFul
+#define VAS_DEFAULT_DOMAIN_ID 0xFFFFFFFFFFFFFFFFul
+/* The hypervisor allows one credit per window right now */
+#define DEF_WIN_CREDS 1
+
+static struct vas_all_caps caps_all;
+static bool copypaste_feat;
+
+static struct vas_caps vascaps[VAS_MAX_FEAT_TYPE];
+static DEFINE_MUTEX(vas_pseries_mutex);
+
+static long hcall_return_busy_check(long rc)
+{
+ /* Check if we are stalled for some time */
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ } else if (rc == H_BUSY) {
+ cond_resched();
+ }
+
+ return rc;
+}
+
+/*
+ * Allocate VAS window hcall
+ */
+static int h_allocate_vas_window(struct pseries_vas_window *win, u64 *domain,
+ u8 wintype, u16 credits)
+{
+ long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+ long rc;
+
+ do {
+ rc = plpar_hcall9(H_ALLOCATE_VAS_WINDOW, retbuf, wintype,
+ credits, domain[0], domain[1], domain[2],
+ domain[3], domain[4], domain[5]);
+
+ rc = hcall_return_busy_check(rc);
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS) {
+ if (win->win_addr == VAS_INVALID_WIN_ADDRESS) {
+ pr_err("H_ALLOCATE_VAS_WINDOW: COPY/PASTE is not supported\n");
+ return -ENOTSUPP;
+ }
+ win->vas_win.winid = retbuf[0];
+ win->win_addr = retbuf[1];
+ win->complete_irq = retbuf[2];
+ win->fault_irq = retbuf[3];
+ return 0;
+ }
+
+ pr_err("H_ALLOCATE_VAS_WINDOW error: %ld, wintype: %u, credits: %u\n",
+ rc, wintype, credits);
+
+ return -EIO;
+}
+
+/*
+ * Deallocate VAS window hcall.
+ */
+static int h_deallocate_vas_window(u64 winid)
+{
+ long rc;
+
+ do {
+ rc = plpar_hcall_norets(H_DEALLOCATE_VAS_WINDOW, winid);
+
+ rc = hcall_return_busy_check(rc);
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("H_DEALLOCATE_VAS_WINDOW error: %ld, winid: %llu\n",
+ rc, winid);
+ return -EIO;
+}
+
+/*
+ * Modify VAS window.
+ * After the window is opened with allocate window hcall, configure it
+ * with flags and LPAR PID before using.
+ */
+static int h_modify_vas_window(struct pseries_vas_window *win)
+{
+ long rc;
+ u32 lpid = mfspr(SPRN_PID);
+
+ /*
+ * AMR value is not supported in Linux VAS implementation.
+ * The hypervisor ignores it if 0 is passed.
+ */
+ do {
+ rc = plpar_hcall_norets(H_MODIFY_VAS_WINDOW,
+ win->vas_win.winid, lpid, 0,
+ VAS_MOD_WIN_FLAGS, 0);
+
+ rc = hcall_return_busy_check(rc);
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("H_MODIFY_VAS_WINDOW error: %ld, winid %u lpid %u\n",
+ rc, win->vas_win.winid, lpid);
+ return -EIO;
+}
+
+/*
+ * This hcall is used to determine the capabilities from the hypervisor.
+ * @hcall: H_QUERY_VAS_CAPABILITIES or H_QUERY_NX_CAPABILITIES
+ * @query_type: If 0 is passed, the hypervisor returns the overall
+ * capabilities, which list all available feature(s). The
+ * hypervisor can then be queried for the corresponding
+ * capabilities of a specific feature.
+ * Example: H_QUERY_VAS_CAPABILITIES provides VAS GZIP QoS
+ * and VAS GZIP Default capabilities.
+ * H_QUERY_NX_CAPABILITIES provides NX GZIP
+ * capabilities.
+ * @result: Return buffer to save capabilities.
+ */
+int h_query_vas_capabilities(const u64 hcall, u8 query_type, u64 result)
+{
+ long rc;
+
+ rc = plpar_hcall_norets(hcall, query_type, result);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("HCALL(%llx) error %ld, query_type %u, result buffer 0x%llx\n",
+ hcall, rc, query_type, result);
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(h_query_vas_capabilities);
+
+/*
+ * hcall to get fault CRB from the hypervisor.
+ */
+static int h_get_nx_fault(u32 winid, u64 buffer)
+{
+ long rc;
+
+ rc = plpar_hcall_norets(H_GET_NX_FAULT, winid, buffer);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("H_GET_NX_FAULT error: %ld, winid %u, buffer 0x%llx\n",
+ rc, winid, buffer);
+ return -EIO;
+
+}
+
+/*
+ * Handle the fault interrupt.
+ * When the fault interrupt is received for each window, query the
+ * hypervisor to get the fault CRB on the specific fault. Then
+ * process the CRB by updating CSB or send signal if the user space
+ * CSB is invalid.
+ * Note: The hypervisor forwards an interrupt for each fault request.
+ * So there is one fault CRB to process for each H_GET_NX_FAULT hcall.
+ */
+irqreturn_t pseries_vas_fault_thread_fn(int irq, void *data)
+{
+ struct pseries_vas_window *txwin = data;
+ struct coprocessor_request_block crb;
+ struct vas_user_win_ref *tsk_ref;
+ int rc;
+
+ rc = h_get_nx_fault(txwin->vas_win.winid, (u64)virt_to_phys(&crb));
+ if (!rc) {
+ tsk_ref = &txwin->vas_win.task_ref;
+ vas_dump_crb(&crb);
+ vas_update_csb(&crb, tsk_ref);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Allocate window and setup IRQ mapping.
+ */
+static int allocate_setup_window(struct pseries_vas_window *txwin,
+ u64 *domain, u8 wintype)
+{
+ int rc;
+
+ rc = h_allocate_vas_window(txwin, domain, wintype, DEF_WIN_CREDS);
+ if (rc)
+ return rc;
+ /*
+ * On PowerVM, the hypervisor sets up and forwards the fault
+ * interrupt per window. So the IRQ setup and fault handling
+ * will be done for each open window separately.
+ */
+ txwin->fault_virq = irq_create_mapping(NULL, txwin->fault_irq);
+ if (!txwin->fault_virq) {
+ pr_err("Failed irq mapping %d\n", txwin->fault_irq);
+ rc = -EINVAL;
+ goto out_win;
+ }
+
+ txwin->name = kasprintf(GFP_KERNEL, "vas-win-%d",
+ txwin->vas_win.winid);
+ if (!txwin->name) {
+ rc = -ENOMEM;
+ goto out_irq;
+ }
+
+ rc = request_threaded_irq(txwin->fault_virq, NULL,
+ pseries_vas_fault_thread_fn, IRQF_ONESHOT,
+ txwin->name, txwin);
+ if (rc) {
+ pr_err("VAS-Window[%d]: Request IRQ(%u) failed with %d\n",
+ txwin->vas_win.winid, txwin->fault_virq, rc);
+ goto out_free;
+ }
+
+ txwin->vas_win.wcreds_max = DEF_WIN_CREDS;
+
+ return 0;
+out_free:
+ kfree(txwin->name);
+out_irq:
+ irq_dispose_mapping(txwin->fault_virq);
+out_win:
+ h_deallocate_vas_window(txwin->vas_win.winid);
+ return rc;
+}
+
+static inline void free_irq_setup(struct pseries_vas_window *txwin)
+{
+ free_irq(txwin->fault_virq, txwin);
+ kfree(txwin->name);
+ irq_dispose_mapping(txwin->fault_virq);
+}
+
+static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+ enum vas_cop_type cop_type)
+{
+ long domain[PLPAR_HCALL9_BUFSIZE] = {VAS_DEFAULT_DOMAIN_ID};
+ struct vas_cop_feat_caps *cop_feat_caps;
+ struct vas_caps *caps;
+ struct pseries_vas_window *txwin;
+ int rc;
+
+ txwin = kzalloc(sizeof(*txwin), GFP_KERNEL);
+ if (!txwin)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * A VAS window can have many credits which means that many
+ * requests can be issued simultaneously. But the hypervisor
+ * restricts one credit per window.
+ * The hypervisor introduces 2 different types of credits:
+ * Default credit type (Uses normal priority FIFO):
+ * A limited number of credits are assigned to partitions
+ * based on processor entitlement. But these credits may be
+ * over-committed on a system depending on whether the CPUs
+ * are in shared or dedicated modes - that is, more requests
+ * may be issued across the system than NX can service at
+ * once which can result in paste command failure (RMA_busy).
+ * Then the process has to resend requests or fall-back to
+ * SW compression.
+ * Quality of Service (QoS) credit type (Uses high priority FIFO):
+ * To avoid NX HW contention, the system admins can assign
+ * QoS credits for each LPAR so that this partition is
+ * guaranteed access to NX resources. These credits are
+ * assigned to partitions via the HMC.
+ * Refer PAPR for more information.
+ *
+ * Allocate window with QoS credits if user requested. Otherwise
+ * default credits are used.
+ */
+ if (flags & VAS_TX_WIN_FLAG_QOS_CREDIT)
+ caps = &vascaps[VAS_GZIP_QOS_FEAT_TYPE];
+ else
+ caps = &vascaps[VAS_GZIP_DEF_FEAT_TYPE];
+
+ cop_feat_caps = &caps->caps;
+
+ if (atomic_inc_return(&cop_feat_caps->used_lpar_creds) >
+ atomic_read(&cop_feat_caps->target_lpar_creds)) {
+ pr_err("Credits are not available to allocate window\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (vas_id == -1) {
+ /*
+ * User space is requesting to allocate a window on the
+ * VAS instance where the process is executing.
+ * On PowerVM, domain values are passed to the hypervisor
+ * to select the VAS instance. This is useful if the
+ * process has affinity to a NUMA node.
+ * The hypervisor selects the VAS instance if
+ * VAS_DEFAULT_DOMAIN_ID (-1) is passed for the domain values.
+ * The h_allocate_vas_window hcall is defined to take
+ * domain values as returned by h_home_node_associativity,
+ * so no unpacking needs to be done.
+ */
+ rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, domain,
+ VPHN_FLAG_VCPU, smp_processor_id());
+ if (rc != H_SUCCESS) {
+ pr_err("H_HOME_NODE_ASSOCIATIVITY error: %d\n", rc);
+ goto out;
+ }
+ }
+
+ /*
+ * The allocate / deallocate window hcalls and the IRQ
+ * setup / free have to be protected with a mutex.
+ * Open VAS window: allocate window hcall, then set up the IRQ.
+ * Close VAS window: deallocate window hcall, then free the IRQ.
+ * The hypervisor waits until all NX requests are
+ * completed before closing the window, and expects the OS
+ * to handle NX faults in the meantime - meaning the IRQ can
+ * be freed only after the deallocate window hcall returns.
+ * If a window were closed with the deallocate hcall before
+ * its IRQ is freed, the hypervisor could hand the same
+ * fault IRQ to a new allocate hcall. IRQ setup for the new
+ * window would then fail because the OS has not yet freed
+ * that fault IRQ.
+ */
+ mutex_lock(&vas_pseries_mutex);
+ rc = allocate_setup_window(txwin, (u64 *)&domain[0],
+ cop_feat_caps->win_type);
+ mutex_unlock(&vas_pseries_mutex);
+ if (rc)
+ goto out;
+
+ /*
+ * Modify the window; it is then ready to use.
+ */
+ rc = h_modify_vas_window(txwin);
+ if (!rc)
+ rc = get_vas_user_win_ref(&txwin->vas_win.task_ref);
+ if (rc)
+ goto out_free;
+
+ vas_user_win_add_mm_context(&txwin->vas_win.task_ref);
+ txwin->win_type = cop_feat_caps->win_type;
+ mutex_lock(&vas_pseries_mutex);
+ list_add(&txwin->win_list, &caps->list);
+ mutex_unlock(&vas_pseries_mutex);
+
+ return &txwin->vas_win;
+
+out_free:
+ /*
+ * The window is not operational. Free the IRQ before closing
+ * the window so that we do not have to hold the mutex.
+ */
+ free_irq_setup(txwin);
+ h_deallocate_vas_window(txwin->vas_win.winid);
+out:
+ atomic_dec(&cop_feat_caps->used_lpar_creds);
+ kfree(txwin);
+ return ERR_PTR(rc);
+}
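
/*
 * Editor's aside: a hedged user-space sketch (not part of this patch) of
 * opening a GZIP send window through the VAS char device, which is what
 * ultimately reaches vas_allocate_window() above. The ioctl command,
 * struct layout and device path follow the kernel's VAS API
 * documentation; treat the QoS flag bit and version value as assumptions.
 */
#include <stdint.h>
#include <fcntl.h>
#include <sys/ioctl.h>

struct vas_tx_win_open_attr {
	uint32_t version;
	int16_t  vas_id;	/* -1: hypervisor picks the local VAS instance */
	uint16_t reserved1;
	uint64_t flags;		/* request a QoS credit instead of a default one */
	uint64_t reserved2[6];
};

#define VAS_MAGIC	'v'
#define VAS_TX_WIN_OPEN	_IOW(VAS_MAGIC, 0x20, struct vas_tx_win_open_attr)

static int open_gzip_window(int use_qos)
{
	struct vas_tx_win_open_attr attr = {
		.version = 1,
		.vas_id  = -1,			/* the VAS_DEFAULT_DOMAIN_ID path above */
		.flags   = use_qos ? 1ULL : 0,	/* assumed QoS-credit flag bit */
	};
	int fd = open("/dev/crypto/nx-gzip", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0)
		return -1;
	return fd;	/* mmap(fd) then yields the paste address */
}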
+
+static u64 vas_paste_address(struct vas_window *vwin)
+{
+ struct pseries_vas_window *win;
+
+ win = container_of(vwin, struct pseries_vas_window, vas_win);
+ return win->win_addr;
+}
+
+static int deallocate_free_window(struct pseries_vas_window *win)
+{
+ int rc = 0;
+
+ /*
+ * The hypervisor waits until all requests, including faults,
+ * are processed before closing the window - meaning all
+ * credits have to be returned. For a fault request, the
+ * credit is returned after the OS issues the H_GET_NX_FAULT
+ * hcall.
+ * So free the IRQ only after the H_DEALLOCATE_VAS_WINDOW
+ * hcall has completed.
+ */
+ rc = h_deallocate_vas_window(win->vas_win.winid);
+ if (!rc)
+ free_irq_setup(win);
+
+ return rc;
+}
+
+static int vas_deallocate_window(struct vas_window *vwin)
+{
+ struct pseries_vas_window *win;
+ struct vas_cop_feat_caps *caps;
+ int rc = 0;
+
+ if (!vwin)
+ return -EINVAL;
+
+ win = container_of(vwin, struct pseries_vas_window, vas_win);
+
+ /* Should not happen */
+ if (win->win_type >= VAS_MAX_FEAT_TYPE) {
+ pr_err("Window (%u): Invalid window type %u\n",
+ vwin->winid, win->win_type);
+ return -EINVAL;
+ }
+
+ caps = &vascaps[win->win_type].caps;
+ mutex_lock(&vas_pseries_mutex);
+ rc = deallocate_free_window(win);
+ if (rc) {
+ mutex_unlock(&vas_pseries_mutex);
+ return rc;
+ }
+
+ list_del(&win->win_list);
+ atomic_dec(&caps->used_lpar_creds);
+ mutex_unlock(&vas_pseries_mutex);
+
+ put_vas_user_win_ref(&vwin->task_ref);
+ mm_context_remove_vas_window(vwin->task_ref.mm);
+
+ kfree(win);
+ return 0;
+}
+
+static const struct vas_user_win_ops vops_pseries = {
+ .open_win = vas_allocate_window, /* Open and configure window */
+ .paste_addr = vas_paste_address, /* To do copy/paste */
+ .close_win = vas_deallocate_window, /* Close window */
+};
+
+/*
+ * Only the nx-gzip coprocessor type is supported now, but this API
+ * code can be extended to other coprocessor types later.
+ */
+int vas_register_api_pseries(struct module *mod, enum vas_cop_type cop_type,
+ const char *name)
+{
+ int rc;
+
+ if (!copypaste_feat)
+ return -ENOTSUPP;
+
+ rc = vas_register_coproc_api(mod, cop_type, name, &vops_pseries);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(vas_register_api_pseries);
+
+void vas_unregister_api_pseries(void)
+{
+ vas_unregister_coproc_api();
+}
+EXPORT_SYMBOL_GPL(vas_unregister_api_pseries);
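
/*
 * Editor's aside: a minimal sketch (not part of this patch) of how a
 * coprocessor driver is expected to use the two exports above; the
 * nx-gzip driver is assumed to do roughly this from its module
 * init/exit paths.
 */
static int __init nx_gzip_sketch_init(void)
{
	/* Creates the user-space API char device backed by pseries VAS */
	return vas_register_api_pseries(THIS_MODULE, VAS_COP_TYPE_GZIP,
					"nx-gzip");
}

static void __exit nx_gzip_sketch_exit(void)
{
	vas_unregister_api_pseries();
}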
+
+/*
+ * Get the specific capabilities based on the feature type.
+ * Currently supports the GZIP default and GZIP QoS capabilities.
+ */
+static int get_vas_capabilities(u8 feat, enum vas_cop_feat_type type,
+ struct hv_vas_cop_feat_caps *hv_caps)
+{
+ struct vas_cop_feat_caps *caps;
+ struct vas_caps *vcaps;
+ int rc = 0;
+
+ vcaps = &vascaps[type];
+ memset(vcaps, 0, sizeof(*vcaps));
+ INIT_LIST_HEAD(&vcaps->list);
+
+ caps = &vcaps->caps;
+
+ rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, feat,
+ (u64)virt_to_phys(hv_caps));
+ if (rc)
+ return rc;
+
+ caps->user_mode = hv_caps->user_mode;
+ if (!(caps->user_mode & VAS_COPY_PASTE_USER_MODE)) {
+ pr_err("User space COPY/PASTE is not supported\n");
+ return -ENOTSUPP;
+ }
+
+ caps->descriptor = be64_to_cpu(hv_caps->descriptor);
+ caps->win_type = hv_caps->win_type;
+ if (caps->win_type >= VAS_MAX_FEAT_TYPE) {
+ pr_err("Unsupported window type %u\n", caps->win_type);
+ return -EINVAL;
+ }
+ caps->max_lpar_creds = be16_to_cpu(hv_caps->max_lpar_creds);
+ caps->max_win_creds = be16_to_cpu(hv_caps->max_win_creds);
+ atomic_set(&caps->target_lpar_creds,
+ be16_to_cpu(hv_caps->target_lpar_creds));
+ if (feat == VAS_GZIP_DEF_FEAT) {
+ caps->def_lpar_creds = be16_to_cpu(hv_caps->def_lpar_creds);
+
+ if (caps->max_win_creds < DEF_WIN_CREDS) {
+ pr_err("Window creds(%u) > max allowed window creds(%u)\n",
+ DEF_WIN_CREDS, caps->max_win_creds);
+ return -EINVAL;
+ }
+ }
+
+ copypaste_feat = true;
+
+ return 0;
+}
+
+static int __init pseries_vas_init(void)
+{
+ struct hv_vas_cop_feat_caps *hv_cop_caps;
+ struct hv_vas_all_caps *hv_caps;
+ int rc;
+
+ /*
+ * Linux supports user space COPY/PASTE only with Radix
+ */
+ if (!radix_enabled()) {
+ pr_err("API is supported only with radix page tables\n");
+ return -ENOTSUPP;
+ }
+
+ hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
+ if (!hv_caps)
+ return -ENOMEM;
+ /*
+ * Get the overall VAS capabilities by passing 0 as the feature type.
+ */
+ rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, 0,
+ (u64)virt_to_phys(hv_caps));
+ if (rc)
+ goto out;
+
+ caps_all.descriptor = be64_to_cpu(hv_caps->descriptor);
+ caps_all.feat_type = be64_to_cpu(hv_caps->feat_type);
+
+ hv_cop_caps = kmalloc(sizeof(*hv_cop_caps), GFP_KERNEL);
+ if (!hv_cop_caps) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ /*
+ * QoS capabilities available
+ */
+ if (caps_all.feat_type & VAS_GZIP_QOS_FEAT_BIT) {
+ rc = get_vas_capabilities(VAS_GZIP_QOS_FEAT,
+ VAS_GZIP_QOS_FEAT_TYPE, hv_cop_caps);
+
+ if (rc)
+ goto out_cop;
+ }
+ /*
+ * Default capabilities available
+ */
+ if (caps_all.feat_type & VAS_GZIP_DEF_FEAT_BIT) {
+ rc = get_vas_capabilities(VAS_GZIP_DEF_FEAT,
+ VAS_GZIP_DEF_FEAT_TYPE, hv_cop_caps);
+ if (rc)
+ goto out_cop;
+ }
+
+ pr_info("GZIP feature is available\n");
+
+out_cop:
+ kfree(hv_cop_caps);
+out:
+ kfree(hv_caps);
+ return rc;
+}
+machine_device_initcall(pseries, pseries_vas_init);
diff --git a/arch/powerpc/platforms/pseries/vas.h b/arch/powerpc/platforms/pseries/vas.h
new file mode 100644
index 000000000000..4ecb3fcabd10
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/vas.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2020-21 IBM Corp.
+ */
+
+#ifndef _VAS_H
+#define _VAS_H
+#include <asm/vas.h>
+#include <linux/mutex.h>
+#include <linux/stringify.h>
+
+/*
+ * VAS window modify flags
+ */
+#define VAS_MOD_WIN_CLOSE PPC_BIT(0)
+#define VAS_MOD_WIN_JOBS_KILL PPC_BIT(1)
+#define VAS_MOD_WIN_DR PPC_BIT(3)
+#define VAS_MOD_WIN_PR PPC_BIT(4)
+#define VAS_MOD_WIN_SF PPC_BIT(5)
+#define VAS_MOD_WIN_TA PPC_BIT(6)
+#define VAS_MOD_WIN_FLAGS (VAS_MOD_WIN_JOBS_KILL | VAS_MOD_WIN_DR | \
+ VAS_MOD_WIN_PR | VAS_MOD_WIN_SF)
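
/*
 * Editor's note: PPC_BIT() uses IBM (MSB-0) bit numbering, i.e.
 * PPC_BIT(0) == 1UL << 63, so these modify flags occupy the high-order
 * bits of the hcall argument: VAS_MOD_WIN_CLOSE == 0x8000000000000000UL
 * and VAS_MOD_WIN_JOBS_KILL == 0x4000000000000000UL.
 */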
+
+#define VAS_WIN_ACTIVE 0x0
+#define VAS_WIN_CLOSED 0x1
+#define VAS_WIN_INACTIVE 0x2 /* Inactive due to HW failure */
+/* In the process of being modified, deallocated, or quiesced */
+#define VAS_WIN_MOD_IN_PROCESS 0x3
+
+#define VAS_COPY_PASTE_USER_MODE 0x00000001
+#define VAS_COP_OP_USER_MODE 0x00000010
+
+/*
+ * Co-processor feature - GZIP QoS windows or GZIP default windows
+ */
+enum vas_cop_feat_type {
+ VAS_GZIP_QOS_FEAT_TYPE,
+ VAS_GZIP_DEF_FEAT_TYPE,
+ VAS_MAX_FEAT_TYPE,
+};
+
+/*
+ * Used to get feature-specific capabilities from the
+ * hypervisor.
+ */
+struct hv_vas_cop_feat_caps {
+ __be64 descriptor;
+ u8 win_type; /* Default or QoS type */
+ u8 user_mode;
+ __be16 max_lpar_creds;
+ __be16 max_win_creds;
+ union {
+ __be16 reserved;
+ __be16 def_lpar_creds; /* Used for default capabilities */
+ };
+ __be16 target_lpar_creds;
+} __packed __aligned(0x1000);
+
+/*
+ * Feature specific (QoS or default) capabilities.
+ */
+struct vas_cop_feat_caps {
+ u64 descriptor;
+ u8 win_type; /* Default or QoS type */
+ u8 user_mode; /* User mode copy/paste or COP HCALL */
+ u16 max_lpar_creds; /* Max credits available in LPAR */
+ /* Max credits can be assigned per window */
+ u16 max_win_creds;
+ union {
+ u16 reserved; /* Used for QoS credit type */
+ u16 def_lpar_creds; /* Used for default credit type */
+ };
+ /* Total LPAR available credits. Can be different from max LPAR */
+ /* credits due to DLPAR operation */
+ atomic_t target_lpar_creds;
+ atomic_t used_lpar_creds; /* Used credits so far */
+ u16 avail_lpar_creds; /* Remaining available credits */
+};
+
+/*
+ * Per-feature (QoS or default) structure to store capabilities and
+ * the list of open windows.
+ */
+struct vas_caps {
+ struct vas_cop_feat_caps caps;
+ struct list_head list; /* List of open windows */
+};
+
+/*
+ * To get window information from the hypervisor.
+ */
+struct hv_vas_win_lpar {
+ __be16 version;
+ u8 win_type;
+ u8 status;
+ __be16 credits; /* Number of credits assigned to this window */
+ __be16 reserved;
+ __be32 pid; /* LPAR Process ID */
+ __be32 tid; /* LPAR Thread ID */
+ __be64 win_addr; /* Paste address */
+ __be32 interrupt; /* Interrupt when NX request completes */
+ __be32 fault; /* Interrupt when NX sees fault */
+ /* Associativity Domain Identifiers as returned in */
+ /* H_HOME_NODE_ASSOCIATIVITY */
+ __be64 domain[6];
+ __be64 win_util; /* Number of bytes processed */
+} __packed __aligned(0x1000);
+
+struct pseries_vas_window {
+ struct vas_window vas_win;
+ u64 win_addr; /* Physical paste address */
+ u8 win_type; /* QoS or Default window */
+ u32 complete_irq; /* Completion interrupt */
+ u32 fault_irq; /* Fault interrupt */
+ /* Associativity domain IDs where this window is allocated */
+ u64 domain[6];
+ u64 util; /* Number of bytes processed */
+
+ /* List of open windows, used for Live Partition Migration (LPM) */
+ struct list_head win_list;
+ u64 flags;
+ char *name;
+ int fault_virq;
+};
+#endif /* _VAS_H */
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index b2797cfe4e2b..e00f3725ec96 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -22,6 +22,7 @@
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/kobject.h>
+#include <linux/kexec.h>
#include <asm/iommu.h>
#include <asm/dma.h>
@@ -1261,7 +1262,6 @@ static int vio_bus_remove(struct device *dev)
struct vio_dev *viodev = to_vio_dev(dev);
struct vio_driver *viodrv = to_vio_driver(dev->driver);
struct device *devptr;
- int ret = 1;
/*
* Hold a reference to the device after the remove function is called
@@ -1270,13 +1270,27 @@ static int vio_bus_remove(struct device *dev)
devptr = get_device(dev);
if (viodrv->remove)
- ret = viodrv->remove(viodev);
+ viodrv->remove(viodev);
- if (!ret && firmware_has_feature(FW_FEATURE_CMO))
+ if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_bus_remove(viodev);
put_device(devptr);
- return ret;
+ return 0;
+}
+
+static void vio_bus_shutdown(struct device *dev)
+{
+ struct vio_dev *viodev = to_vio_dev(dev);
+ struct vio_driver *viodrv;
+
+ if (dev->driver) {
+ viodrv = to_vio_driver(dev->driver);
+ if (viodrv->shutdown)
+ viodrv->shutdown(viodev);
+ else if (kexec_in_progress)
+ vio_bus_remove(dev);
+ }
}
/**
@@ -1286,6 +1300,10 @@ static int vio_bus_remove(struct device *dev)
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
const char *mod_name)
{
+ // vio_bus_type is only initialised for pseries
+ if (!machine_is(pseries))
+ return -ENODEV;
+
pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
/* fill in 'struct driver' fields */
@@ -1614,6 +1632,7 @@ struct bus_type vio_bus_type = {
.match = vio_bus_match,
.probe = vio_bus_probe,
.remove = vio_bus_remove,
+ .shutdown = vio_bus_shutdown,
};
/**
diff --git a/arch/powerpc/purgatory/trampoline_64.S b/arch/powerpc/purgatory/trampoline_64.S
index d956b8a35fd1..b35837c13852 100644
--- a/arch/powerpc/purgatory/trampoline_64.S
+++ b/arch/powerpc/purgatory/trampoline_64.S
@@ -12,7 +12,6 @@
#include <asm/asm-compat.h>
#include <asm/crashdump-ppc64.h>
- .machine ppc64
.balign 256
.globl purgatory_start
purgatory_start:
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 6b4a34b36d98..1d33b7a5ea83 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -344,7 +344,8 @@ static void iommu_table_dart_setup(void)
iommu_table_dart.it_index = 0;
iommu_table_dart.it_blocksize = 1;
iommu_table_dart.it_ops = &iommu_dart_ops;
- iommu_init_table(&iommu_table_dart, -1, 0, 0);
+ if (!iommu_init_table(&iommu_table_dart, -1, 0, 0))
+ panic("Failed to initialize iommu table");
/* Reserve the last page of the DART to avoid possible prefetch
* past the DART mapped area
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index 48866e6c1efb..00705258ecf9 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
diff --git a/arch/powerpc/sysdev/fsl_mpic_err.c b/arch/powerpc/sysdev/fsl_mpic_err.c
index 13583bbc3e8e..5fa5fa215541 100644
--- a/arch/powerpc/sysdev/fsl_mpic_err.c
+++ b/arch/powerpc/sysdev/fsl_mpic_err.c
@@ -8,6 +8,7 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <asm/io.h>
#include <asm/irq.h>
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 040b9d01c079..b8f76f3fd994 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -455,7 +455,7 @@ static void setup_pci_atmu(struct pci_controller *hose)
}
}
-static void __init setup_pci_cmd(struct pci_controller *hose)
+static void setup_pci_cmd(struct pci_controller *hose)
{
u16 cmd;
int cap_x;
@@ -1072,7 +1072,7 @@ int fsl_pci_mcheck_exception(struct pt_regs *regs)
ret = get_kernel_nofault(inst, (void *)regs->nip);
if (!ret && mcheck_handle_load(regs, inst)) {
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
return 1;
}
}
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 07c164f7f8cf..5a95b8ea23d8 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -108,8 +108,8 @@ int fsl_rio_mcheck_exception(struct pt_regs *regs)
__func__);
out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
0);
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
}
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index c1d76c344351..dc1a151c63d7 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -260,7 +260,8 @@ void i8259_init(struct device_node *node, unsigned long intack_addr)
raw_spin_unlock_irqrestore(&i8259_lock, flags);
/* create a legacy host */
- i8259_host = irq_domain_add_legacy_isa(node, &i8259_host_ops, NULL);
+ i8259_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
+ &i8259_host_ops, NULL);
if (i8259_host == NULL) {
printk(KERN_ERR "i8259: failed to allocate irq host !\n");
return;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index b0426f28946a..995fb2ada507 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -602,7 +602,7 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
/* Find an mpic associated with a given linux interrupt */
static struct mpic *mpic_find(unsigned int irq)
{
- if (irq < NUM_ISA_INTERRUPTS)
+ if (irq < NR_IRQS_LEGACY)
return NULL;
return irq_get_chip_data(irq);
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index 0baec82510b9..4c4a6efd5e5f 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -73,7 +73,6 @@ static int __init tsi108_eth_of_init(void)
struct device_node *phy, *mdio;
hw_info tsi_eth_data;
const unsigned int *phy_id;
- const void *mac_addr;
const phandle *ph;
memset(r, 0, sizeof(r));
@@ -101,9 +100,7 @@ static int __init tsi108_eth_of_init(void)
goto err;
}
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(tsi_eth_data.mac_addr, mac_addr);
+ of_get_mac_address(np, tsi_eth_data.mac_addr);
ph = of_get_property(np, "mdio-handle", NULL);
mdio = of_find_node_by_phandle(*ph);
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 49f9541954f8..042bb38fa5c2 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -404,7 +404,8 @@ void __init tsi108_pci_int_init(struct device_node *node)
{
DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
- pci_irq_host = irq_domain_add_legacy_isa(node, &pci_irq_domain_ops, NULL);
+ pci_irq_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
+ &pci_irq_domain_ops, NULL);
if (pci_irq_host == NULL) {
printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n");
return;
diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig
index 304614c920aa..063d9195891f 100644
--- a/arch/powerpc/sysdev/xics/Kconfig
+++ b/arch/powerpc/sysdev/xics/Kconfig
@@ -12,3 +12,6 @@ config PPC_ICP_HV
config PPC_ICS_RTAS
def_bool n
+
+config PPC_ICS_NATIVE
+ def_bool n
diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile
index ba1e3117b1c0..747063927c6c 100644
--- a/arch/powerpc/sysdev/xics/Makefile
+++ b/arch/powerpc/sysdev/xics/Makefile
@@ -4,4 +4,5 @@ obj-y += xics-common.o
obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o
obj-$(CONFIG_PPC_ICP_HV) += icp-hv.o
obj-$(CONFIG_PPC_ICS_RTAS) += ics-rtas.o
+obj-$(CONFIG_PPC_ICS_NATIVE) += ics-native.o
obj-$(CONFIG_PPC_POWERNV) += ics-opal.o icp-opal.o
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 21b9d1bf39ff..6765d9e264a3 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -7,6 +7,7 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/cpu.h>
#include <linux/of.h>
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 68fd2540b093..675d708863d5 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -7,6 +7,7 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/cpu.h>
#include <linux/of.h>
diff --git a/arch/powerpc/sysdev/xics/ics-native.c b/arch/powerpc/sysdev/xics/ics-native.c
new file mode 100644
index 000000000000..d450502f4053
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/ics-native.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ICS backend for natively (MMIO) accessed XICS interrupt sources.
+ *
+ * Copyright 2011 IBM Corp.
+ */
+
+//#define DEBUG
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include <linux/msi.h>
+#include <linux/list.h>
+
+#include <asm/prom.h>
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xics.h>
+#include <asm/opal.h>
+#include <asm/firmware.h>
+
+struct ics_native {
+ struct ics ics;
+ struct device_node *node;
+ void __iomem *base;
+ u32 ibase;
+ u32 icount;
+};
+#define to_ics_native(_ics) container_of(_ics, struct ics_native, ics)
+
+static void __iomem *ics_native_xive(struct ics_native *in, unsigned int vec)
+{
+ return in->base + 0x800 + ((vec - in->ibase) << 2);
+}
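
/*
 * Editor's note: in XICS each source has a 4-byte XIVE ("eXternal
 * Interrupt Vector Entry", unrelated to the newer XIVE controller)
 * register starting at offset 0x800 from the ICS MMIO base. For
 * example, with ibase = 16, vec = 20 maps to
 * base + 0x800 + ((20 - 16) << 2) = base + 0x810.
 */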
+
+static void ics_native_unmask_irq(struct irq_data *d)
+{
+ unsigned int vec = (unsigned int)irqd_to_hwirq(d);
+ struct ics *ics = irq_data_get_irq_chip_data(d);
+ struct ics_native *in = to_ics_native(ics);
+ unsigned int server;
+
+ pr_devel("ics-native: unmask virq %d [hw 0x%x]\n", d->irq, vec);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return;
+
+ server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);
+ out_be32(ics_native_xive(in, vec), (server << 8) | DEFAULT_PRIORITY);
+}
+
+static unsigned int ics_native_startup(struct irq_data *d)
+{
+#ifdef CONFIG_PCI_MSI
+ /*
+ * The generic MSI code returns with the interrupt disabled on the
+ * card, using the MSI mask bits. Firmware doesn't appear to unmask
+ * at that level, so we do it here by hand.
+ */
+ if (irq_data_get_msi_desc(d))
+ pci_msi_unmask_irq(d);
+#endif
+
+ /* unmask it */
+ ics_native_unmask_irq(d);
+ return 0;
+}
+
+static void ics_native_do_mask(struct ics_native *in, unsigned int vec)
+{
+ out_be32(ics_native_xive(in, vec), 0xff);
+}
+
+static void ics_native_mask_irq(struct irq_data *d)
+{
+ unsigned int vec = (unsigned int)irqd_to_hwirq(d);
+ struct ics *ics = irq_data_get_irq_chip_data(d);
+ struct ics_native *in = to_ics_native(ics);
+
+ pr_devel("ics-native: mask virq %d [hw 0x%x]\n", d->irq, vec);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return;
+ ics_native_do_mask(in, vec);
+}
+
+static int ics_native_set_affinity(struct irq_data *d,
+ const struct cpumask *cpumask,
+ bool force)
+{
+ unsigned int vec = (unsigned int)irqd_to_hwirq(d);
+ struct ics *ics = irq_data_get_irq_chip_data(d);
+ struct ics_native *in = to_ics_native(ics);
+ int server;
+ u32 xive;
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return -EINVAL;
+
+ server = xics_get_irq_server(d->irq, cpumask, 1);
+ if (server == -1) {
+ pr_warn("%s: No online cpus in the mask %*pb for irq %d\n",
+ __func__, cpumask_pr_args(cpumask), d->irq);
+ return -1;
+ }
+
+ xive = in_be32(ics_native_xive(in, vec));
+ xive = (xive & 0xff) | (server << 8);
+ out_be32(ics_native_xive(in, vec), xive);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip ics_native_irq_chip = {
+ .name = "ICS",
+ .irq_startup = ics_native_startup,
+ .irq_mask = ics_native_mask_irq,
+ .irq_unmask = ics_native_unmask_irq,
+ .irq_eoi = NULL, /* Patched at init time */
+ .irq_set_affinity = ics_native_set_affinity,
+ .irq_set_type = xics_set_irq_type,
+ .irq_retrigger = xics_retrigger,
+};
+
+static int ics_native_map(struct ics *ics, unsigned int virq)
+{
+ unsigned int vec = (unsigned int)virq_to_hw(virq);
+ struct ics_native *in = to_ics_native(ics);
+
+ pr_devel("%s: vec=0x%x\n", __func__, vec);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return -EINVAL;
+
+ irq_set_chip_and_handler(virq, &ics_native_irq_chip, handle_fasteoi_irq);
+ irq_set_chip_data(virq, ics);
+
+ return 0;
+}
+
+static void ics_native_mask_unknown(struct ics *ics, unsigned long vec)
+{
+ struct ics_native *in = to_ics_native(ics);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return;
+
+ ics_native_do_mask(in, vec);
+}
+
+static long ics_native_get_server(struct ics *ics, unsigned long vec)
+{
+ struct ics_native *in = to_ics_native(ics);
+ u32 xive;
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return -EINVAL;
+
+ xive = in_be32(ics_native_xive(in, vec));
+ return (xive >> 8) & 0xfff;
+}
+
+static int ics_native_host_match(struct ics *ics, struct device_node *node)
+{
+ struct ics_native *in = to_ics_native(ics);
+
+ return in->node == node;
+}
+
+static struct ics ics_native_template = {
+ .map = ics_native_map,
+ .mask_unknown = ics_native_mask_unknown,
+ .get_server = ics_native_get_server,
+ .host_match = ics_native_host_match,
+};
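
/*
 * Editor's note: xics-common drives these callbacks generically -
 * mask_unknown() when an interrupt arrives for a vector with no Linux
 * mapping, host_match() to route irq-domain lookups to the ICS that
 * owns a given device-tree node, and map() when a virq is created for
 * one of this ICS's sources.
 */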
+
+static int __init ics_native_add_one(struct device_node *np)
+{
+ struct ics_native *ics;
+ u32 ranges[2];
+ int rc, count;
+
+ ics = kzalloc(sizeof(struct ics_native), GFP_KERNEL);
+ if (!ics)
+ return -ENOMEM;
+ ics->node = of_node_get(np);
+ memcpy(&ics->ics, &ics_native_template, sizeof(struct ics));
+
+ ics->base = of_iomap(np, 0);
+ if (!ics->base) {
+ pr_err("Failed to map %pOFP\n", np);
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ count = of_property_count_u32_elems(np, "interrupt-ranges");
+ if (count < 2 || count & 1) {
+ pr_err("Failed to read interrupt-ranges of %pOFP\n", np);
+ rc = -EINVAL;
+ goto fail;
+ }
+ if (count > 2) {
+ pr_warn("ICS %pOFP has %d ranges, only one supported\n",
+ np, count >> 1);
+ }
+ rc = of_property_read_u32_array(np, "interrupt-ranges",
+ ranges, 2);
+ if (rc) {
+ pr_err("Failed to read interrupt-ranges of %pOFP\n", np);
+ goto fail;
+ }
+ ics->ibase = ranges[0];
+ ics->icount = ranges[1];
+
+ pr_info("ICS native initialized for sources %d..%d\n",
+ ics->ibase, ics->ibase + ics->icount - 1);
+
+ /* Register ourselves */
+ xics_register_ics(&ics->ics);
+
+ return 0;
+fail:
+ of_node_put(ics->node);
+ kfree(ics);
+ return rc;
+}
+
+int __init ics_native_init(void)
+{
+ struct device_node *ics;
+ bool found_one = false;
+
+ /* We need to patch our irq chip's EOI to point to the
+ * right ICP
+ */
+ ics_native_irq_chip.irq_eoi = icp_ops->eoi;
+
+ /* Find native ICS in the device-tree */
+ for_each_compatible_node(ics, NULL, "openpower,xics-sources") {
+ if (ics_native_add_one(ics) == 0)
+ found_one = true;
+ }
+
+ if (found_one)
+ pr_info("ICS native backend registered\n");
+
+ return found_one ? 0 : -ENODEV;
+}
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 7e4305c01bac..b14c502e56a8 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -201,7 +201,7 @@ void xics_migrate_irqs_away(void)
struct ics *ics;
/* We can't set affinity on ISA interrupts */
- if (virq < NUM_ISA_INTERRUPTS)
+ if (virq < NR_IRQS_LEGACY)
continue;
/* We only need to migrate enabled IRQS */
if (!desc->action)
@@ -477,6 +477,8 @@ void __init xics_init(void)
if (rc < 0)
rc = ics_opal_init();
if (rc < 0)
+ rc = ics_native_init();
+ if (rc < 0)
pr_warn("XICS: Cannot find a Source Controller !\n");
/* Initialize common bits */
diff --git a/arch/powerpc/sysdev/xive/Kconfig b/arch/powerpc/sysdev/xive/Kconfig
index 785c292d104b..97796c6b63f0 100644
--- a/arch/powerpc/sysdev/xive/Kconfig
+++ b/arch/powerpc/sysdev/xive/Kconfig
@@ -3,6 +3,7 @@ config PPC_XIVE
bool
select PPC_SMP_MUXED_IPI
select HARDIRQS_SW_RESEND
+ select IRQ_DOMAIN_NOMAP
config PPC_XIVE_NATIVE
bool
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 595310e056f4..a8304327072d 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -63,8 +63,19 @@ static const struct xive_ops *xive_ops;
static struct irq_domain *xive_irq_domain;
#ifdef CONFIG_SMP
-/* The IPIs all use the same logical irq number */
-static u32 xive_ipi_irq;
+/* CPUs on the same node use the same logical IPI irq number */
+static struct xive_ipi_desc {
+ unsigned int irq;
+ char name[16];
+} *xive_ipis;
+
+/*
+ * Use early_cpu_to_node() for hot-plugged CPUs
+ */
+static unsigned int xive_ipi_cpu_to_irq(unsigned int cpu)
+{
+ return xive_ipis[early_cpu_to_node(cpu)].irq;
+}
#endif
/* Xive state for each CPU */
@@ -253,17 +264,20 @@ notrace void xmon_xive_do_dump(int cpu)
xmon_printf("\n");
}
+static struct irq_data *xive_get_irq_data(u32 hw_irq)
+{
+ unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);
+
+ return irq ? irq_get_irq_data(irq) : NULL;
+}
+
int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
{
- struct irq_chip *chip = irq_data_get_irq_chip(d);
int rc;
u32 target;
u8 prio;
u32 lirq;
- if (!is_xive_irq(chip))
- return -EINVAL;
-
rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
if (rc) {
xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
@@ -273,6 +287,9 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
hw_irq, target, prio, lirq);
+ if (!d)
+ d = xive_get_irq_data(hw_irq);
+
if (d) {
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
u64 val = xive_esb_read(xd, XIVE_ESB_GET);
@@ -289,6 +306,20 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
return 0;
}
+void xmon_xive_get_irq_all(void)
+{
+ unsigned int i;
+ struct irq_desc *desc;
+
+ for_each_irq_desc(i, desc) {
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+ unsigned int hwirq = (unsigned int)irqd_to_hwirq(d);
+
+ if (d->domain == xive_irq_domain)
+ xmon_xive_get_irq_config(hwirq, d);
+ }
+}
+
#endif /* CONFIG_XMON */
static unsigned int xive_get_irq(void)
@@ -959,16 +990,12 @@ EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
if (xd->eoi_mmio) {
- unmap_kernel_range((unsigned long)xd->eoi_mmio,
- 1u << xd->esb_shift);
iounmap(xd->eoi_mmio);
if (xd->eoi_mmio == xd->trig_mmio)
xd->trig_mmio = NULL;
xd->eoi_mmio = NULL;
}
if (xd->trig_mmio) {
- unmap_kernel_range((unsigned long)xd->trig_mmio,
- 1u << xd->esb_shift);
iounmap(xd->trig_mmio);
xd->trig_mmio = NULL;
}
@@ -1067,28 +1094,94 @@ static struct irq_chip xive_ipi_chip = {
.irq_unmask = xive_ipi_do_nothing,
};
-static void __init xive_request_ipi(void)
+/*
+ * IPIs are marked per-cpu. We use separate HW interrupts under the
+ * hood but associated with the same "linux" interrupt
+ */
+struct xive_ipi_alloc_info {
+ irq_hw_number_t hwirq;
+};
+
+static int xive_ipi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
{
- unsigned int virq;
+ struct xive_ipi_alloc_info *info = arg;
+ int i;
- /*
- * Initialization failed, move on, we might manage to
- * reach the point where we display our errors before
- * the system falls appart
- */
- if (!xive_irq_domain)
- return;
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip,
+ domain->host_data, handle_percpu_irq,
+ NULL, NULL);
+ }
+ return 0;
+}
- /* Initialize it */
- virq = irq_create_mapping(xive_irq_domain, XIVE_IPI_HW_IRQ);
- xive_ipi_irq = virq;
+static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
+ .alloc = xive_ipi_irq_domain_alloc,
+};
- WARN_ON(request_irq(virq, xive_muxed_ipi_action,
- IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
+static int __init xive_request_ipi(void)
+{
+ struct fwnode_handle *fwnode;
+ struct irq_domain *ipi_domain;
+ unsigned int node;
+ int ret = -ENOMEM;
+
+ fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI");
+ if (!fwnode)
+ goto out;
+
+ ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids,
+ &xive_ipi_irq_domain_ops, NULL);
+ if (!ipi_domain)
+ goto out_free_fwnode;
+
+ xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis), GFP_KERNEL | __GFP_NOFAIL);
+ if (!xive_ipis)
+ goto out_free_domain;
+
+ for_each_node(node) {
+ struct xive_ipi_desc *xid = &xive_ipis[node];
+ struct xive_ipi_alloc_info info = { node };
+
+ /* Skip nodes without CPUs */
+ if (cpumask_empty(cpumask_of_node(node)))
+ continue;
+
+ /*
+ * Map one IPI interrupt per node for all cpus of that node.
+ * Since the HW interrupt number doesn't have any meaning,
+ * simply use the node number.
+ */
+ /* Check the signed return before storing into the unsigned xid->irq */
+ ret = irq_domain_alloc_irqs(ipi_domain, 1, node, &info);
+ if (ret < 0)
+ goto out_free_xive_ipis;
+ xid->irq = ret;
+
+ snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
+
+ ret = request_irq(xid->irq, xive_muxed_ipi_action,
+ IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL);
+
+ WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
+ }
+
+ return ret;
+
+out_free_xive_ipis:
+ kfree(xive_ipis);
+out_free_domain:
+ irq_domain_remove(ipi_domain);
+out_free_fwnode:
+ irq_domain_free_fwnode(fwnode);
+out:
+ return ret;
}
static int xive_setup_cpu_ipi(unsigned int cpu)
{
+ unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
struct xive_cpu *xc;
int rc;
@@ -1131,6 +1224,8 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
+ unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
+
/* Disable the IPI and free the IRQ data */
/* Already cleaned up ? */
@@ -1178,19 +1273,6 @@ static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
*/
irq_clear_status_flags(virq, IRQ_LEVEL);
-#ifdef CONFIG_SMP
- /* IPIs are special and come up with HW number 0 */
- if (hw == XIVE_IPI_HW_IRQ) {
- /*
- * IPIs are marked per-cpu. We use separate HW interrupts under
- * the hood but associated with the same "linux" interrupt
- */
- irq_set_chip_and_handler(virq, &xive_ipi_chip,
- handle_percpu_irq);
- return 0;
- }
-#endif
-
rc = xive_irq_alloc_data(virq, hw);
if (rc)
return rc;
@@ -1202,15 +1284,7 @@ static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
- struct irq_data *data = irq_get_irq_data(virq);
- unsigned int hw_irq;
-
- /* XXX Assign BAD number */
- if (!data)
- return;
- hw_irq = (unsigned int)irqd_to_hwirq(data);
- if (hw_irq != XIVE_IPI_HW_IRQ)
- xive_irq_free_data(virq);
+ xive_irq_free_data(virq);
}
static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
@@ -1335,17 +1409,14 @@ static int xive_prepare_cpu(unsigned int cpu)
xc = per_cpu(xive_cpu, cpu);
if (!xc) {
- struct device_node *np;
-
xc = kzalloc_node(sizeof(struct xive_cpu),
GFP_KERNEL, cpu_to_node(cpu));
if (!xc)
return -ENOMEM;
- np = of_get_cpu_node(cpu, NULL);
- if (np)
- xc->chip_id = of_get_ibm_chip_id(np);
- of_node_put(np);
xc->hw_ipi = XIVE_BAD_IRQ;
+ xc->chip_id = XIVE_INVALID_CHIP_ID;
+ if (xive_ops->prepare_cpu)
+ xive_ops->prepare_cpu(cpu, xc);
per_cpu(xive_cpu, cpu) = xc;
}
@@ -1408,13 +1479,12 @@ static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
struct irq_desc *desc = irq_to_desc(irq);
struct irq_data *d = irq_desc_get_irq_data(desc);
struct xive_irq_data *xd;
- unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
/*
* Ignore anything that isn't a XIVE irq and ignore
* IPIs, so can just be dropped.
*/
- if (d->domain != xive_irq_domain || hw_irq == XIVE_IPI_HW_IRQ)
+ if (d->domain != xive_irq_domain)
continue;
/*
@@ -1592,16 +1662,15 @@ static void xive_debug_show_cpu(struct seq_file *m, int cpu)
seq_puts(m, "\n");
}
-static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data *d)
+static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d)
{
- struct irq_chip *chip = irq_data_get_irq_chip(d);
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
int rc;
u32 target;
u8 prio;
u32 lirq;
-
- if (!is_xive_irq(chip))
- return;
+ struct xive_irq_data *xd;
+ u64 val;
rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
if (rc) {
@@ -1612,17 +1681,14 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
hw_irq, target, prio, lirq);
- if (d) {
- struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
- u64 val = xive_esb_read(xd, XIVE_ESB_GET);
-
- seq_printf(m, "flags=%c%c%c PQ=%c%c",
- xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
- xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
- xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
- val & XIVE_ESB_VAL_P ? 'P' : '-',
- val & XIVE_ESB_VAL_Q ? 'Q' : '-');
- }
+ xd = irq_data_get_irq_handler_data(d);
+ val = xive_esb_read(xd, XIVE_ESB_GET);
+ seq_printf(m, "flags=%c%c%c PQ=%c%c",
+ xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
+ xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
+ xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
+ val & XIVE_ESB_VAL_P ? 'P' : '-',
+ val & XIVE_ESB_VAL_Q ? 'Q' : '-');
seq_puts(m, "\n");
}
@@ -1640,16 +1706,9 @@ static int xive_core_debug_show(struct seq_file *m, void *private)
for_each_irq_desc(i, desc) {
struct irq_data *d = irq_desc_get_irq_data(desc);
- unsigned int hw_irq;
-
- if (!d)
- continue;
-
- hw_irq = (unsigned int)irqd_to_hwirq(d);
- /* IPIs are special (HW number 0) */
- if (hw_irq != XIVE_IPI_HW_IRQ)
- xive_debug_show_irq(m, hw_irq, d);
+ if (d->domain == xive_irq_domain)
+ xive_debug_show_irq(m, d);
}
return 0;
}
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 05a800a3104e..57e3f1540435 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -380,6 +380,11 @@ static void xive_native_update_pending(struct xive_cpu *xc)
}
}
+static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
+{
+ xc->chip_id = cpu_to_chip_id(cpu);
+}
+
static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
s64 rc;
@@ -462,6 +467,7 @@ static const struct xive_ops xive_native_ops = {
.match = xive_native_match,
.shutdown = xive_native_shutdown,
.update_pending = xive_native_update_pending,
+ .prepare_cpu = xive_native_prepare_cpu,
.setup_cpu = xive_native_setup_cpu,
.teardown_cpu = xive_native_teardown_cpu,
.sync_source = xive_native_sync_source,
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index 01ccc0786ada..f143b6f111ac 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -549,7 +549,7 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
static bool xive_spapr_match(struct device_node *node)
{
/* Ignore cascaded controllers for the moment */
- return 1;
+ return true;
}
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
index 9cf57c722faa..504e7edce358 100644
--- a/arch/powerpc/sysdev/xive/xive-internal.h
+++ b/arch/powerpc/sysdev/xive/xive-internal.h
@@ -5,8 +5,6 @@
#ifndef __XIVE_INTERNAL_H
#define __XIVE_INTERNAL_H
-#define XIVE_IPI_HW_IRQ 0 /* interrupt source # for IPIs */
-
/*
* A "disabled" interrupt should never fire, to catch problems
* we set its logical number to this
@@ -46,6 +44,7 @@ struct xive_ops {
u32 *sw_irq);
int (*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
void (*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
+ void (*prepare_cpu)(unsigned int cpu, struct xive_cpu *xc);
void (*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
void (*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
bool (*match)(struct device_node *np);
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 3fe37495f63d..da4d7f225a40 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -54,6 +54,7 @@
#include <asm/code-patching.h>
#include <asm/sections.h>
#include <asm/inst.h>
+#include <asm/interrupt.h>
#ifdef CONFIG_PPC64
#include <asm/hvcall.h>
@@ -69,6 +70,9 @@ static cpumask_t cpus_in_xmon = CPU_MASK_NONE;
static unsigned long xmon_taken = 1;
static int xmon_owner;
static int xmon_gate;
+static int xmon_batch;
+static unsigned long xmon_batch_start_cpu;
+static cpumask_t xmon_batch_cpus = CPU_MASK_NONE;
#else
#define xmon_owner 0
#endif /* CONFIG_SMP */
@@ -99,7 +103,7 @@ static long *xmon_fault_jmp[NR_CPUS];
/* Breakpoint stuff */
struct bpt {
unsigned long address;
- struct ppc_inst *instr;
+ u32 *instr;
atomic_t ref_count;
int enabled;
unsigned long pad;
@@ -132,6 +136,12 @@ static void prdump(unsigned long, long);
static int ppc_inst_dump(unsigned long, long, int);
static void dump_log_buf(void);
+#ifdef CONFIG_SMP
+static int xmon_switch_cpu(unsigned long);
+static int xmon_batch_next_cpu(void);
+static int batch_cmds(struct pt_regs *);
+#endif
+
#ifdef CONFIG_PPC_POWERNV
static void dump_opal_msglog(void);
#else
@@ -215,7 +225,8 @@ Commands:\n\
#ifdef CONFIG_SMP
"\
c print cpus stopped in xmon\n\
- c# try to switch to cpu number h (in hex)\n"
+ c# try to switch to cpu number h (in hex)\n\
+ c# $ run command '$' (one of 'r','S' or 't') on all cpus in xmon\n"
#endif
"\
C checksum\n\
@@ -488,10 +499,10 @@ static void xmon_touch_watchdogs(void)
touch_nmi_watchdog();
}
-static int xmon_core(struct pt_regs *regs, int fromipi)
+static int xmon_core(struct pt_regs *regs, volatile int fromipi)
{
- int cmd = 0;
- struct bpt *bp;
+ volatile int cmd = 0;
+ struct bpt *volatile bp;
long recurse_jmp[JMP_BUF_LEN];
bool locked_down;
unsigned long offset;
@@ -513,7 +524,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL) {
- regs->nip = bp->address + offset;
+ regs_set_return_ip(regs, bp->address + offset);
atomic_dec(&bp->ref_count);
}
@@ -605,7 +616,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
* debugger break (IPI). This is similar to
* crash_kexec_secondary().
*/
- if (TRAP(regs) != 0x100 || !wait_for_other_cpus(ncpus))
+ if (TRAP(regs) != INTERRUPT_SYSTEM_RESET || !wait_for_other_cpus(ncpus))
smp_send_debugger_break();
wait_for_other_cpus(ncpus);
@@ -615,7 +626,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
if (!locked_down) {
/* for breakpoint or single step, print curr insn */
- if (bp || TRAP(regs) == 0xd00)
+ if (bp || TRAP(regs) == INTERRUPT_TRACE)
ppc_inst_dump(regs->nip, 1, 0);
printf("enter ? for help\n");
}
@@ -643,7 +654,12 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
spin_cpu_relax();
touch_nmi_watchdog();
} else {
- if (!locked_down)
+ cmd = 1;
+#ifdef CONFIG_SMP
+ if (xmon_batch)
+ cmd = batch_cmds(regs);
+#endif
+ if (!locked_down && cmd)
cmd = cmds(regs);
if (locked_down || cmd != 0) {
/* exiting xmon */
@@ -684,7 +700,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
disable_surveillance();
if (!locked_down) {
/* for breakpoint or single step, print current insn */
- if (bp || TRAP(regs) == 0xd00)
+ if (bp || TRAP(regs) == INTERRUPT_TRACE)
ppc_inst_dump(regs->nip, 1, 0);
printf("enter ? for help\n");
}
@@ -701,7 +717,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
if (regs->msr & MSR_DE) {
bp = at_breakpoint(regs->nip);
if (bp != NULL) {
- regs->nip = (unsigned long) &bp->instr[0];
+ regs_set_return_ip(regs, (unsigned long) &bp->instr[0]);
atomic_inc(&bp->ref_count);
}
}
@@ -711,7 +727,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
if (bp != NULL) {
int stepped = emulate_step(regs, ppc_inst_read(bp->instr));
if (stepped == 0) {
- regs->nip = (unsigned long) &bp->instr[0];
+ regs_set_return_ip(regs, (unsigned long) &bp->instr[0]);
atomic_inc(&bp->ref_count);
} else if (stepped < 0) {
printf("Couldn't single-step %s instruction\n",
@@ -765,7 +781,7 @@ static int xmon_bpt(struct pt_regs *regs)
/* Are we at the trap at bp->instr[1] for some bp? */
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL && (offset == 4 || offset == 8)) {
- regs->nip = bp->address + offset;
+ regs_set_return_ip(regs, bp->address + offset);
atomic_dec(&bp->ref_count);
return 1;
}
@@ -835,7 +851,7 @@ static int xmon_fault_handler(struct pt_regs *regs)
if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) {
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL) {
- regs->nip = bp->address + offset;
+ regs_set_return_ip(regs, bp->address + offset);
atomic_dec(&bp->ref_count);
}
}
@@ -856,7 +872,7 @@ static inline void force_enable_xmon(void)
static struct bpt *at_breakpoint(unsigned long pc)
{
int i;
- struct bpt *bp;
+ struct bpt *volatile bp;
bp = bpts;
for (i = 0; i < NBPTS; ++i, ++bp)
@@ -945,11 +961,11 @@ static void insert_bpts(void)
}
patch_instruction(bp->instr, instr);
- patch_instruction(ppc_inst_next(bp->instr, &instr),
+ patch_instruction(ppc_inst_next(bp->instr, bp->instr),
ppc_inst(bpinstr));
if (bp->enabled & BP_CIABR)
continue;
- if (patch_instruction((struct ppc_inst *)bp->address,
+ if (patch_instruction((u32 *)bp->address,
ppc_inst(bpinstr)) != 0) {
printf("Couldn't write instruction at %lx, "
"disabling breakpoint there\n", bp->address);
@@ -991,7 +1007,7 @@ static void remove_bpts(void)
if (mread_instr(bp->address, &instr)
&& ppc_inst_equal(instr, ppc_inst(bpinstr))
&& patch_instruction(
- (struct ppc_inst *)bp->address, ppc_inst_read(bp->instr)) != 0)
+ (u32 *)bp->address, ppc_inst_read(bp->instr)) != 0)
printf("Couldn't remove breakpoint at %lx\n",
bp->address);
}
@@ -1187,7 +1203,7 @@ cmds(struct pt_regs *excp)
#ifdef CONFIG_BOOKE
static int do_step(struct pt_regs *regs)
{
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
return 1;
}
@@ -1220,7 +1236,7 @@ static int do_step(struct pt_regs *regs)
}
}
}
- regs->msr |= MSR_SE;
+ regs_set_return_msr(regs, regs->msr | MSR_SE);
return 1;
}
#endif
@@ -1242,11 +1258,112 @@ static void bootcmds(void)
}
}
+#ifdef CONFIG_SMP
+static int xmon_switch_cpu(unsigned long cpu)
+{
+ int timeout;
+
+ xmon_taken = 0;
+ mb();
+ xmon_owner = cpu;
+ timeout = 10000000;
+ while (!xmon_taken) {
+ if (--timeout == 0) {
+ if (test_and_set_bit(0, &xmon_taken))
+ break;
+ /* take control back */
+ mb();
+ xmon_owner = smp_processor_id();
+ printf("cpu 0x%lx didn't take control\n", cpu);
+ return 0;
+ }
+ barrier();
+ }
+ return 1;
+}
+
+static int xmon_batch_next_cpu(void)
+{
+ unsigned long cpu;
+
+ while (!cpumask_empty(&xmon_batch_cpus)) {
+ cpu = cpumask_next_wrap(smp_processor_id(), &xmon_batch_cpus,
+ xmon_batch_start_cpu, true);
+ if (cpu == nr_cpumask_bits)
+ break;
+ if (xmon_batch_start_cpu == -1)
+ xmon_batch_start_cpu = cpu;
+ if (xmon_switch_cpu(cpu))
+ return 0;
+ cpumask_clear_cpu(cpu, &xmon_batch_cpus);
+ }
+
+ xmon_batch = 0;
+ printf("%x:mon> \n", smp_processor_id());
+ return 1;
+}
+
+static int batch_cmds(struct pt_regs *excp)
+{
+ int cmd;
+
+ /* simulate command entry */
+ cmd = xmon_batch;
+ termch = '\n';
+
+ last_cmd = NULL;
+ xmon_regs = excp;
+
+ printf("%x:", smp_processor_id());
+ printf("mon> ");
+ printf("%c\n", (char)cmd);
+
+ switch (cmd) {
+ case 'r':
+ prregs(excp); /* print regs */
+ break;
+ case 'S':
+ super_regs();
+ break;
+ case 't':
+ backtrace(excp);
+ break;
+ }
+
+ cpumask_clear_cpu(smp_processor_id(), &xmon_batch_cpus);
+
+ return xmon_batch_next_cpu();
+}
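
/*
 * Editor's aside: example of the new batch command (output shape is
 * illustrative). Entering "c# t" at the xmon prompt replays 't'
 * (backtrace) on every other cpu stopped in xmon:
 *
 *   0:mon> c# t
 *   1:mon> t
 *   ... backtrace of cpu 1 ...
 *   2:mon> t
 *   ... backtrace of cpu 2 ...
 */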
+
static int cpu_cmd(void)
{
-#ifdef CONFIG_SMP
unsigned long cpu, first_cpu, last_cpu;
- int timeout;
+
+ cpu = skipbl();
+ if (cpu == '#') {
+ xmon_batch = skipbl();
+ if (xmon_batch) {
+ switch (xmon_batch) {
+ case 'r':
+ case 'S':
+ case 't':
+ cpumask_copy(&xmon_batch_cpus, &cpus_in_xmon);
+ if (cpumask_weight(&xmon_batch_cpus) <= 1) {
+ printf("There are no other cpus in xmon\n");
+ break;
+ }
+ xmon_batch_start_cpu = -1;
+ if (!xmon_batch_next_cpu())
+ return 1;
+ break;
+ default:
+ printf("c# only supports 'r', 'S' and 't' commands\n");
+ }
+ xmon_batch = 0;
+ return 0;
+ }
+ }
+ termch = cpu;
if (!scanhex(&cpu)) {
/* print cpus waiting or in xmon */
@@ -1278,27 +1395,15 @@ static int cpu_cmd(void)
#endif
return 0;
}
- xmon_taken = 0;
- mb();
- xmon_owner = cpu;
- timeout = 10000000;
- while (!xmon_taken) {
- if (--timeout == 0) {
- if (test_and_set_bit(0, &xmon_taken))
- break;
- /* take control back */
- mb();
- xmon_owner = smp_processor_id();
- printf("cpu 0x%lx didn't take control\n", cpu);
- return 0;
- }
- barrier();
- }
- return 1;
+
+ return xmon_switch_cpu(cpu);
+}
#else
+static int cpu_cmd(void)
+{
return 0;
-#endif /* CONFIG_SMP */
}
+#endif /* CONFIG_SMP */
static unsigned short fcstab[256] = {
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
@@ -1769,9 +1874,12 @@ static void excprint(struct pt_regs *fp)
printf(" sp: %lx\n", fp->gpr[1]);
printf(" msr: %lx\n", fp->msr);
- if (trap == 0x300 || trap == 0x380 || trap == 0x600 || trap == 0x200) {
+ if (trap == INTERRUPT_DATA_STORAGE ||
+ trap == INTERRUPT_DATA_SEGMENT ||
+ trap == INTERRUPT_ALIGNMENT ||
+ trap == INTERRUPT_MACHINE_CHECK) {
printf(" dar: %lx\n", fp->dar);
- if (trap != 0x380)
+ if (trap != INTERRUPT_DATA_SEGMENT)
printf(" dsisr: %lx\n", fp->dsisr);
}
@@ -1785,7 +1893,7 @@ static void excprint(struct pt_regs *fp)
current->pid, current->comm);
}
- if (trap == 0x700)
+ if (trap == INTERRUPT_PROGRAM)
print_bug_trap(fp);
printf(linux_banner);
@@ -1815,25 +1923,16 @@ static void prregs(struct pt_regs *fp)
}
#ifdef CONFIG_PPC64
- if (FULL_REGS(fp)) {
- for (n = 0; n < 16; ++n)
- printf("R%.2d = "REG" R%.2d = "REG"\n",
- n, fp->gpr[n], n+16, fp->gpr[n+16]);
- } else {
- for (n = 0; n < 7; ++n)
- printf("R%.2d = "REG" R%.2d = "REG"\n",
- n, fp->gpr[n], n+7, fp->gpr[n+7]);
- }
+#define R_PER_LINE 2
#else
+#define R_PER_LINE 4
+#endif
+
for (n = 0; n < 32; ++n) {
- printf("R%.2d = %.8lx%s", n, fp->gpr[n],
- (n & 3) == 3? "\n": " ");
- if (n == 12 && !FULL_REGS(fp)) {
- printf("\n");
- break;
- }
+ printf("R%.2d = "REG"%s", n, fp->gpr[n],
+ (n % R_PER_LINE) == R_PER_LINE - 1 ? "\n" : " ");
}
-#endif
+
printf("pc = ");
xmon_print_symbol(fp->nip, " ", "\n");
if (!trap_is_syscall(fp) && cpu_has_feature(CPU_FTR_CFAR)) {
@@ -1846,7 +1945,9 @@ static void prregs(struct pt_regs *fp)
printf("ctr = "REG" xer = "REG" trap = %4lx\n",
fp->ctr, fp->xer, fp->trap);
trap = TRAP(fp);
- if (trap == 0x300 || trap == 0x380 || trap == 0x600)
+ if (trap == INTERRUPT_DATA_STORAGE ||
+ trap == INTERRUPT_DATA_SEGMENT ||
+ trap == INTERRUPT_ALIGNMENT)
printf("dar = "REG" dsisr = %.8lx\n", fp->dar, fp->dsisr);
}
@@ -2217,7 +2318,7 @@ mread_instr(unsigned long adrs, struct ppc_inst *instr)
if (setjmp(bus_error_jmp) == 0) {
catch_memory_errors = 1;
sync();
- *instr = ppc_inst_read((struct ppc_inst *)adrs);
+ *instr = ppc_inst_read((u32 *)adrs);
sync();
/* wait a little while to see if we get a machine check */
__delay(200);
@@ -2727,30 +2828,6 @@ static void dump_all_xives(void)
dump_one_xive(cpu);
}
-static void dump_one_xive_irq(u32 num, struct irq_data *d)
-{
- xmon_xive_get_irq_config(num, d);
-}
-
-static void dump_all_xive_irq(void)
-{
- unsigned int i;
- struct irq_desc *desc;
-
- for_each_irq_desc(i, desc) {
- struct irq_data *d = irq_desc_get_irq_data(desc);
- unsigned int hwirq;
-
- if (!d)
- continue;
-
- hwirq = (unsigned int)irqd_to_hwirq(d);
- /* IPIs are special (HW number 0) */
- if (hwirq)
- dump_one_xive_irq(hwirq, d);
- }
-}
-
static void dump_xives(void)
{
unsigned long num;
@@ -2767,9 +2844,9 @@ static void dump_xives(void)
return;
} else if (c == 'i') {
if (scanhex(&num))
- dump_one_xive_irq(num, NULL);
+ xmon_xive_get_irq_config(num, NULL);
else
- dump_all_xive_irq();
+ xmon_xive_get_irq_all();
return;
}
@@ -2980,7 +3057,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
if (!ppc_inst_prefixed(inst))
dump_func(ppc_inst_val(inst), adr);
else
- dump_func(ppc_inst_as_u64(inst), adr);
+ dump_func(ppc_inst_as_ulong(inst), adr);
printf("\n");
}
return adr - first_adr;
@@ -3001,8 +3078,8 @@ print_address(unsigned long addr)
static void
dump_log_buf(void)
{
- struct kmsg_dumper dumper = { .active = 1 };
- unsigned char buf[128];
+ struct kmsg_dump_iter iter;
+ static unsigned char buf[1024];
size_t len;
if (setjmp(bus_error_jmp) != 0) {
@@ -3013,9 +3090,9 @@ dump_log_buf(void)
catch_memory_errors = 1;
sync();
- kmsg_dump_rewind_nolock(&dumper);
+ kmsg_dump_rewind(&iter);
xmon_start_pagination();
- while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) {
+ while (kmsg_dump_get_line(&iter, false, buf, sizeof(buf), &len)) {
buf[len] = '\0';
printf("%s", buf);
}
@@ -3032,7 +3109,7 @@ static void dump_opal_msglog(void)
{
unsigned char buf[128];
ssize_t res;
- loff_t pos = 0;
+ volatile loff_t pos = 0;
if (!firmware_has_feature(FW_FEATURE_OPAL)) {
printf("Machine is not running OPAL firmware.\n");
@@ -3187,8 +3264,9 @@ memzcan(void)
printf("%.8lx\n", a - mskip);
}
-static void show_task(struct task_struct *tsk)
+static void show_task(struct task_struct *volatile tsk)
{
+ unsigned int p_state = READ_ONCE(tsk->__state);
char state;
/*
@@ -3196,14 +3274,14 @@ static void show_task(struct task_struct *tsk)
* appropriate for calling from xmon. This could be moved
* to a common, generic, routine used by both.
*/
- state = (tsk->state == 0) ? 'R' :
- (tsk->state < 0) ? 'U' :
- (tsk->state & TASK_UNINTERRUPTIBLE) ? 'D' :
- (tsk->state & TASK_STOPPED) ? 'T' :
- (tsk->state & TASK_TRACED) ? 'C' :
+ state = (p_state == 0) ? 'R' :
+ (p_state < 0) ? 'U' :
+ (p_state & TASK_UNINTERRUPTIBLE) ? 'D' :
+ (p_state & TASK_STOPPED) ? 'T' :
+ (p_state & TASK_TRACED) ? 'C' :
(tsk->exit_state & EXIT_ZOMBIE) ? 'Z' :
(tsk->exit_state & EXIT_DEAD) ? 'E' :
- (tsk->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
+ (p_state & TASK_INTERRUPTIBLE) ? 'S' : '?';
printf("%16px %16lx %16px %6d %6d %c %2d %s\n", tsk,
tsk->thread.ksp, tsk->thread.regs,
@@ -3231,7 +3309,7 @@ static void format_pte(void *ptep, unsigned long pte)
static void show_pte(unsigned long addr)
{
unsigned long tskv = 0;
- struct task_struct *tsk = NULL;
+ struct task_struct *volatile tsk = NULL;
struct mm_struct *mm;
pgd_t *pgdp;
p4d_t *p4dp;
@@ -3326,7 +3404,7 @@ static void show_pte(unsigned long addr)
static void show_tasks(void)
{
unsigned long tskv;
- struct task_struct *tsk = NULL;
+ struct task_struct *volatile tsk = NULL;
printf(" task_struct ->thread.ksp ->thread.regs PID PPID S P CMD\n");
@@ -3649,7 +3727,7 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
const char *after)
{
char *modname;
- const char *name = NULL;
+ const char *volatile name = NULL;
unsigned long offset, size;
printf(REG, address);
@@ -4081,7 +4159,7 @@ void xmon_register_spus(struct list_head *list)
static void stop_spus(void)
{
struct spu *spu;
- int i;
+ volatile int i;
u64 tmp;
for (i = 0; i < XMON_NUM_SPUS; i++) {
@@ -4122,7 +4200,7 @@ static void stop_spus(void)
static void restart_spus(void)
{
struct spu *spu;
- int i;
+ volatile int i;
for (i = 0; i < XMON_NUM_SPUS; i++) {
if (!spu_info[i].spu)
@@ -4212,8 +4290,7 @@ static void dump_spu_fields(struct spu *spu)
DUMP_FIELD(spu, "0x%p", pdata);
}
-int
-spu_inst_dump(unsigned long adr, long count, int praddr)
+static int spu_inst_dump(unsigned long adr, long count, int praddr)
{
return generic_inst_dump(adr, count, praddr, print_insn_spu);
}
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 85d626b8ce5e..469a70bd8da6 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -20,6 +20,7 @@ config RISCV
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEBUG_WX
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_HAS_KCOV
@@ -27,18 +28,24 @@ config RISCV
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_SET_DIRECT_MAP
select ARCH_HAS_SET_MEMORY
- select ARCH_HAS_STRICT_KERNEL_RWX if MMU
+ select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
+ select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
+ select ARCH_SUPPORTS_HUGETLBFS if MMU
+ select ARCH_USE_MEMTEST
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
+ select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
select CLONE_BACKWARDS
select CLINT_TIMER if !MMU
select COMMON_CLK
select EDAC_SUPPORT
select GENERIC_ARCH_TOPOLOGY if SMP
select GENERIC_ATOMIC64 if !64BIT
+ select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_EARLY_IOREMAP
select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
select GENERIC_IOREMAP
@@ -54,11 +61,11 @@ config RISCV
select GENERIC_TIME_VSYSCALL if MMU && 64BIT
select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_AUDITSYSCALL
- select HAVE_ARCH_JUMP_LABEL
- select HAVE_ARCH_JUMP_LABEL_RELATIVE
+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && 64BIT
select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
- select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KGDB if !XIP_KERNEL
select HAVE_ARCH_KGDB_QXFER_PKT
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_SECCOMP_FILTER
@@ -73,9 +80,9 @@ config RISCV
select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO if MMU && 64BIT
select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_KPROBES
- select HAVE_KPROBES_ON_FTRACE
- select HAVE_KRETPROBES
+ select HAVE_KPROBES if !XIP_KERNEL
+ select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
+ select HAVE_KRETPROBES if !XIP_KERNEL
select HAVE_PCI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -93,11 +100,11 @@ config RISCV
select PCI_MSI if PCI
select RISCV_INTC
select RISCV_TIMER if RISCV_SBI
- select SPARSEMEM_STATIC if 32BIT
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
select UACCESS_MEMCPY if !MMU
+ select ZONE_DMA32 if 64BIT
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
@@ -127,10 +134,6 @@ config MMU
Select if you want MMU-based virtualised addressing space
support by paged memory management. If unsure, say 'Y'.
-config ZONE_DMA32
- bool
- default y if 64BIT
-
config VA_BITS
int
default 32 if 32BIT
@@ -154,7 +157,8 @@ config ARCH_FLATMEM_ENABLE
config ARCH_SPARSEMEM_ENABLE
def_bool y
depends on MMU
- select SPARSEMEM_VMEMMAP_ENABLE
+ select SPARSEMEM_STATIC if 32BIT && SPARSEMEM
+ select SPARSEMEM_VMEMMAP_ENABLE if 64BIT
config ARCH_SELECT_MEMORY_MODEL
def_bool ARCH_SPARSEMEM_ENABLE
@@ -165,10 +169,6 @@ config ARCH_WANT_GENERAL_HUGETLB
config ARCH_SUPPORTS_UPROBES
def_bool y
-config SYS_SUPPORTS_HUGETLBFS
- depends on MMU
- def_bool y
-
config STACKTRACE_SUPPORT
def_bool y
@@ -204,6 +204,7 @@ config LOCKDEP_SUPPORT
def_bool y
source "arch/riscv/Kconfig.socs"
+source "arch/riscv/Kconfig.erratas"
menu "Platform type"
@@ -227,11 +228,11 @@ config ARCH_RV64I
bool "RV64I"
select 64BIT
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
- select HAVE_DYNAMIC_FTRACE if MMU
+ select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
- select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER
- select HAVE_FUNCTION_TRACER
+ select HAVE_FUNCTION_TRACER if !XIP_KERNEL
select SWIOTLB if MMU
endchoice
@@ -314,7 +315,7 @@ endchoice
# Common NUMA Features
config NUMA
bool "NUMA Memory Allocation and Scheduler Support"
- depends on SMP
+ depends on SMP && MMU
select GENERIC_ARCH_NUMA
select OF_NUMA
select ARCH_SUPPORTS_NUMA_BALANCING
@@ -328,7 +329,7 @@ config NODES_SHIFT
int "Maximum NUMA Nodes (as a power of 2)"
range 1 10
default "2"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
@@ -386,6 +387,31 @@ config RISCV_SBI_V01
help
This config allows the kernel to use SBI v0.1 APIs. This will be
deprecated in the future once legacy M-mode software is no longer in use.
+
+config KEXEC
+ bool "Kexec system call"
+ select KEXEC_CORE
+ select HOTPLUG_CPU if SMP
+ depends on MMU
+ help
+ kexec is a system call that implements the ability to shut down your
+ current kernel and start another kernel. It is like a reboot, but it
+ is independent of the system firmware, and like a reboot you can
+ start any kernel with it, not just Linux.
+
+ The name comes from the similarity to the exec system call.
+
+config CRASH_DUMP
+ bool "Build kdump crash kernel"
+ help
+ Generate a crash dump after being started by kexec. This should
+ normally be set only in special crash dump kernels, which are
+ loaded from the main kernel with kexec-tools into a specially
+ reserved region and then executed after a crash by kdump/kexec.
+
+ For more details see Documentation/admin-guide/kdump/kdump.rst
+
endmenu
menu "Boot options"
@@ -438,7 +464,7 @@ config EFI_STUB
config EFI
bool "UEFI runtime support"
- depends on OF
+ depends on OF && !XIP_KERNEL
select LIBFDT
select UCS2_STRING
select EFI_PARAMS_FROM_FDT
@@ -462,11 +488,63 @@ config STACKPROTECTOR_PER_TASK
def_bool y
depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
+config PHYS_RAM_BASE_FIXED
+ bool "Explicitly specified physical RAM address"
+ default n
+
+config PHYS_RAM_BASE
+ hex "Platform Physical RAM address"
+ depends on PHYS_RAM_BASE_FIXED
+ default "0x80000000"
+ help
+ This is the physical address of RAM in the system. It has to be
+ explicitly specified to run early relocations of read-write data
+ from flash to RAM.
+
+config XIP_KERNEL
+ bool "Kernel Execute-In-Place from ROM"
+ depends on MMU && SPARSEMEM
+ # This prevents XIP from being enabled by all{yes,mod}config, which
+ # fail to build since XIP doesn't support large kernels.
+ depends on !COMPILE_TEST
+ select PHYS_RAM_BASE_FIXED
+ help
+ Execute-In-Place allows the kernel to run from non-volatile storage
+ directly addressable by the CPU, such as NOR flash. This saves RAM
+ space since the text section of the kernel is not loaded from flash
+ to RAM. Read-write sections, such as the data section and stack,
+ are still copied to RAM. The XIP kernel is not compressed, since
+ it has to run directly from flash, so it takes more space to
+ store. The flash address used to link the kernel object files,
+ and to store the image, is configuration dependent. Therefore,
+ if you say Y here, you must know the physical address at which
+ to store the kernel image, which depends on your flash usage.
+
+ Also note that the make target becomes "make xipImage" rather than
+ "make zImage" or "make Image". The final kernel binary to put in
+ ROM memory will be arch/riscv/boot/xipImage.
+
+ SPARSEMEM is required because the kernel text and rodata that are
+ flash resident are not backed by memmap, so any attempt to get
+ a struct page for those regions will trigger a fault.
+
+ If unsure, say N.
+
+config XIP_PHYS_ADDR
+ hex "XIP Kernel Physical Location"
+ depends on XIP_KERNEL
+ default "0x21000000"
+ help
+ This is the physical address in flash memory at which the kernel
+ will be linked and stored. This address depends on your own
+ flash usage.
+
endmenu
config BUILTIN_DTB
- def_bool n
+ bool
depends on OF
+ default y if XIP_KERNEL
menu "Power management options"
diff --git a/arch/riscv/Kconfig.erratas b/arch/riscv/Kconfig.erratas
new file mode 100644
index 000000000000..b44d6ecdb46e
--- /dev/null
+++ b/arch/riscv/Kconfig.erratas
@@ -0,0 +1,44 @@
+menu "CPU errata selection"
+
+config RISCV_ERRATA_ALTERNATIVE
+ bool "RISC-V alternative scheme"
+ default y
+ help
+ This option allows the kernel to automatically patch the
+ errata required by the execution platform at run time. The
+ code patching is performed once, during boot, so the overhead
+ of this mechanism is incurred only once.
+
+config ERRATA_SIFIVE
+ bool "SiFive errata"
+ depends on RISCV_ERRATA_ALTERNATIVE
+ help
+ All SiFive errata options depend on this option; disabling it
+ disables all SiFive errata. Please say "Y" here if your
+ platform uses SiFive CPU cores.
+
+ Otherwise, please say "N" here to avoid unnecessary overhead.
+
+config ERRATA_SIFIVE_CIP_453
+ bool "Apply SiFive errata CIP-453"
+ depends on ERRATA_SIFIVE && 64BIT
+ default y
+ help
+ This will apply the SiFive CIP-453 erratum fix, which adds sign
+ extension to $badaddr when the exception type is instruction
+ page fault or instruction access fault.
+
+ If you don't know what to do here, say "Y".
+
+config ERRATA_SIFIVE_CIP_1200
+ bool "Apply SiFive errata CIP-1200"
+ depends on ERRATA_SIFIVE && 64BIT
+ default y
+ help
+ This will apply the SiFive CIP-1200 erratum fix, which replaces
+ all "sfence.vma addr" instructions with a full "sfence.vma" to
+ ensure that the address has been flushed from the TLB.
+
+ If you don't know what to do here, say "Y".
+
+endmenu
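To make the scheme concrete, here is a hedged sketch, not part of the patch,
of what the boot-time pass looks like. The fields mirror struct alt_entry and
patch_text_nosync() from later in this series; want_errata() is a hypothetical
stand-in for the vendor probe logic.

    /* Hedged sketch, not part of the patch. */
    static void __init apply_alternatives_sketch(struct alt_entry *begin,
                                                 struct alt_entry *end,
                                                 unsigned long vendor_id)
    {
            struct alt_entry *alt;

            for (alt = begin; alt < end; alt++) {
                    if (alt->vendor_id != vendor_id)
                            continue;
                    if (want_errata(alt->errata_id))  /* hypothetical probe */
                            patch_text_nosync(alt->old_ptr, alt->alt_ptr,
                                              alt->alt_len);
            }
    }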
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index 7efcece8896c..30676ebb16eb 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -1,5 +1,12 @@
menu "SoC selection"
+config SOC_MICROCHIP_POLARFIRE
+ bool "Microchip PolarFire SoCs"
+ select MCHP_CLK_MPFS
+ select SIFIVE_PLIC
+ help
+ This enables support for Microchip PolarFire SoC platforms.
+
config SOC_SIFIVE
bool "SiFive SoCs"
select SERIAL_SIFIVE if TTY
@@ -7,6 +14,8 @@ config SOC_SIFIVE
select CLK_SIFIVE
select CLK_SIFIVE_PRCI
select SIFIVE_PLIC
+ select RISCV_ERRATA_ALTERNATIVE
+ select ERRATA_SIFIVE
help
This enables support for SiFive SoC platform hardware.
@@ -31,6 +40,8 @@ config SOC_CANAAN
select SIFIVE_PLIC
select ARCH_HAS_RESET_CONTROLLER
select PINCTRL
+ select COMMON_CLK
+ select COMMON_CLK_K210
help
This enables support for Canaan Kendryte K210 SoC platform hardware.
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 1368d943f1f3..99ecd8bcfd77 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -16,7 +16,7 @@ ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
CC_FLAGS_FTRACE := -fpatchable-function-entry=8
endif
-ifeq ($(CONFIG_64BIT)$(CONFIG_CMODEL_MEDLOW),yy)
+ifeq ($(CONFIG_CMODEL_MEDLOW),y)
KBUILD_CFLAGS_MODULE += -mcmodel=medany
endif
@@ -38,6 +38,15 @@ else
KBUILD_LDFLAGS += -melf32lriscv
endif
+ifeq ($(CONFIG_LD_IS_LLD),y)
+ KBUILD_CFLAGS += -mno-relax
+ KBUILD_AFLAGS += -mno-relax
+ifneq ($(LLVM_IAS),1)
+ KBUILD_CFLAGS += -Wa,-mno-relax
+ KBUILD_AFLAGS += -Wa,-mno-relax
+endif
+endif
+
# ISA string setting
riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
@@ -82,11 +91,16 @@ CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)
# Default target when executing plain make
boot := arch/riscv/boot
+ifeq ($(CONFIG_XIP_KERNEL),y)
+KBUILD_IMAGE := $(boot)/xipImage
+else
KBUILD_IMAGE := $(boot)/Image.gz
+endif
head-y := arch/riscv/kernel/head.o
core-y += arch/riscv/
+core-$(CONFIG_RISCV_ERRATA_ALTERNATIVE) += arch/riscv/errata/
libs-y += arch/riscv/lib/
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
@@ -95,12 +109,14 @@ PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
+ifneq ($(CONFIG_XIP_KERNEL),y)
ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy)
KBUILD_IMAGE := $(boot)/loader.bin
else
KBUILD_IMAGE := $(boot)/Image.gz
endif
-BOOT_TARGETS := Image Image.gz loader loader.bin
+endif
+BOOT_TARGETS := Image Image.gz loader loader.bin xipImage
all: $(notdir $(KBUILD_IMAGE))
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
index 03404c84f971..6bf299f70c27 100644
--- a/arch/riscv/boot/Makefile
+++ b/arch/riscv/boot/Makefile
@@ -17,8 +17,21 @@
KCOV_INSTRUMENT := n
OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
-targets := Image Image.* loader loader.o loader.lds loader.bin
+targets := Image Image.* loader loader.o loader.lds loader.bin xipImage
+
+ifeq ($(CONFIG_XIP_KERNEL),y)
+
+quiet_cmd_mkxip = $(quiet_cmd_objcopy)
+cmd_mkxip = $(cmd_objcopy)
+
+$(obj)/xipImage: vmlinux FORCE
+ $(call if_changed,mkxip)
+ @$(kecho) ' Physical Address of xipImage: $(CONFIG_XIP_PHYS_ADDR)'
+
+endif
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile
index 7ffd502e3e7b..fe996b88319e 100644
--- a/arch/riscv/boot/dts/Makefile
+++ b/arch/riscv/boot/dts/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
subdir-y += sifive
subdir-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += canaan
+subdir-y += microchip
obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y))
diff --git a/arch/riscv/boot/dts/microchip/Makefile b/arch/riscv/boot/dts/microchip/Makefile
new file mode 100644
index 000000000000..855c1502d912
--- /dev/null
+++ b/arch/riscv/boot/dts/microchip/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_SOC_MICROCHIP_POLARFIRE) += microchip-mpfs-icicle-kit.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
new file mode 100644
index 000000000000..ec79944065c9
--- /dev/null
+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2020 Microchip Technology Inc */
+
+/dts-v1/;
+
+#include "microchip-mpfs.dtsi"
+
+/* Clock frequency (in Hz) of the rtcclk */
+#define RTCCLK_FREQ 1000000
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ model = "Microchip PolarFire-SoC Icicle Kit";
+ compatible = "microchip,mpfs-icicle-kit";
+
+ chosen {
+ stdout-path = &serial0;
+ };
+
+ cpus {
+ timebase-frequency = <RTCCLK_FREQ>;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x0 0x40000000>;
+ clocks = <&clkcfg 26>;
+ };
+
+ soc {
+ };
+};
+
+&serial0 {
+ status = "okay";
+};
+
+&serial1 {
+ status = "okay";
+};
+
+&serial2 {
+ status = "okay";
+};
+
+&serial3 {
+ status = "okay";
+};
+
+&sdcard {
+ status = "okay";
+};
+
+&emac0 {
+ phy-mode = "sgmii";
+ phy-handle = <&phy0>;
+ phy0: ethernet-phy@8 {
+ reg = <8>;
+ ti,fifo-depth = <0x01>;
+ };
+};
+
+&emac1 {
+ status = "okay";
+ phy-mode = "sgmii";
+ phy-handle = <&phy1>;
+ phy1: ethernet-phy@9 {
+ reg = <9>;
+ ti,fifo-depth = <0x01>;
+ };
+};
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
new file mode 100644
index 000000000000..b9819570a7d1
--- /dev/null
+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2020 Microchip Technology Inc */
+
+/dts-v1/;
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ model = "Microchip MPFS Icicle Kit";
+ compatible = "microchip,mpfs-icicle-kit";
+
+ chosen {
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ clock-frequency = <0>;
+ compatible = "sifive,e51", "sifive,rocket0", "riscv";
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <128>;
+ i-cache-size = <16384>;
+ reg = <0>;
+ riscv,isa = "rv64imac";
+ status = "disabled";
+
+ cpu0_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+
+ cpu@1 {
+ clock-frequency = <0>;
+ compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
+ d-cache-block-size = <64>;
+ d-cache-sets = <64>;
+ d-cache-size = <32768>;
+ d-tlb-sets = <1>;
+ d-tlb-size = <32>;
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <64>;
+ i-cache-size = <32768>;
+ i-tlb-sets = <1>;
+ i-tlb-size = <32>;
+ mmu-type = "riscv,sv39";
+ reg = <1>;
+ riscv,isa = "rv64imafdc";
+ tlb-split;
+ status = "okay";
+
+ cpu1_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+
+ cpu@2 {
+ clock-frequency = <0>;
+ compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
+ d-cache-block-size = <64>;
+ d-cache-sets = <64>;
+ d-cache-size = <32768>;
+ d-tlb-sets = <1>;
+ d-tlb-size = <32>;
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <64>;
+ i-cache-size = <32768>;
+ i-tlb-sets = <1>;
+ i-tlb-size = <32>;
+ mmu-type = "riscv,sv39";
+ reg = <2>;
+ riscv,isa = "rv64imafdc";
+ tlb-split;
+ status = "okay";
+
+ cpu2_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+
+ cpu@3 {
+ clock-frequency = <0>;
+ compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
+ d-cache-block-size = <64>;
+ d-cache-sets = <64>;
+ d-cache-size = <32768>;
+ d-tlb-sets = <1>;
+ d-tlb-size = <32>;
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <64>;
+ i-cache-size = <32768>;
+ i-tlb-sets = <1>;
+ i-tlb-size = <32>;
+ mmu-type = "riscv,sv39";
+ reg = <3>;
+ riscv,isa = "rv64imafdc";
+ tlb-split;
+ status = "okay";
+
+ cpu3_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+
+ cpu@4 {
+ clock-frequency = <0>;
+ compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
+ d-cache-block-size = <64>;
+ d-cache-sets = <64>;
+ d-cache-size = <32768>;
+ d-tlb-sets = <1>;
+ d-tlb-size = <32>;
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <64>;
+ i-cache-size = <32768>;
+ i-tlb-sets = <1>;
+ i-tlb-size = <32>;
+ mmu-type = "riscv,sv39";
+ reg = <4>;
+ riscv,isa = "rv64imafdc";
+ tlb-split;
+ status = "okay";
+ cpu4_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+ };
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ compatible = "simple-bus";
+ ranges;
+
+ cache-controller@2010000 {
+ compatible = "sifive,fu540-c000-ccache", "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-sets = <1024>;
+ cache-size = <2097152>;
+ cache-unified;
+ interrupt-parent = <&plic>;
+ interrupts = <1 2 3>;
+ reg = <0x0 0x2010000 0x0 0x1000>;
+ };
+
+ clint@2000000 {
+ compatible = "sifive,clint0";
+ reg = <0x0 0x2000000 0x0 0xC000>;
+ interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
+ &cpu1_intc 3 &cpu1_intc 7
+ &cpu2_intc 3 &cpu2_intc 7
+ &cpu3_intc 3 &cpu3_intc 7
+ &cpu4_intc 3 &cpu4_intc 7>;
+ };
+
+ plic: interrupt-controller@c000000 {
+ #interrupt-cells = <1>;
+ compatible = "sifive,plic-1.0.0";
+ reg = <0x0 0xc000000 0x0 0x4000000>;
+ riscv,ndev = <186>;
+ interrupt-controller;
+ interrupts-extended = <&cpu0_intc 11
+ &cpu1_intc 11 &cpu1_intc 9
+ &cpu2_intc 11 &cpu2_intc 9
+ &cpu3_intc 11 &cpu3_intc 9
+ &cpu4_intc 11 &cpu4_intc 9>;
+ };
+
+ dma@3000000 {
+ compatible = "sifive,fu540-c000-pdma";
+ reg = <0x0 0x3000000 0x0 0x8000>;
+ interrupt-parent = <&plic>;
+ interrupts = <23 24 25 26 27 28 29 30>;
+ #dma-cells = <1>;
+ };
+
+ refclk: refclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <600000000>;
+ clock-output-names = "msspllclk";
+ };
+
+ clkcfg: clkcfg@20002000 {
+ compatible = "microchip,mpfs-clkcfg";
+ reg = <0x0 0x20002000 0x0 0x1000>;
+ reg-names = "mss_sysreg";
+ clocks = <&refclk>;
+ #clock-cells = <1>;
+ clock-output-names = "cpu", "axi", "ahb", "envm", /* 0-3 */
+ "mac0", "mac1", "mmc", "timer", /* 4-7 */
+ "mmuart0", "mmuart1", "mmuart2", "mmuart3", /* 8-11 */
+ "mmuart4", "spi0", "spi1", "i2c0", /* 12-15 */
+ "i2c1", "can0", "can1", "usb", /* 16-19 */
+ "rsvd", "rtc", "qspi", "gpio0", /* 20-23 */
+ "gpio1", "gpio2", "ddrc", "fic0", /* 24-27 */
+ "fic1", "fic2", "fic3", "athena", "cfm"; /* 28-32 */
+ };
+
+ serial0: serial@20000000 {
+ compatible = "ns16550a";
+ reg = <0x0 0x20000000 0x0 0x400>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ interrupt-parent = <&plic>;
+ interrupts = <90>;
+ current-speed = <115200>;
+ clocks = <&clkcfg 8>;
+ status = "disabled";
+ };
+
+ serial1: serial@20100000 {
+ compatible = "ns16550a";
+ reg = <0x0 0x20100000 0x0 0x400>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ interrupt-parent = <&plic>;
+ interrupts = <91>;
+ current-speed = <115200>;
+ clocks = <&clkcfg 9>;
+ status = "disabled";
+ };
+
+ serial2: serial@20102000 {
+ compatible = "ns16550a";
+ reg = <0x0 0x20102000 0x0 0x400>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ interrupt-parent = <&plic>;
+ interrupts = <92>;
+ current-speed = <115200>;
+ clocks = <&clkcfg 10>;
+ status = "disabled";
+ };
+
+ serial3: serial@20104000 {
+ compatible = "ns16550a";
+ reg = <0x0 0x20104000 0x0 0x400>;
+ reg-io-width = <4>;
+ reg-shift = <2>;
+ interrupt-parent = <&plic>;
+ interrupts = <93>;
+ current-speed = <115200>;
+ clocks = <&clkcfg 11>;
+ status = "disabled";
+ };
+
+ emmc: mmc@20008000 {
+ compatible = "cdns,sd4hc";
+ reg = <0x0 0x20008000 0x0 0x1000>;
+ interrupt-parent = <&plic>;
+ interrupts = <88 89>;
+ pinctrl-names = "default";
+ clocks = <&clkcfg 6>;
+ bus-width = <4>;
+ cap-mmc-highspeed;
+ mmc-ddr-3_3v;
+ max-frequency = <200000000>;
+ non-removable;
+ no-sd;
+ no-sdio;
+ voltage-ranges = <3300 3300>;
+ status = "disabled";
+ };
+
+ sdcard: sdhc@20008000 {
+ compatible = "cdns,sd4hc";
+ reg = <0x0 0x20008000 0x0 0x1000>;
+ interrupt-parent = <&plic>;
+ interrupts = <88>;
+ pinctrl-names = "default";
+ clocks = <&clkcfg 6>;
+ bus-width = <4>;
+ disable-wp;
+ cap-sd-highspeed;
+ card-detect-delay = <200>;
+ sd-uhs-sdr12;
+ sd-uhs-sdr25;
+ sd-uhs-sdr50;
+ sd-uhs-sdr104;
+ max-frequency = <200000000>;
+ status = "disabled";
+ };
+
+ emac0: ethernet@20110000 {
+ compatible = "cdns,macb";
+ reg = <0x0 0x20110000 0x0 0x2000>;
+ interrupt-parent = <&plic>;
+ interrupts = <64 65 66 67>;
+ local-mac-address = [00 00 00 00 00 00];
+ clocks = <&clkcfg 4>, <&clkcfg 2>;
+ clock-names = "pclk", "hclk";
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ emac1: ethernet@20112000 {
+ compatible = "cdns,macb";
+ reg = <0x0 0x20112000 0x0 0x2000>;
+ interrupt-parent = <&plic>;
+ interrupts = <70 71 72 73>;
+ mac-address = [00 00 00 00 00 00];
+ clocks = <&clkcfg 5>, <&clkcfg 2>;
+ status = "disabled";
+ clock-names = "pclk", "hclk";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ };
+};
diff --git a/arch/riscv/boot/dts/sifive/Makefile b/arch/riscv/boot/dts/sifive/Makefile
index 74c47fe9fc22..d90e4eb0ade8 100644
--- a/arch/riscv/boot/dts/sifive/Makefile
+++ b/arch/riscv/boot/dts/sifive/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
dtb-$(CONFIG_SOC_SIFIVE) += hifive-unleashed-a00.dtb \
hifive-unmatched-a00.dtb
+obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
index eeb4f8c3e0e7..abbb960f90a0 100644
--- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
@@ -159,6 +159,7 @@
reg = <0x0 0x10000000 0x0 0x1000>;
clocks = <&hfclk>, <&rtcclk>;
#clock-cells = <1>;
+ #reset-cells = <1>;
};
uart0: serial@10010000 {
compatible = "sifive,fu740-c000-uart", "sifive,uart0";
@@ -272,7 +273,7 @@
cache-size = <2097152>;
cache-unified;
interrupt-parent = <&plic0>;
- interrupts = <19 20 21 22>;
+ interrupts = <19 21 22 20>;
reg = <0x0 0x2010000 0x0 0x1000>;
};
gpio: gpio@10060000 {
@@ -289,5 +290,37 @@
clocks = <&prci PRCI_CLK_PCLK>;
status = "disabled";
};
+ pcie@e00000000 {
+ compatible = "sifive,fu740-pcie";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ reg = <0xe 0x00000000 0x0 0x80000000>,
+ <0xd 0xf0000000 0x0 0x10000000>,
+ <0x0 0x100d0000 0x0 0x1000>;
+ reg-names = "dbi", "config", "mgmt";
+ device_type = "pci";
+ dma-coherent;
+ bus-range = <0x0 0xff>;
+ ranges = <0x81000000 0x0 0x60080000 0x0 0x60080000 0x0 0x10000>, /* I/O */
+ <0x82000000 0x0 0x60090000 0x0 0x60090000 0x0 0xff70000>, /* mem */
+ <0x82000000 0x0 0x70000000 0x0 0x70000000 0x0 0x1000000>, /* mem */
+ <0xc3000000 0x20 0x00000000 0x20 0x00000000 0x20 0x00000000>; /* mem prefetchable */
+ num-lanes = <0x8>;
+ interrupts = <56>, <57>, <58>, <59>, <60>, <61>, <62>, <63>, <64>;
+ interrupt-names = "msi", "inta", "intb", "intc", "intd";
+ interrupt-parent = <&plic0>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ interrupt-map = <0x0 0x0 0x0 0x1 &plic0 57>,
+ <0x0 0x0 0x0 0x2 &plic0 58>,
+ <0x0 0x0 0x0 0x3 &plic0 59>,
+ <0x0 0x0 0x0 0x4 &plic0 60>;
+ clock-names = "pcie_aux";
+ clocks = <&prci PRCI_CLK_PCIE_AUX>;
+ pwren-gpios = <&gpio 5 0>;
+ reset-gpios = <&gpio 8 0>;
+ resets = <&prci 4>;
+ status = "okay";
+ };
};
};
diff --git a/arch/riscv/boot/loader.lds.S b/arch/riscv/boot/loader.lds.S
index 47a5003c2e28..62d94696a19c 100644
--- a/arch/riscv/boot/loader.lds.S
+++ b/arch/riscv/boot/loader.lds.S
@@ -1,13 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/page.h>
+#include <asm/pgtable.h>
OUTPUT_ARCH(riscv)
ENTRY(_start)
SECTIONS
{
- . = PAGE_OFFSET;
+ . = KERNEL_LINK_ADDR;
.payload : {
*(.payload)
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 6c0625aa96c7..1f2be234b11c 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -16,6 +16,7 @@ CONFIG_EXPERT=y
CONFIG_BPF_SYSCALL=y
CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
+CONFIG_SOC_MICROCHIP_POLARFIRE=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
CONFIG_JUMP_LABEL=y
@@ -82,6 +83,9 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_CADENCE=y
CONFIG_MMC=y
CONFIG_MMC_SPI=y
CONFIG_RTC_CLASS=y
diff --git a/arch/riscv/errata/Makefile b/arch/riscv/errata/Makefile
new file mode 100644
index 000000000000..b8f8740a3e44
--- /dev/null
+++ b/arch/riscv/errata/Makefile
@@ -0,0 +1,2 @@
+obj-y += alternative.o
+obj-$(CONFIG_ERRATA_SIFIVE) += sifive/
diff --git a/arch/riscv/errata/alternative.c b/arch/riscv/errata/alternative.c
new file mode 100644
index 000000000000..3b15885db70b
--- /dev/null
+++ b/arch/riscv/errata/alternative.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * alternative runtime patching
+ * inspired by the ARM64 and x86 version
+ *
+ * Copyright (C) 2021 Sifive.
+ */
+
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/uaccess.h>
+#include <asm/alternative.h>
+#include <asm/sections.h>
+#include <asm/vendorid_list.h>
+#include <asm/sbi.h>
+#include <asm/csr.h>
+
+static struct cpu_manufacturer_info_t {
+ unsigned long vendor_id;
+ unsigned long arch_id;
+ unsigned long imp_id;
+} cpu_mfr_info;
+
+static void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid);
+
+static inline void __init riscv_fill_cpu_mfr_info(void)
+{
+#ifdef CONFIG_RISCV_M_MODE
+ cpu_mfr_info.vendor_id = csr_read(CSR_MVENDORID);
+ cpu_mfr_info.arch_id = csr_read(CSR_MARCHID);
+ cpu_mfr_info.imp_id = csr_read(CSR_MIMPID);
+#else
+ cpu_mfr_info.vendor_id = sbi_get_mvendorid();
+ cpu_mfr_info.arch_id = sbi_get_marchid();
+ cpu_mfr_info.imp_id = sbi_get_mimpid();
+#endif
+}
+
+static void __init init_alternative(void)
+{
+ riscv_fill_cpu_mfr_info();
+
+ switch (cpu_mfr_info.vendor_id) {
+#ifdef CONFIG_ERRATA_SIFIVE
+ case SIFIVE_VENDOR_ID:
+ vendor_patch_func = sifive_errata_patch_func;
+ break;
+#endif
+ default:
+ vendor_patch_func = NULL;
+ }
+}
+
+/*
+ * This is called very early in the boot process (directly after we run
+ * a feature detect on the boot CPU). No need to worry about other CPUs
+ * here.
+ */
+void __init apply_boot_alternatives(void)
+{
+ /* If called on non-boot cpu things could go wrong */
+ WARN_ON(smp_processor_id() != 0);
+
+ init_alternative();
+
+ if (!vendor_patch_func)
+ return;
+
+ vendor_patch_func((struct alt_entry *)__alt_start,
+ (struct alt_entry *)__alt_end,
+ cpu_mfr_info.arch_id, cpu_mfr_info.imp_id);
+}
+
diff --git a/arch/riscv/errata/sifive/Makefile b/arch/riscv/errata/sifive/Makefile
new file mode 100644
index 000000000000..2fde48db0619
--- /dev/null
+++ b/arch/riscv/errata/sifive/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_ERRATA_SIFIVE_CIP_453) += errata_cip_453.o
+obj-y += errata.o
diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c
new file mode 100644
index 000000000000..f5e5ae70e829
--- /dev/null
+++ b/arch/riscv/errata/sifive/errata.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Sifive.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/bug.h>
+#include <asm/patch.h>
+#include <asm/alternative.h>
+#include <asm/vendorid_list.h>
+#include <asm/errata_list.h>
+
+struct errata_info_t {
+ char name[ERRATA_STRING_LENGTH_MAX];
+ bool (*check_func)(unsigned long arch_id, unsigned long impid);
+};
+
+static bool errata_cip_453_check_func(unsigned long arch_id, unsigned long impid)
+{
+ /*
+ * Affected cores:
+ * Architecture ID: 0x8000000000000007
+ * Implementation ID: 0x20181004 <= impid <= 0x20191105
+ */
+ if (arch_id != 0x8000000000000007 ||
+ (impid < 0x20181004 || impid > 0x20191105))
+ return false;
+ return true;
+}
+
+static bool errata_cip_1200_check_func(unsigned long arch_id, unsigned long impid)
+{
+ /*
+ * Affected cores:
+ * Architecture ID: 0x8000000000000007 or 0x1
+ * Implementation ID: mimpid[23:0] <= 0x200630 and mimpid != 0x01200626
+ */
+ if (arch_id != 0x8000000000000007 && arch_id != 0x1)
+ return false;
+ if ((impid & 0xffffff) > 0x200630 || impid == 0x1200626)
+ return false;
+ return true;
+}
+
+static struct errata_info_t errata_list[ERRATA_SIFIVE_NUMBER] = {
+ {
+ .name = "cip-453",
+ .check_func = errata_cip_453_check_func
+ },
+ {
+ .name = "cip-1200",
+ .check_func = errata_cip_1200_check_func
+ },
+};
+
+static u32 __init sifive_errata_probe(unsigned long archid, unsigned long impid)
+{
+ int idx;
+ u32 cpu_req_errata = 0;
+
+ for (idx = 0; idx < ERRATA_SIFIVE_NUMBER; idx++)
+ if (errata_list[idx].check_func(archid, impid))
+ cpu_req_errata |= (1U << idx);
+
+ return cpu_req_errata;
+}
+
+static void __init warn_miss_errata(u32 miss_errata)
+{
+ int i;
+
+ pr_warn("----------------------------------------------------------------\n");
+ pr_warn("WARNING: Missing the following errata may cause potential issues\n");
+ for (i = 0; i < ERRATA_SIFIVE_NUMBER; i++)
+ if (miss_errata & 0x1 << i)
+ pr_warn("\tSiFive Errata[%d]:%s\n", i, errata_list[i].name);
+ pr_warn("Please enable the corresponding Kconfig to apply them\n");
+ pr_warn("----------------------------------------------------------------\n");
+}
+
+void __init sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid)
+{
+ struct alt_entry *alt;
+ u32 cpu_req_errata = sifive_errata_probe(archid, impid);
+ u32 cpu_apply_errata = 0;
+ u32 tmp;
+
+ for (alt = begin; alt < end; alt++) {
+ if (alt->vendor_id != SIFIVE_VENDOR_ID)
+ continue;
+ if (alt->errata_id >= ERRATA_SIFIVE_NUMBER) {
+ WARN(1, "This errata id:%d is not in kernel errata list", alt->errata_id);
+ continue;
+ }
+
+ tmp = (1U << alt->errata_id);
+ if (cpu_req_errata & tmp) {
+ patch_text_nosync(alt->old_ptr, alt->alt_ptr, alt->alt_len);
+ cpu_apply_errata |= tmp;
+ }
+ }
+ if (cpu_apply_errata != cpu_req_errata)
+ warn_miss_errata(cpu_req_errata - cpu_apply_errata);
+}
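As a hedged worked example with made-up values: a core reporting arch_id
0x8000000000000007 and impid 0x20190531 matches CIP-453 (0x20181004 <= impid
<= 0x20191105) and, since impid[23:0] = 0x190531 <= 0x200630 and impid !=
0x1200626, also CIP-1200, so sifive_errata_probe() returns 0x3 and both
alternatives get patched.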
diff --git a/arch/riscv/errata/sifive/errata_cip_453.S b/arch/riscv/errata/sifive/errata_cip_453.S
new file mode 100644
index 000000000000..f1b9623fe1de
--- /dev/null
+++ b/arch/riscv/errata/sifive/errata_cip_453.S
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 SiFive
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/alternative.h>
+
+.macro ADD_SIGN_EXT pt_reg badaddr tmp_reg
+ REG_L \badaddr, PT_BADADDR(\pt_reg)
+ li \tmp_reg,1
+ slli \tmp_reg,\tmp_reg,0x26
+ and \tmp_reg,\tmp_reg,\badaddr
+ beqz \tmp_reg, 1f
+ li \tmp_reg,-1
+ slli \tmp_reg,\tmp_reg,0x27
+ or \badaddr,\tmp_reg,\badaddr
+ REG_S \badaddr, PT_BADADDR(\pt_reg)
+1:
+.endm
+
+ENTRY(sifive_cip_453_page_fault_trp)
+ ADD_SIGN_EXT a0, t0, t1
+#ifdef CONFIG_MMU
+ la t0, do_page_fault
+#else
+ la t0, do_trap_unknown
+#endif
+ jr t0
+END(sifive_cip_453_page_fault_trp)
+
+ENTRY(sifive_cip_453_insn_fault_trp)
+ ADD_SIGN_EXT a0, t0, t1
+ la t0, do_trap_insn_fault
+ jr t0
+END(sifive_cip_453_insn_fault_trp)
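For readers less fluent in RISC-V assembly, the ADD_SIGN_EXT macro above is
roughly the following C (a hedged sketch, not part of the patch): if bit 38
of the reported badaddr is set, sign-extend it through bits [63:39] so the
address is canonical for Sv39 before the real handler runs.

    /* Hedged C equivalent of ADD_SIGN_EXT, not part of the patch. */
    static unsigned long cip_453_sign_extend(unsigned long badaddr)
    {
            if (badaddr & (1UL << 38))      /* li 1; slli 0x26; and; beqz */
                    badaddr |= ~0UL << 39;  /* li -1; slli 0x27; or */
            return badaddr;
    }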
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
new file mode 100644
index 000000000000..67406c376389
--- /dev/null
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ALTERNATIVE_MACROS_H
+#define __ASM_ALTERNATIVE_MACROS_H
+
+#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE
+
+#ifdef __ASSEMBLY__
+
+.macro ALT_ENTRY oldptr newptr vendor_id errata_id new_len
+ RISCV_PTR \oldptr
+ RISCV_PTR \newptr
+ REG_ASM \vendor_id
+ REG_ASM \new_len
+ .word \errata_id
+.endm
+
+.macro ALT_NEW_CONTENT vendor_id, errata_id, enable = 1, new_c : vararg
+ .if \enable
+ .pushsection .alternative, "a"
+ ALT_ENTRY 886b, 888f, \vendor_id, \errata_id, 889f - 888f
+ .popsection
+ .subsection 1
+888 :
+ \new_c
+889 :
+ .previous
+ .org . - (889b - 888b) + (887b - 886b)
+ .org . - (887b - 886b) + (889b - 888b)
+ .endif
+.endm
+
+.macro __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable
+886 :
+ \old_c
+887 :
+ ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c
+.endm
+
+#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
+ __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)
+
+#else /* !__ASSEMBLY__ */
+
+#include <asm/asm.h>
+#include <linux/stringify.h>
+
+#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen) \
+ RISCV_PTR " " oldptr "\n" \
+ RISCV_PTR " " newptr "\n" \
+ REG_ASM " " vendor_id "\n" \
+ REG_ASM " " newlen "\n" \
+ ".word " errata_id "\n"
+
+#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \
+ ".if " __stringify(enable) " == 1\n" \
+ ".pushsection .alternative, \"a\"\n" \
+ ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \
+ ".popsection\n" \
+ ".subsection 1\n" \
+ "888 :\n" \
+ new_c "\n" \
+ "889 :\n" \
+ ".previous\n" \
+ ".org . - (887b - 886b) + (889b - 888b)\n" \
+ ".org . - (889b - 888b) + (887b - 886b)\n" \
+ ".endif\n"
+
+#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \
+ "886 :\n" \
+ old_c "\n" \
+ "887 :\n" \
+ ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c)
+
+#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
+ __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k))
+
+#endif /* __ASSEMBLY__ */
+
+#else /* !CONFIG_RISCV_ERRATA_ALTERNATIVE*/
+#ifdef __ASSEMBLY__
+
+.macro __ALTERNATIVE_CFG old_c
+ \old_c
+.endm
+
+#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
+ __ALTERNATIVE_CFG old_c
+
+#else /* !__ASSEMBLY__ */
+
+#define __ALTERNATIVE_CFG(old_c) \
+ old_c "\n"
+
+#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \
+ __ALTERNATIVE_CFG(old_c)
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_RISCV_ERRATA_ALTERNATIVE */
+/*
+ * Usage:
+ * ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k)
+ * in assembly code, or
+ * asm(ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k));
+ * in C code.
+ *
+ * old_content: The old content, which may be replaced with the new content.
+ * new_content: The new content.
+ * vendor_id: The CPU vendor ID.
+ * errata_id: The errata ID.
+ * CONFIG_k: The Kconfig option for this erratum. When the option is
+ * disabled, the old content will always be executed.
+ */
+#define ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) \
+ _ALTERNATIVE_CFG(old_content, new_content, vendor_id, errata_id, CONFIG_k)
+
+/*
+ * If a vendor wants to replace an old_content, but another vendor has
+ * already used ALTERNATIVE() to patch its customized content at the same
+ * location, the vendor can create a new macro ALTERNATIVE_2() based on
+ * the following sample code and then replace ALTERNATIVE() with
+ * ALTERNATIVE_2() to append its customized content.
+ *
+ * .macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \
+ * new_c_2, vendor_id_2, errata_id_2, enable_2
+ * 886 :
+ * \old_c
+ * 887 :
+ * ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1
+ * ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2
+ * .endm
+ *
+ * #define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
+ * new_c_2, vendor_id_2, errata_id_2, CONFIG_k_2) \
+ * __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, IS_ENABLED(CONFIG_k_1), \
+ * new_c_2, vendor_id_2, errata_id_2, IS_ENABLED(CONFIG_k_2) \
+ *
+ * #define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
+ * new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2) \
+ * _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, errata_id_1, CONFIG_k_1, \
+ * new_content_2, vendor_id_2, errata_id_2, CONFIG_k_2)
+ *
+ */
+#endif
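Two details worth calling out, as a hedged reading of the macros above: the
paired .org directives fail the build if the old and new sequences assemble
to different lengths (a backwards .org is an assembler error), and C-side
usage wraps ALTERNATIVE() in inline asm, as in this sketch mirroring
ALT_FLUSH_TLB_PAGE from errata_list.h later in the patch. Both "sfence.vma"
forms are 4-byte instructions, so the size check passes.

    /* Hedged usage sketch; mirrors ALT_FLUSH_TLB_PAGE in errata_list.h. */
    static inline void local_flush_tlb_page_alt(unsigned long addr)
    {
            asm volatile(ALTERNATIVE("sfence.vma %0", "sfence.vma",
                                     SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_1200,
                                     CONFIG_ERRATA_SIFIVE_CIP_1200)
                         : : "r" (addr) : "memory");
    }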
diff --git a/arch/riscv/include/asm/alternative.h b/arch/riscv/include/asm/alternative.h
new file mode 100644
index 000000000000..e625d3cafbed
--- /dev/null
+++ b/arch/riscv/include/asm/alternative.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Sifive.
+ */
+
+#ifndef __ASM_ALTERNATIVE_H
+#define __ASM_ALTERNATIVE_H
+
+#define ERRATA_STRING_LENGTH_MAX 32
+
+#include <asm/alternative-macros.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <asm/hwcap.h>
+
+void __init apply_boot_alternatives(void);
+
+struct alt_entry {
+ void *old_ptr; /* address of original instruction or data */
+ void *alt_ptr; /* address of replacement instruction or data */
+ unsigned long vendor_id; /* CPU vendor ID */
+ unsigned long alt_len; /* replacement size */
+ unsigned int errata_id; /* errata ID */
+} __packed;
+
+struct errata_checkfunc_id {
+ unsigned long vendor_id;
+ bool (*func)(struct alt_entry *alt);
+};
+
+void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid);
+
+#endif
+#endif
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
index 27e005fca584..2a652b0c987d 100644
--- a/arch/riscv/include/asm/asm-prototypes.h
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -9,4 +9,20 @@ long long __lshrti3(long long a, int b);
long long __ashrti3(long long a, int b);
long long __ashlti3(long long a, int b);
+
+#define DECLARE_DO_ERROR_INFO(name) asmlinkage void name(struct pt_regs *regs)
+
+DECLARE_DO_ERROR_INFO(do_trap_unknown);
+DECLARE_DO_ERROR_INFO(do_trap_insn_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_insn_fault);
+DECLARE_DO_ERROR_INFO(do_trap_insn_illegal);
+DECLARE_DO_ERROR_INFO(do_trap_load_fault);
+DECLARE_DO_ERROR_INFO(do_trap_load_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_store_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_store_fault);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_u);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
+DECLARE_DO_ERROR_INFO(do_trap_break);
+
#endif /* _ASM_RISCV_PROTOTYPES_H */
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 9c992a88d858..618d7c5af1a2 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -23,6 +23,7 @@
#define REG_L __REG_SEL(ld, lw)
#define REG_S __REG_SEL(sd, sw)
#define REG_SC __REG_SEL(sc.d, sc.w)
+#define REG_ASM __REG_SEL(.dword, .word)
#define SZREG __REG_SEL(8, 4)
#define LGREG __REG_SEL(3, 2)
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 400a8c8b6de7..ac9bdf4fc404 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -25,22 +25,22 @@
#define __atomic_release_fence() \
__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
-static __always_inline int atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
{
return READ_ONCE(v->counter);
}
-static __always_inline void atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
WRITE_ONCE(v->counter, i);
}
#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) }
-static __always_inline s64 atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
return READ_ONCE(v->counter);
}
-static __always_inline void atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
WRITE_ONCE(v->counter, i);
}
@@ -53,7 +53,7 @@ static __always_inline void atomic64_set(atomic64_t *v, s64 i)
*/
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
-void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
+void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
__asm__ __volatile__ ( \
" amo" #asm_op "." #asm_type " zero, %1, %0" \
@@ -87,7 +87,7 @@ ATOMIC_OPS(xor, xor, i)
*/
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
-c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
+c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i, \
atomic##prefix##_t *v) \
{ \
register c_type ret; \
@@ -99,7 +99,7 @@ c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
return ret; \
} \
static __always_inline \
-c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
+c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
{ \
register c_type ret; \
__asm__ __volatile__ ( \
@@ -112,15 +112,15 @@ c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline \
-c_type atomic##prefix##_##op##_return_relaxed(c_type i, \
+c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i, \
atomic##prefix##_t *v) \
{ \
- return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
+ return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
} \
static __always_inline \
-c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
+c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
{ \
- return atomic##prefix##_fetch_##op(i, v) c_op I; \
+ return arch_atomic##prefix##_fetch_##op(i, v) c_op I; \
}
#ifdef CONFIG_GENERIC_ATOMIC64
@@ -138,26 +138,26 @@ c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
ATOMIC_OPS(add, add, +, i)
ATOMIC_OPS(sub, add, +, -i)
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
-#define atomic_add_return atomic_add_return
-#define atomic_sub_return atomic_sub_return
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
-#define atomic_fetch_add atomic_fetch_add
-#define atomic_fetch_sub atomic_fetch_sub
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
#ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
-#define atomic64_add_return atomic64_add_return
-#define atomic64_sub_return atomic64_sub_return
-
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
-#define atomic64_fetch_add atomic64_fetch_add
-#define atomic64_fetch_sub atomic64_fetch_sub
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
#endif
#undef ATOMIC_OPS
@@ -175,20 +175,20 @@ ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or, or, i)
ATOMIC_OPS(xor, xor, i)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
-#define atomic_fetch_and atomic_fetch_and
-#define atomic_fetch_or atomic_fetch_or
-#define atomic_fetch_xor atomic_fetch_xor
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
#ifndef CONFIG_GENERIC_ATOMIC64
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
-#define atomic64_fetch_and atomic64_fetch_and
-#define atomic64_fetch_or atomic64_fetch_or
-#define atomic64_fetch_xor atomic64_fetch_xor
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#endif
#undef ATOMIC_OPS
@@ -197,7 +197,7 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_OP_RETURN
/* This is required to provide a full barrier on success. */
-static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int prev, rc;
@@ -214,10 +214,10 @@ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
: "memory");
return prev;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 prev;
long rc;
@@ -235,7 +235,7 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
: "memory");
return prev;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif
/*
@@ -244,45 +244,45 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
*/
#define ATOMIC_OP(c_t, prefix, size) \
static __always_inline \
-c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_relaxed(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_acquire(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg_release(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
+c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
{ \
return __xchg(&(v->counter), n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_relaxed(&(v->counter), o, n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_acquire(&(v->counter), o, n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
+c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
c_t o, c_t n) \
{ \
return __cmpxchg_release(&(v->counter), o, n, size); \
} \
static __always_inline \
-c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
+c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
{ \
return __cmpxchg(&(v->counter), o, n, size); \
}
@@ -298,19 +298,19 @@ c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
ATOMIC_OPS()
-#define atomic_xchg_relaxed atomic_xchg_relaxed
-#define atomic_xchg_acquire atomic_xchg_acquire
-#define atomic_xchg_release atomic_xchg_release
-#define atomic_xchg atomic_xchg
-#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
-#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
-#define atomic_cmpxchg_release atomic_cmpxchg_release
-#define atomic_cmpxchg atomic_cmpxchg
+#define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed
+#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
+#define arch_atomic_xchg_release arch_atomic_xchg_release
+#define arch_atomic_xchg arch_atomic_xchg
+#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
+#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#undef ATOMIC_OPS
#undef ATOMIC_OP
-static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
+static __always_inline int arch_atomic_sub_if_positive(atomic_t *v, int offset)
{
int prev, rc;
@@ -328,10 +328,10 @@ static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
return prev - offset;
}
-#define atomic_dec_if_positive(v) atomic_sub_if_positive(v, 1)
+#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(v, 1)
#ifndef CONFIG_GENERIC_ATOMIC64
-static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
+static __always_inline s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offset)
{
s64 prev;
long rc;
@@ -350,7 +350,7 @@ static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
return prev - offset;
}
-#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(v, 1)
+#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(v, 1)
#endif
#endif /* _ASM_RISCV_ATOMIC_H */
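The mechanical atomic_* to arch_atomic_* rename above exists so the generic
layer can wrap each primitive with instrumentation. A hedged sketch of that
pattern, simplified from include/asm-generic/atomic-instrumented.h and not
part of this patch:

    /* Hedged sketch of the generic instrumented wrapper, not this patch. */
    static __always_inline int atomic_read(const atomic_t *v)
    {
            instrument_atomic_read(v, sizeof(*v));  /* KASAN/KCSAN hooks */
            return arch_atomic_read(v);
    }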
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 262e5bbb2776..36dc962f6343 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -37,7 +37,7 @@
__ret; \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
@@ -72,7 +72,7 @@
__ret; \
})
-#define xchg_acquire(ptr, x) \
+#define arch_xchg_acquire(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_acquire((ptr), \
@@ -107,7 +107,7 @@
__ret; \
})
-#define xchg_release(ptr, x) \
+#define arch_xchg_release(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_release((ptr), \
@@ -140,7 +140,7 @@
__ret; \
})
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \
@@ -149,13 +149,13 @@
#define xchg32(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- xchg((ptr), (x)); \
+ arch_xchg((ptr), (x)); \
})
#define xchg64(ptr, x) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- xchg((ptr), (x)); \
+ arch_xchg((ptr), (x)); \
})
/*
@@ -199,7 +199,7 @@
__ret; \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -245,7 +245,7 @@
__ret; \
})
-#define cmpxchg_acquire(ptr, o, n) \
+#define arch_cmpxchg_acquire(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -291,7 +291,7 @@
__ret; \
})
-#define cmpxchg_release(ptr, o, n) \
+#define arch_cmpxchg_release(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -337,7 +337,7 @@
__ret; \
})
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -345,31 +345,31 @@
_o_, _n_, sizeof(*(ptr))); \
})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
#define cmpxchg32(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
#define cmpxchg32_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
- cmpxchg_relaxed((ptr), (o), (n)) \
+ arch_cmpxchg_relaxed((ptr), (o), (n)) \
})
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_relaxed((ptr), (o), (n)); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
})
#endif /* _ASM_RISCV_CMPXCHG_H */
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index caadfc1d7487..87ac65696871 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -115,6 +115,9 @@
#define CSR_MIP 0x344
#define CSR_PMPCFG0 0x3a0
#define CSR_PMPADDR0 0x3b0
+#define CSR_MVENDORID 0xf11
+#define CSR_MARCHID 0xf12
+#define CSR_MIMPID 0xf13
#define CSR_MHARTID 0xf14
#ifdef CONFIG_RISCV_M_MODE
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index 5c725e1df58b..f4b490cd0e5d 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -81,4 +81,10 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
#endif /* CONFIG_MMU */
+#define ELF_CORE_COPY_REGS(dest, regs) \
+do { \
+ *(struct user_regs_struct *)&(dest) = \
+ *(struct user_regs_struct *)regs; \
+} while (0);
+
#endif /* _ASM_RISCV_ELF_H */
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
new file mode 100644
index 000000000000..5f1046e82d9f
--- /dev/null
+++ b/arch/riscv/include/asm/errata_list.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Sifive.
+ */
+#ifndef ASM_ERRATA_LIST_H
+#define ASM_ERRATA_LIST_H
+
+#include <asm/alternative.h>
+#include <asm/vendorid_list.h>
+
+#ifdef CONFIG_ERRATA_SIFIVE
+#define ERRATA_SIFIVE_CIP_453 0
+#define ERRATA_SIFIVE_CIP_1200 1
+#define ERRATA_SIFIVE_NUMBER 2
+#endif
+
+#ifdef __ASSEMBLY__
+
+#define ALT_INSN_FAULT(x) \
+ALTERNATIVE(__stringify(RISCV_PTR do_trap_insn_fault), \
+ __stringify(RISCV_PTR sifive_cip_453_insn_fault_trp), \
+ SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \
+ CONFIG_ERRATA_SIFIVE_CIP_453)
+
+#define ALT_PAGE_FAULT(x) \
+ALTERNATIVE(__stringify(RISCV_PTR do_page_fault), \
+ __stringify(RISCV_PTR sifive_cip_453_page_fault_trp), \
+ SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \
+ CONFIG_ERRATA_SIFIVE_CIP_453)
+#else /* !__ASSEMBLY__ */
+
+#define ALT_FLUSH_TLB_PAGE(x) \
+asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \
+ ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \
+ : : "r" (addr) : "memory")
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index 845002cc2e57..04dad3380041 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -13,9 +13,19 @@
#endif
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+/*
+ * Clang prior to 13 had "mcount" instead of "_mcount":
+ * https://reviews.llvm.org/D98881
+ */
+#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
+#define MCOUNT_NAME _mcount
+#else
+#define MCOUNT_NAME mcount
+#endif
+
#define ARCH_SUPPORTS_FTRACE_OPS 1
#ifndef __ASSEMBLY__
-void _mcount(void);
+void MCOUNT_NAME(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
@@ -36,7 +46,7 @@ struct dyn_arch_ftrace {
* both auipc and jalr at the same time.
*/
-#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME)
#define JALR_SIGN_MASK (0x00000800)
#define JALR_OFFSET_MASK (0x00000fff)
#define AUIPC_OFFSET_MASK (0xfffff000)
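The MCOUNT_NAME indirection above exists because the profiling call emitted
under -pg is named differently across toolchains; a hedged illustration, not
part of the patch:

    /*
     * Hedged illustration, not part of the patch: with -pg the compiler
     * inserts a profiling call at every function entry, so the kernel must
     * define whichever symbol the toolchain emits:
     *
     *      void foo(void) { ... }
     * becomes, conceptually,
     *      void foo(void) { _mcount(); ... }   // "mcount" with Clang < 13
     */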
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
index 9807ad164015..e4c435509983 100644
--- a/arch/riscv/include/asm/irq.h
+++ b/arch/riscv/include/asm/irq.h
@@ -12,4 +12,6 @@
#include <asm-generic/irq.h>
+extern void __init init_IRQ(void);
+
#endif /* _ASM_RISCV_IRQ_H */
diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h
new file mode 100644
index 000000000000..e4e291d40759
--- /dev/null
+++ b/arch/riscv/include/asm/kexec.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 FORTH-ICS/CARV
+ * Nick Kossifidis <mick@ics.forth.gr>
+ */
+
+#ifndef _RISCV_KEXEC_H
+#define _RISCV_KEXEC_H
+
+#include <asm/page.h> /* For PAGE_SIZE */
+
+/* Maximum physical address we can use pages from */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can reach in physical address mode */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+
+/* Reserve a page for the control code buffer */
+#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE
+
+#define KEXEC_ARCH KEXEC_ARCH_RISCV
+
+extern void riscv_crash_save_regs(struct pt_regs *newregs);
+
+static inline void
+crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ if (oldregs)
+ memcpy(newregs, oldregs, sizeof(struct pt_regs));
+ else
+ riscv_crash_save_regs(newregs);
+}
+
+
+#define ARCH_HAS_KIMAGE_ARCH
+
+struct kimage_arch {
+ unsigned long fdt_addr;
+};
+
+extern const unsigned char riscv_kexec_relocate[];
+extern const unsigned int riscv_kexec_relocate_size;
+
+typedef void (*riscv_kexec_method)(unsigned long first_ind_entry,
+ unsigned long jump_addr,
+ unsigned long fdt_addr,
+ unsigned long hartid,
+ unsigned long va_pa_off);
+
+extern riscv_kexec_method riscv_kexec_norelocate;
+
+#endif
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index adc9d26f3d75..6a7761c86ec2 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -90,15 +90,58 @@ typedef struct page *pgtable_t;
#ifdef CONFIG_MMU
extern unsigned long va_pa_offset;
+#ifdef CONFIG_64BIT
+extern unsigned long va_kernel_pa_offset;
+#endif
+#ifdef CONFIG_XIP_KERNEL
+extern unsigned long va_kernel_xip_pa_offset;
+#endif
extern unsigned long pfn_base;
#define ARCH_PFN_OFFSET (pfn_base)
#else
#define va_pa_offset 0
+#ifdef CONFIG_64BIT
+#define va_kernel_pa_offset 0
+#endif
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */
-#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
-#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
+extern unsigned long kernel_virt_addr;
+
+#ifdef CONFIG_64BIT
+#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_pa_offset))
+#ifdef CONFIG_XIP_KERNEL
+#define kernel_mapping_pa_to_va(y) ({ \
+ unsigned long _y = y; \
+ (_y >= CONFIG_PHYS_RAM_BASE) ? \
+ (void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
+ (void *)((unsigned long)(_y) + va_kernel_xip_pa_offset); \
+ })
+#else
+#define kernel_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + va_kernel_pa_offset))
+#endif
+#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
+
+#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - va_pa_offset)
+#ifdef CONFIG_XIP_KERNEL
+#define kernel_mapping_va_to_pa(y) ({ \
+ unsigned long _y = y; \
+ (_y < kernel_virt_addr + XIP_OFFSET) ? \
+ ((unsigned long)(_y) - va_kernel_xip_pa_offset) : \
+ ((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET); \
+ })
+#else
+#define kernel_mapping_va_to_pa(x) ((unsigned long)(x) - va_kernel_pa_offset)
+#endif
+#define __va_to_pa_nodebug(x) ({ \
+ unsigned long _x = x; \
+ (_x < kernel_virt_addr) ? \
+ linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
+ })
+#else
+#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
+#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
+#endif
#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
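
In plain terms, the new 64-bit __va_to_pa_nodebug() dispatches on kernel_virt_addr: everything below it is in the linear map, everything at or above it belongs to the kernel mapping. A sketch of the non-XIP case, using the same variables the macros above reference:

	extern unsigned long kernel_virt_addr, va_pa_offset, va_kernel_pa_offset;

	static unsigned long va_to_pa_model(unsigned long va)
	{
		if (va < kernel_virt_addr)
			return va - va_pa_offset;        /* linear mapping */
		return va - va_kernel_pa_offset;         /* kernel mapping, non-XIP */
	}
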
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index 23b1544e0ca5..0af6933a7100 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -38,8 +38,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
}
#endif /* __PAGETABLE_PMD_FOLDED */
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index ebf817c1bdf4..62f3fe7368f3 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -11,23 +11,37 @@
#include <asm/pgtable-bits.h>
-#ifndef __ASSEMBLY__
+#ifndef CONFIG_MMU
+#define KERNEL_LINK_ADDR PAGE_OFFSET
+#else
-/* Page Upper Directory not used in RISC-V */
-#include <asm-generic/pgtable-nopud.h>
-#include <asm/page.h>
-#include <asm/tlbflush.h>
-#include <linux/mm_types.h>
+#define ADDRESS_SPACE_END (UL(-1))
-#ifdef CONFIG_MMU
+#ifdef CONFIG_64BIT
+/* Leave 2GB for kernel and BPF at the end of the address space */
+#define KERNEL_LINK_ADDR (ADDRESS_SPACE_END - SZ_2G + 1)
+#else
+#define KERNEL_LINK_ADDR PAGE_OFFSET
+#endif
#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
#define VMALLOC_END (PAGE_OFFSET - 1)
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
#define BPF_JIT_REGION_SIZE (SZ_128M)
+#ifdef CONFIG_64BIT
+#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END (MODULES_END)
+#else
#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END (VMALLOC_END)
+#endif
+
+/* Modules always live before the kernel */
+#ifdef CONFIG_64BIT
+#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G)
+#define MODULES_END (PFN_ALIGN((unsigned long)&_start))
+#endif
/*
* Roughly size the vmemmap space to be large enough to fit enough
@@ -60,12 +74,35 @@
#endif
+#ifdef CONFIG_XIP_KERNEL
+#define XIP_OFFSET SZ_8M
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* Page Upper Directory not used in RISC-V */
+#include <asm-generic/pgtable-nopud.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+#include <linux/mm_types.h>
+
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */
+#ifdef CONFIG_XIP_KERNEL
+#define XIP_FIXUP(addr) ({ \
+ uintptr_t __a = (uintptr_t)(addr); \
+ (__a >= CONFIG_XIP_PHYS_ADDR && __a < CONFIG_XIP_PHYS_ADDR + SZ_16M) ? \
+ __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
+ __a; \
+ })
+#else
+#define XIP_FIXUP(addr) (addr)
+#endif /* CONFIG_XIP_KERNEL */
+
#ifdef CONFIG_MMU
/* Number of entries in the page global directory */
#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
@@ -484,14 +521,21 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
#define kern_addr_valid(addr) (1) /* FIXME */
-extern void *dtb_early_va;
-extern uintptr_t dtb_early_pa;
+extern char _start[];
+extern void *_dtb_early_va;
+extern uintptr_t _dtb_early_pa;
+#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
+#define dtb_early_va (*(void **)XIP_FIXUP(&_dtb_early_va))
+#define dtb_early_pa (*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
+#else
+#define dtb_early_va _dtb_early_va
+#define dtb_early_pa _dtb_early_pa
+#endif /* CONFIG_XIP_KERNEL */
+
void setup_bootmem(void);
void paging_init(void);
void misc_mem_init(void);
-#define FIRST_USER_ADDRESS 0
-
/*
* ZERO_PAGE is a global shared page that is always zero,
* used for zero-mapped memory areas, etc.
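
XIP_FIXUP() above is what lets early code take the address of a writable symbol: anything that links into the 16MB flash window is redirected to its copy in RAM. The same arithmetic as a standalone sketch (the constants stand in for the Kconfig values):

	#define XIP_PHYS_ADDR 0x20000000UL  /* illustrative CONFIG_XIP_PHYS_ADDR */
	#define PHYS_RAM_BASE 0x80000000UL  /* illustrative CONFIG_PHYS_RAM_BASE */
	#define XIP_OFF       (8UL << 20)   /* XIP_OFFSET (SZ_8M) */

	static unsigned long xip_fixup_model(unsigned long a)
	{
		if (a >= XIP_PHYS_ADDR && a < XIP_PHYS_ADDR + (16UL << 20))
			return a - XIP_PHYS_ADDR + PHYS_RAM_BASE - XIP_OFF;
		return a;  /* already a RAM address, leave it alone */
	}
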
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 3a240037bde2..021ed64ee608 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -71,6 +71,7 @@ int riscv_of_processor_hartid(struct device_node *node);
int riscv_of_parent_hartid(struct device_node *node);
extern void riscv_fill_hwcap(void);
+extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
#endif /* __ASSEMBLY__ */
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
index cb4abb639e8d..09ad4e923510 100644
--- a/arch/riscv/include/asm/ptrace.h
+++ b/arch/riscv/include/asm/ptrace.h
@@ -119,6 +119,11 @@ extern int regs_query_register_offset(const char *name);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n);
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer);
+int do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_exit(struct pt_regs *regs);
+
/**
* regs_get_register() - get register value from its offset
* @regs: pt_regs from which register value is gotten
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 99895d9c3bdd..0d42693cb65e 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -51,10 +51,10 @@ enum sbi_ext_rfence_fid {
SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
- SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
- SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
+ SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
+ SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
};
enum sbi_ext_hsm_fid {
@@ -97,6 +97,9 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
void sbi_console_putchar(int ch);
int sbi_console_getchar(void);
+long sbi_get_mvendorid(void);
+long sbi_get_marchid(void);
+long sbi_get_mimpid(void);
void sbi_set_timer(uint64_t stime_value);
void sbi_shutdown(void);
void sbi_clear_ipi(void);
diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
index 1595c5b60cfd..8a303fb1ee3b 100644
--- a/arch/riscv/include/asm/sections.h
+++ b/arch/riscv/include/asm/sections.h
@@ -11,5 +11,6 @@ extern char _start[];
extern char _start_kernel[];
extern char __init_data_begin[], __init_data_end[];
extern char __init_text_begin[], __init_text_end[];
+extern char __alt_start[], __alt_end[];
#endif /* __ASM_SECTIONS_H */
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
index 6887b3d9f371..086f757e8ba3 100644
--- a/arch/riscv/include/asm/set_memory.h
+++ b/arch/riscv/include/asm/set_memory.h
@@ -26,6 +26,12 @@ static inline void protect_kernel_text_data(void) {}
static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
#endif
+#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
+void protect_kernel_linear_mapping_text_rodata(void);
+#else
+static inline void protect_kernel_linear_mapping_text_rodata(void) {}
+#endif
+
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
bool kernel_page_present(struct page *page);
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index df1f7c4cd433..a7d2811f3536 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -46,7 +46,7 @@ int riscv_hartid_to_cpuid(int hartid);
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
/* Set custom IPI operations */
-void riscv_set_ipi_ops(struct riscv_ipi_ops *ops);
+void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops);
/* Clear IPI for current CPU */
void riscv_clear_ipi(void);
@@ -92,7 +92,7 @@ static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
cpumask_set_cpu(boot_cpu_hartid, out);
}
-static inline void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+static inline void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
{
}
diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
index 5477e7ecb6e1..909049366555 100644
--- a/arch/riscv/include/asm/string.h
+++ b/arch/riscv/include/asm/string.h
@@ -23,5 +23,10 @@ extern asmlinkage void *__memmove(void *, const void *, size_t);
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
#define memmove(dst, src, len) __memmove(dst, src, len)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
#endif
#endif /* _ASM_RISCV_STRING_H */
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 49350c8bd7b0..b933b1583c9f 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -15,7 +15,7 @@
#include <linux/err.h>
/* The array of function pointers for syscalls. */
-extern void *sys_call_table[];
+extern void * const sys_call_table[];
/*
* Only the low 32 bits of orig_r0 are meaningful, so we return int.
diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
index 81de51e6aa32..507cae273bc6 100644
--- a/arch/riscv/include/asm/timex.h
+++ b/arch/riscv/include/asm/timex.h
@@ -88,4 +88,6 @@ static inline int read_current_timer(unsigned long *timer_val)
return 0;
}
+extern void time_init(void);
+
#endif /* _ASM_RISCV_TIMEX_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 394cfbccdcd9..c84218ad7afc 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -9,6 +9,7 @@
#include <linux/mm_types.h>
#include <asm/smp.h>
+#include <asm/errata_list.h>
#ifdef CONFIG_MMU
static inline void local_flush_tlb_all(void)
@@ -19,7 +20,7 @@ static inline void local_flush_tlb_all(void)
/* Flush one page from local TLB */
static inline void local_flush_tlb_page(unsigned long addr)
{
- __asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
+ ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
}
#else /* CONFIG_MMU */
#define local_flush_tlb_all() do { } while (0)
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 824b2c9da75b..f314ff44c48d 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -306,7 +306,9 @@ do { \
* data types like structures or arrays.
*
* @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
+ * to the result of dereferencing @ptr. The value of @x is copied to avoid
+ * re-ordering where @x is evaluated inside the block that enables user-space
+ * access (thus bypassing user space protection if @x is a function call).
*
* Caller must check the pointer with access_ok() before calling this
* function.
@@ -316,12 +318,13 @@ do { \
#define __put_user(x, ptr) \
({ \
__typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ __typeof__(*__gu_ptr) __val = (x); \
long __pu_err = 0; \
\
__chk_user_ptr(__gu_ptr); \
\
__enable_user_access(); \
- __put_user_nocheck(x, __gu_ptr, __pu_err); \
+ __put_user_nocheck(__val, __gu_ptr, __pu_err); \
__disable_user_access(); \
\
__pu_err; \
@@ -372,7 +375,6 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
extern long strncpy_from_user(char *dest, const char __user *src, long count);
-extern long __must_check strlen_user(const char __user *str);
extern long __must_check strnlen_user(const char __user *str, long n);
extern
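
The __val copy in __put_user() matters when the value argument has side effects; without it, the argument would be evaluated between __enable_user_access() and __disable_user_access(), i.e. with user memory accessible. An illustrative caller (compute_value() is hypothetical):

	static long store_result(int __user *uptr)
	{
		/* compute_value() now runs before __enable_user_access(),
		 * not inside the enable/disable window */
		return __put_user(compute_value(), uptr);
	}
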
diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h
new file mode 100644
index 000000000000..9d934215b3c8
--- /dev/null
+++ b/arch/riscv/include/asm/vendorid_list.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 SiFive
+ */
+#ifndef ASM_VENDOR_LIST_H
+#define ASM_VENDOR_LIST_H
+
+#define SIFIVE_VENDOR_ID 0x489
+
+#endif
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 3dc0abde988a..d3081e4d9600 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -8,6 +8,11 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
endif
+CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
+
+ifdef CONFIG_KEXEC
+AFLAGS_kexec_relocate.o := -mcmodel=medany -mno-relax
+endif
extra-y += head.o
extra-y += vmlinux.lds
@@ -54,6 +59,8 @@ obj-$(CONFIG_SMP) += cpu_ops_sbi.o
endif
obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o
obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_KEXEC) += kexec_relocate.o crash_save_regs.o machine_kexec.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
diff --git a/arch/riscv/kernel/crash_dump.c b/arch/riscv/kernel/crash_dump.c
new file mode 100644
index 000000000000..86cc0ada5752
--- /dev/null
+++ b/arch/riscv/kernel/crash_dump.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This code comes from arch/arm64/kernel/crash_dump.c
+ * Created by: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ * Copyright (C) 2017 Linaro Limited
+ */
+
+#include <linux/crash_dump.h>
+#include <linux/io.h>
+
+/**
+ * copy_oldmem_page() - copy one page from old kernel memory
+ * @pfn: page frame number to be copied
+ * @buf: buffer where the copied page is placed
+ * @csize: number of bytes to copy
+ * @offset: offset in bytes into the page
+ * @userbuf: if set, @buf is in a user address space
+ *
+ * This function copies one page from old kernel memory into buffer pointed by
+ * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes
+ * copied or negative error in case of failure.
+ */
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ size_t csize, unsigned long offset,
+ int userbuf)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
+ if (!vaddr)
+ return -ENOMEM;
+
+ if (userbuf) {
+ if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
+ memunmap(vaddr);
+ return -EFAULT;
+ }
+ } else
+ memcpy(buf, vaddr + offset, csize);
+
+ memunmap(vaddr);
+ return csize;
+}
diff --git a/arch/riscv/kernel/crash_save_regs.S b/arch/riscv/kernel/crash_save_regs.S
new file mode 100644
index 000000000000..7832fb763aba
--- /dev/null
+++ b/arch/riscv/kernel/crash_save_regs.S
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 FORTH-ICS/CARV
+ * Nick Kossifidis <mick@ics.forth.gr>
+ */
+
+#include <asm/asm.h> /* For RISCV_* and REG_* macros */
+#include <asm/csr.h> /* For CSR_* macros */
+#include <asm/asm-offsets.h> /* For offsets on pt_regs */
+#include <linux/linkage.h> /* For SYM_* macros */
+
+.section ".text"
+SYM_CODE_START(riscv_crash_save_regs)
+ REG_S ra, PT_RA(a0) /* x1 */
+ REG_S sp, PT_SP(a0) /* x2 */
+ REG_S gp, PT_GP(a0) /* x3 */
+ REG_S tp, PT_TP(a0) /* x4 */
+ REG_S t0, PT_T0(a0) /* x5 */
+ REG_S t1, PT_T1(a0) /* x6 */
+ REG_S t2, PT_T2(a0) /* x7 */
+ REG_S s0, PT_S0(a0) /* x8/fp */
+ REG_S s1, PT_S1(a0) /* x9 */
+ REG_S a0, PT_A0(a0) /* x10 */
+ REG_S a1, PT_A1(a0) /* x11 */
+ REG_S a2, PT_A2(a0) /* x12 */
+ REG_S a3, PT_A3(a0) /* x13 */
+ REG_S a4, PT_A4(a0) /* x14 */
+ REG_S a5, PT_A5(a0) /* x15 */
+ REG_S a6, PT_A6(a0) /* x16 */
+ REG_S a7, PT_A7(a0) /* x17 */
+ REG_S s2, PT_S2(a0) /* x18 */
+ REG_S s3, PT_S3(a0) /* x19 */
+ REG_S s4, PT_S4(a0) /* x20 */
+ REG_S s5, PT_S5(a0) /* x21 */
+ REG_S s6, PT_S6(a0) /* x22 */
+ REG_S s7, PT_S7(a0) /* x23 */
+ REG_S s8, PT_S8(a0) /* x24 */
+ REG_S s9, PT_S9(a0) /* x25 */
+ REG_S s10, PT_S10(a0) /* x26 */
+ REG_S s11, PT_S11(a0) /* x27 */
+ REG_S t3, PT_T3(a0) /* x28 */
+ REG_S t4, PT_T4(a0) /* x29 */
+ REG_S t5, PT_T5(a0) /* x30 */
+ REG_S t6, PT_T6(a0) /* x31 */
+
+ csrr t1, CSR_STATUS
+ csrr t2, CSR_EPC
+ csrr t3, CSR_TVAL
+ csrr t4, CSR_CAUSE
+
+ REG_S t1, PT_STATUS(a0)
+ REG_S t2, PT_EPC(a0)
+ REG_S t3, PT_BADADDR(a0)
+ REG_S t4, PT_CAUSE(a0)
+ ret
+SYM_CODE_END(riscv_crash_save_regs)
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 744f3209c48d..80d5a9e017b0 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -12,6 +12,7 @@
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
+#include <asm/errata_list.h>
#if !IS_ENABLED(CONFIG_PREEMPTION)
.set resume_kernel, restore_all
@@ -130,6 +131,9 @@ skip_context_tracking:
*/
andi t0, s1, SR_PIE
beqz t0, 1f
+ /* kprobes, entered via ebreak, must have interrupts disabled. */
+ li t0, EXC_BREAKPOINT
+ beq s4, t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_on
#endif
@@ -447,10 +451,11 @@ ENDPROC(__switch_to)
#endif
.section ".rodata"
+ .align LGREG
/* Exception vector table */
ENTRY(excp_vect_table)
RISCV_PTR do_trap_insn_misaligned
- RISCV_PTR do_trap_insn_fault
+ ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
RISCV_PTR do_trap_insn_illegal
RISCV_PTR do_trap_break
RISCV_PTR do_trap_load_misaligned
@@ -461,7 +466,8 @@ ENTRY(excp_vect_table)
RISCV_PTR do_trap_ecall_s
RISCV_PTR do_trap_unknown
RISCV_PTR do_trap_ecall_m
- RISCV_PTR do_page_fault /* instruction page fault */
+ /* instruction page fault */
+ ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
RISCV_PTR do_page_fault /* load page fault */
RISCV_PTR do_trap_unknown
RISCV_PTR do_page_fault /* store page fault */
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index f5a9bad86e58..89cc58ab52b4 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -9,11 +9,23 @@
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/page.h>
+#include <asm/pgtable.h>
#include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include "efi-header.S"
+#ifdef CONFIG_XIP_KERNEL
+.macro XIP_FIXUP_OFFSET reg
+ REG_L t0, _xip_fixup
+ add \reg, \reg, t0
+.endm
+_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+#else
+.macro XIP_FIXUP_OFFSET reg
+.endm
+#endif /* CONFIG_XIP_KERNEL */
+
__HEAD
ENTRY(_start)
/*
@@ -69,7 +81,9 @@ pe_head_start:
#ifdef CONFIG_MMU
relocate:
/* Relocate return address */
- li a1, PAGE_OFFSET
+ la a1, kernel_virt_addr
+ XIP_FIXUP_OFFSET a1
+ REG_L a1, 0(a1)
la a2, _start
sub a1, a1, a2
add ra, ra, a1
@@ -91,6 +105,7 @@ relocate:
* to ensure the new translations are in use.
*/
la a0, trampoline_pg_dir
+ XIP_FIXUP_OFFSET a0
srl a0, a0, PAGE_SHIFT
or a0, a0, a1
sfence.vma
@@ -144,7 +159,9 @@ secondary_start_sbi:
slli a3, a0, LGREG
la a4, __cpu_up_stack_pointer
+ XIP_FIXUP_OFFSET a4
la a5, __cpu_up_task_pointer
+ XIP_FIXUP_OFFSET a5
add a4, a3, a4
add a5, a3, a5
REG_L sp, (a4)
@@ -156,6 +173,7 @@ secondary_start_common:
#ifdef CONFIG_MMU
/* Enable virtual memory and relocate to virtual address */
la a0, swapper_pg_dir
+ XIP_FIXUP_OFFSET a0
call relocate
#endif
call setup_trap_vector
@@ -236,12 +254,33 @@ pmp_done:
.Lgood_cores:
#endif
+#ifndef CONFIG_XIP_KERNEL
/* Pick one hart to run the main boot sequence */
la a3, hart_lottery
li a2, 1
amoadd.w a3, a2, (a3)
bnez a3, .Lsecondary_start
+#else
+ /* hart_lottery in flash contains a magic number */
+ la a3, hart_lottery
+ mv a2, a3
+ XIP_FIXUP_OFFSET a2
+ lw t1, (a3)
+ amoswap.w t0, t1, (a2)
+ /* first time here if hart_lottery in RAM is not set */
+ beq t0, t1, .Lsecondary_start
+
+ la sp, _end + THREAD_SIZE
+ XIP_FIXUP_OFFSET sp
+ mv s0, a0
+ call __copy_data
+
+ /* Restore a0 copy */
+ mv a0, s0
+#endif
+
+#ifndef CONFIG_XIP_KERNEL
/* Clear BSS for flat non-ELF images */
la a3, __bss_start
la a4, __bss_stop
@@ -251,15 +290,18 @@ clear_bss:
add a3, a3, RISCV_SZPTR
blt a3, a4, clear_bss
clear_bss_done:
-
+#endif
/* Save hart ID and DTB physical address */
mv s0, a0
mv s1, a1
+
la a2, boot_cpu_hartid
+ XIP_FIXUP_OFFSET a2
REG_S a0, (a2)
/* Initialize page tables and relocate to virtual addresses */
la sp, init_thread_union + THREAD_SIZE
+ XIP_FIXUP_OFFSET sp
#ifdef CONFIG_BUILTIN_DTB
la a0, __dtb_start
#else
@@ -268,6 +310,7 @@ clear_bss_done:
call setup_vm
#ifdef CONFIG_MMU
la a0, early_pg_dir
+ XIP_FIXUP_OFFSET a0
call relocate
#endif /* CONFIG_MMU */
@@ -292,7 +335,9 @@ clear_bss_done:
slli a3, a0, LGREG
la a1, __cpu_up_stack_pointer
+ XIP_FIXUP_OFFSET a1
la a2, __cpu_up_task_pointer
+ XIP_FIXUP_OFFSET a2
add a1, a3, a1
add a2, a3, a2
diff --git a/arch/riscv/kernel/head.h b/arch/riscv/kernel/head.h
index b48dda3d04f6..aabbc3ac3e48 100644
--- a/arch/riscv/kernel/head.h
+++ b/arch/riscv/kernel/head.h
@@ -12,6 +12,9 @@ extern atomic_t hart_lottery;
asmlinkage void do_page_fault(struct pt_regs *regs);
asmlinkage void __init setup_vm(uintptr_t dtb_pa);
+#ifdef CONFIG_XIP_KERNEL
+asmlinkage void __init __copy_data(void);
+#endif
extern void *__cpu_up_stack_pointer[];
extern void *__cpu_up_task_pointer[];
diff --git a/arch/riscv/kernel/kexec_relocate.S b/arch/riscv/kernel/kexec_relocate.S
new file mode 100644
index 000000000000..88c3beabe9b4
--- /dev/null
+++ b/arch/riscv/kernel/kexec_relocate.S
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 FORTH-ICS/CARV
+ * Nick Kossifidis <mick@ics.forth.gr>
+ */
+
+#include <asm/asm.h> /* For RISCV_* and REG_* macros */
+#include <asm/csr.h> /* For CSR_* macros */
+#include <asm/page.h> /* For PAGE_SIZE */
+#include <linux/linkage.h> /* For SYM_* macros */
+
+.section ".rodata"
+SYM_CODE_START(riscv_kexec_relocate)
+
+ /*
+ * s0: Pointer to the current entry
+ * s1: (const) Phys address to jump to after relocation
+ * s2: (const) Phys address of the FDT image
+ * s3: (const) The hartid of the current hart
+ * s4: Pointer to the destination address for the relocation
+ * s5: (const) Number of words per page
+ * s6: (const) 1, used for subtraction
+ * s7: (const) va_pa_offset, used when switching MMU off
+ * s8: (const) Physical address of the main loop
+ * s9: (debug) indirection page counter
+ * s10: (debug) entry counter
+ * s11: (debug) copied words counter
+ */
+ mv s0, a0
+ mv s1, a1
+ mv s2, a2
+ mv s3, a3
+ mv s4, zero
+ li s5, (PAGE_SIZE / RISCV_SZPTR)
+ li s6, 1
+ mv s7, a4
+ mv s8, zero
+ mv s9, zero
+ mv s10, zero
+ mv s11, zero
+
+ /* Disable / cleanup interrupts */
+ csrw CSR_SIE, zero
+ csrw CSR_SIP, zero
+
+ /*
+ * When we switch SATP.MODE to "Bare" we'll only
+ * play with physical addresses. However the first time
+ * we try to jump somewhere, the offset on the jump
+ * will be relative to pc which will still be on VA. To
+ * deal with this we set stvec to the physical address at
+ * the start of the loop below so that we jump there in
+ * any case.
+ */
+ la s8, 1f
+ sub s8, s8, s7
+ csrw CSR_STVEC, s8
+
+ /* Process entries in a loop */
+.align 2
+1:
+ addi s10, s10, 1
+ REG_L t0, 0(s0) /* t0 = *image->entry */
+ addi s0, s0, RISCV_SZPTR /* image->entry++ */
+
+ /* IND_DESTINATION entry ? -> save destination address */
+ andi t1, t0, 0x1
+ beqz t1, 2f
+ andi s4, t0, ~0x1
+ j 1b
+
+2:
+ /* IND_INDIRECTION entry ? -> update next entry ptr (PA) */
+ andi t1, t0, 0x2
+ beqz t1, 2f
+ andi s0, t0, ~0x2
+ addi s9, s9, 1
+ csrw CSR_SATP, zero
+ jalr zero, s8, 0
+
+2:
+ /* IND_DONE entry ? -> jump to done label */
+ andi t1, t0, 0x4
+ beqz t1, 2f
+ j 4f
+
+2:
+ /*
+ * IND_SOURCE entry ? -> copy page word by word to the
+ * destination address we got from IND_DESTINATION
+ */
+ andi t1, t0, 0x8
+ beqz t1, 1b /* Unknown entry type, ignore it */
+ andi t0, t0, ~0x8
+ mv t3, s5 /* i = num words per page */
+3: /* copy loop */
+ REG_L t1, (t0) /* t1 = *src_ptr */
+ REG_S t1, (s4) /* *dst_ptr = *src_ptr */
+ addi t0, t0, RISCV_SZPTR /* src_ptr++ */
+ addi s4, s4, RISCV_SZPTR /* dst_ptr++ */
+ sub t3, t3, s6 /* i-- */
+ addi s11, s11, 1 /* c++ */
+ beqz t3, 1b /* copy done ? */
+ j 3b
+
+4:
+ /* Pass the arguments to the next kernel / Cleanup */
+ mv a0, s3
+ mv a1, s2
+ mv a2, s1
+
+ /* Cleanup */
+ mv a3, zero
+ mv a4, zero
+ mv a5, zero
+ mv a6, zero
+ mv a7, zero
+
+ mv s0, zero
+ mv s1, zero
+ mv s2, zero
+ mv s3, zero
+ mv s4, zero
+ mv s5, zero
+ mv s6, zero
+ mv s7, zero
+ mv s8, zero
+ mv s9, zero
+ mv s10, zero
+ mv s11, zero
+
+ mv t0, zero
+ mv t1, zero
+ mv t2, zero
+ mv t3, zero
+ mv t4, zero
+ mv t5, zero
+ mv t6, zero
+ csrw CSR_SEPC, zero
+ csrw CSR_SCAUSE, zero
+ csrw CSR_SSCRATCH, zero
+
+ /*
+ * Make sure the relocated code is visible
+ * and jump to the new kernel
+ */
+ fence.i
+
+ jalr zero, a2, 0
+
+SYM_CODE_END(riscv_kexec_relocate)
+riscv_kexec_relocate_end:
+
+
+/* Used for jumping to crashkernel */
+.section ".text"
+SYM_CODE_START(riscv_kexec_norelocate)
+ /*
+ * s0: (const) Phys address to jump to
+ * s1: (const) Phys address of the FDT image
+ * s2: (const) The hartid of the current hart
+ * s3: (const) va_pa_offset, used when switching MMU off
+ */
+ mv s0, a1
+ mv s1, a2
+ mv s2, a3
+ mv s3, a4
+
+ /* Disable / cleanup interrupts */
+ csrw CSR_SIE, zero
+ csrw CSR_SIP, zero
+
+ /* Switch to physical addressing */
+ la s4, 1f
+ sub s4, s4, s3
+ csrw CSR_STVEC, s4
+ csrw CSR_SATP, zero
+
+.align 2
+1:
+ /* Pass the arguments to the next kernel / Cleanup */
+ mv a0, s2
+ mv a1, s1
+ mv a2, s0
+
+ /* Cleanup */
+ mv a3, zero
+ mv a4, zero
+ mv a5, zero
+ mv a6, zero
+ mv a7, zero
+
+ mv s0, zero
+ mv s1, zero
+ mv s2, zero
+ mv s3, zero
+ mv s4, zero
+ mv s5, zero
+ mv s6, zero
+ mv s7, zero
+ mv s8, zero
+ mv s9, zero
+ mv s10, zero
+ mv s11, zero
+
+ mv t0, zero
+ mv t1, zero
+ mv t2, zero
+ mv t3, zero
+ mv t4, zero
+ mv t5, zero
+ mv t6, zero
+ csrw CSR_SEPC, zero
+ csrw CSR_SCAUSE, zero
+ csrw CSR_SSCRATCH, zero
+
+ jalr zero, a2, 0
+SYM_CODE_END(riscv_kexec_norelocate)
+
+.section ".rodata"
+SYM_DATA(riscv_kexec_relocate_size,
+ .long riscv_kexec_relocate_end - riscv_kexec_relocate)
+
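The main loop in riscv_kexec_relocate walks the generic kexec indirection list; the 0x1/0x2/0x4/0x8 flag bits it tests are kexec's IND_DESTINATION, IND_INDIRECTION, IND_DONE and IND_SOURCE. A C rendering of that walk (a sketch with the MMU and cache handling omitted):

	#include <string.h>

	#define KEXEC_PAGE_SZ 4096UL   /* PAGE_SIZE stand-in */

	static void kexec_walk_model(unsigned long *entry)
	{
		unsigned char *dst = 0;

		for (;; entry++) {
			unsigned long e = *entry;

			if (e & 0x1)                          /* IND_DESTINATION */
				dst = (unsigned char *)(e & ~0x1UL);
			else if (e & 0x2)                     /* IND_INDIRECTION */
				entry = (unsigned long *)(e & ~0x2UL) - 1;
			else if (e & 0x4)                     /* IND_DONE */
				break;
			else if (e & 0x8) {                   /* IND_SOURCE */
				memcpy(dst, (void *)(e & ~0x8UL), KEXEC_PAGE_SZ);
				dst += KEXEC_PAGE_SZ;
			}
			/* anything else is ignored, as in the assembly */
		}
	}
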
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
new file mode 100644
index 000000000000..9e99e1db156b
--- /dev/null
+++ b/arch/riscv/kernel/machine_kexec.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 FORTH-ICS/CARV
+ * Nick Kossifidis <mick@ics.forth.gr>
+ */
+
+#include <linux/kexec.h>
+#include <asm/kexec.h> /* For riscv_kexec_* symbol defines */
+#include <linux/smp.h> /* For smp_send_stop() */
+#include <asm/cacheflush.h> /* For local_flush_icache_all() */
+#include <asm/barrier.h> /* For smp_wmb() */
+#include <asm/page.h> /* For PAGE_MASK */
+#include <linux/libfdt.h> /* For fdt_check_header() */
+#include <asm/set_memory.h> /* For set_memory_x() */
+#include <linux/compiler.h> /* For unreachable() */
+#include <linux/cpu.h> /* For cpu_down() */
+#include <linux/reboot.h>
+
+/*
+ * kexec_image_info - Print received image details
+ */
+static void
+kexec_image_info(const struct kimage *image)
+{
+ unsigned long i;
+
+ pr_debug("Kexec image info:\n");
+ pr_debug("\ttype: %d\n", image->type);
+ pr_debug("\tstart: %lx\n", image->start);
+ pr_debug("\thead: %lx\n", image->head);
+ pr_debug("\tnr_segments: %lu\n", image->nr_segments);
+
+ for (i = 0; i < image->nr_segments; i++) {
+ pr_debug("\t segment[%lu]: %016lx - %016lx", i,
+ image->segment[i].mem,
+ image->segment[i].mem + image->segment[i].memsz);
+ pr_debug("\t\t0x%lx bytes, %lu pages\n",
+ (unsigned long) image->segment[i].memsz,
+ (unsigned long) image->segment[i].memsz / PAGE_SIZE);
+ }
+}
+
+/*
+ * machine_kexec_prepare - Initialize kexec
+ *
+ * This function is called from do_kexec_load, when the user has
+ * provided us with an image to be loaded. Its goal is to validate
+ * the image and prepare the control code buffer as needed.
+ * Note that kimage_alloc_init has already been called and the
+ * control buffer has already been allocated.
+ */
+int
+machine_kexec_prepare(struct kimage *image)
+{
+ struct kimage_arch *internal = &image->arch;
+ struct fdt_header fdt = {0};
+ void *control_code_buffer = NULL;
+ unsigned int control_code_buffer_sz = 0;
+ int i = 0;
+
+ kexec_image_info(image);
+
+ /* Find the Flattened Device Tree and save its physical address */
+ for (i = 0; i < image->nr_segments; i++) {
+ if (image->segment[i].memsz <= sizeof(fdt))
+ continue;
+
+ if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt)))
+ continue;
+
+ if (fdt_check_header(&fdt))
+ continue;
+
+ internal->fdt_addr = (unsigned long) image->segment[i].mem;
+ break;
+ }
+
+ if (!internal->fdt_addr) {
+ pr_err("Device tree not included in the provided image\n");
+ return -EINVAL;
+ }
+
+ /* Copy the assembler code for relocation to the control page */
+ if (image->type != KEXEC_TYPE_CRASH) {
+ control_code_buffer = page_address(image->control_code_page);
+ control_code_buffer_sz = page_size(image->control_code_page);
+
+ if (unlikely(riscv_kexec_relocate_size > control_code_buffer_sz)) {
+ pr_err("Relocation code doesn't fit within a control page\n");
+ return -EINVAL;
+ }
+
+ memcpy(control_code_buffer, riscv_kexec_relocate,
+ riscv_kexec_relocate_size);
+
+ /* Mark the control page executable */
+ set_memory_x((unsigned long) control_code_buffer, 1);
+ }
+
+ return 0;
+}
+
+
+/*
+ * machine_kexec_cleanup - Cleanup any leftovers from
+ * machine_kexec_prepare
+ *
+ * This function is called by kimage_free to handle any arch-specific
+ * allocations done on machine_kexec_prepare. Since we didn't do any
+ * allocations there, this is just an empty function. Note that the
+ * control buffer is freed by kimage_free.
+ */
+void
+machine_kexec_cleanup(struct kimage *image)
+{
+}
+
+
+/*
+ * machine_shutdown - Prepare for a kexec reboot
+ *
+ * This function is called by kernel_kexec just before machine_kexec
+ * below. Its goal is to prepare the rest of the system (the other
+ * harts and possibly devices etc) for a kexec reboot.
+ */
+void machine_shutdown(void)
+{
+ /*
+ * No more interrupts on this hart
+ * until we are back up.
+ */
+ local_irq_disable();
+
+#if defined(CONFIG_HOTPLUG_CPU)
+ smp_shutdown_nonboot_cpus(smp_processor_id());
+#endif
+}
+
+/*
+ * machine_crash_shutdown - Prepare to kexec after a kernel crash
+ *
+ * This function is called by crash_kexec just before machine_kexec
+ * below and its goal is similar to machine_shutdown, but in case of
+ * a kernel crash. Since we don't handle such cases yet, this function
+ * is empty.
+ */
+void
+machine_crash_shutdown(struct pt_regs *regs)
+{
+ crash_save_cpu(regs, smp_processor_id());
+ machine_shutdown();
+ pr_info("Starting crashdump kernel...\n");
+}
+
+/*
+ * machine_kexec - Jump to the loaded kimage
+ *
+ * This function is called by kernel_kexec which is called by the
+ * reboot system call when the reboot cmd is LINUX_REBOOT_CMD_KEXEC,
+ * or by crash_kernel which is called by the kernel's arch-specific
+ * trap handler in case of a kernel panic. It's the final stage of
+ * the kexec process where the pre-loaded kimage is ready to be
+ * executed. We assume at this point that all other harts are
+ * suspended and this hart will be the new boot hart.
+ */
+void __noreturn
+machine_kexec(struct kimage *image)
+{
+ struct kimage_arch *internal = &image->arch;
+ unsigned long jump_addr = (unsigned long) image->start;
+ unsigned long first_ind_entry = (unsigned long) &image->head;
+ unsigned long this_hart_id = raw_smp_processor_id();
+ unsigned long fdt_addr = internal->fdt_addr;
+ void *control_code_buffer = page_address(image->control_code_page);
+ riscv_kexec_method kexec_method = NULL;
+
+ if (image->type != KEXEC_TYPE_CRASH)
+ kexec_method = control_code_buffer;
+ else
+ kexec_method = (riscv_kexec_method) &riscv_kexec_norelocate;
+
+ pr_notice("Will call new kernel at %08lx from hart id %lx\n",
+ jump_addr, this_hart_id);
+ pr_notice("FDT image at %08lx\n", fdt_addr);
+
+ /* Make sure the relocation code is visible to the hart */
+ local_flush_icache_all();
+
+ /* Jump to the relocation code */
+ pr_notice("Bye...\n");
+ kexec_method(first_ind_entry, jump_addr, fdt_addr,
+ this_hart_id, va_pa_offset);
+ unreachable();
+}
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index 8a5593ff9ff3..6d462681c9c0 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -47,8 +47,8 @@
ENTRY(ftrace_stub)
#ifdef CONFIG_DYNAMIC_FTRACE
- .global _mcount
- .set _mcount, ftrace_stub
+ .global MCOUNT_NAME
+ .set MCOUNT_NAME, ftrace_stub
#endif
ret
ENDPROC(ftrace_stub)
@@ -78,7 +78,7 @@ ENDPROC(return_to_handler)
#endif
#ifndef CONFIG_DYNAMIC_FTRACE
-ENTRY(_mcount)
+ENTRY(MCOUNT_NAME)
la t4, ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la t0, ftrace_graph_return
@@ -124,6 +124,6 @@ do_trace:
jalr t5
RESTORE_ABI_STATE
ret
-ENDPROC(_mcount)
+ENDPROC(MCOUNT_NAME)
#endif
-EXPORT_SYMBOL(_mcount)
+EXPORT_SYMBOL(MCOUNT_NAME)
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 104fba889cf7..68a9e3d1fe16 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -408,13 +408,11 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
}
#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
-#define VMALLOC_MODULE_START \
- max(PFN_ALIGN((unsigned long)&_end - SZ_2G), VMALLOC_START)
void *module_alloc(unsigned long size)
{
- return __vmalloc_node_range(size, 1, VMALLOC_MODULE_START,
- VMALLOC_END, GFP_KERNEL,
- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ return __vmalloc_node_range(size, 1, MODULES_VADDR,
+ MODULES_END, GFP_KERNEL,
+ PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
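
Note the protection change that rides along: module memory now comes back PAGE_KERNEL (non-executable) rather than PAGE_KERNEL_EXEC. Presumably the text range is flipped to executable once the module is laid out, along the lines of the following sketch (mod_text_start and mod_text_pages are hypothetical names):

	/* after relocation, make just the module text executable */
	set_memory_x((unsigned long)mod_text_start, mod_text_pages);
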
diff --git a/arch/riscv/kernel/probes/ftrace.c b/arch/riscv/kernel/probes/ftrace.c
index e6372490aa0b..aab85a82f419 100644
--- a/arch/riscv/kernel/probes/ftrace.c
+++ b/arch/riscv/kernel/probes/ftrace.c
@@ -2,39 +2,47 @@
#include <linux/kprobes.h>
-/* Ftrace callback handler for kprobes -- called under preepmt disabed */
+/* Ftrace callback handler for kprobes -- called under preepmt disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
- struct ftrace_ops *ops, struct ftrace_regs *regs)
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct kprobe *p;
+ struct pt_regs *regs;
struct kprobe_ctlblk *kcb;
+ int bit;
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+
+ preempt_disable_notrace();
p = get_kprobe((kprobe_opcode_t *)ip);
if (unlikely(!p) || kprobe_disabled(p))
- return;
+ goto out;
+ regs = ftrace_get_regs(fregs);
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
} else {
- unsigned long orig_ip = instruction_pointer(&(regs->regs));
+ unsigned long orig_ip = instruction_pointer(regs);
- instruction_pointer_set(&(regs->regs), ip);
+ instruction_pointer_set(regs, ip);
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (!p->pre_handler || !p->pre_handler(p, &(regs->regs))) {
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
/*
* Emulate singlestep (and also recover regs->pc)
* as if there is a nop
*/
- instruction_pointer_set(&(regs->regs),
+ instruction_pointer_set(regs,
(unsigned long)p->addr + MCOUNT_INSN_SIZE);
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
- p->post_handler(p, &(regs->regs), 0);
+ p->post_handler(p, regs, 0);
}
- instruction_pointer_set(&(regs->regs), orig_ip);
+ instruction_pointer_set(regs, orig_ip);
}
/*
@@ -43,6 +51,9 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
*/
__this_cpu_write(current_kprobe, NULL);
}
+out:
+ preempt_enable_notrace();
+ ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c
index a2ec18662fee..247e33fa5bc7 100644
--- a/arch/riscv/kernel/probes/kprobes.c
+++ b/arch/riscv/kernel/probes/kprobes.c
@@ -84,6 +84,16 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return 0;
}
+#ifdef CONFIG_MMU
+void *alloc_insn_page(void)
+{
+ return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL, PAGE_KERNEL_READ_EXEC,
+ VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
+ __builtin_return_address(0));
+}
+#endif
+
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
@@ -256,35 +266,19 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
* normal page fault.
*/
regs->epc = (unsigned long) cur->addr;
- if (!instruction_pointer(regs))
- BUG();
+ BUG_ON(!instruction_pointer(regs));
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
- else
+ else {
+ kprobes_restore_local_irqflag(kcb, regs);
reset_current_kprobe();
+ }
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index 6f728e731bed..f9cd57c9c67d 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -10,6 +10,7 @@
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/ptrace.h>
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index f4a7db3d309e..7402a417f38e 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -11,14 +11,14 @@
#include <asm/smp.h>
/* default SBI version is 0.1 */
-unsigned long sbi_spec_version = SBI_SPEC_VERSION_DEFAULT;
+unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
EXPORT_SYMBOL(sbi_spec_version);
-static void (*__sbi_set_timer)(uint64_t stime);
-static int (*__sbi_send_ipi)(const unsigned long *hart_mask);
+static void (*__sbi_set_timer)(uint64_t stime) __ro_after_init;
+static int (*__sbi_send_ipi)(const unsigned long *hart_mask) __ro_after_init;
static int (*__sbi_rfence)(int fid, const unsigned long *hart_mask,
unsigned long start, unsigned long size,
- unsigned long arg4, unsigned long arg5);
+ unsigned long arg4, unsigned long arg5) __ro_after_init;
struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
unsigned long arg1, unsigned long arg2,
@@ -116,7 +116,7 @@ void sbi_clear_ipi(void)
EXPORT_SYMBOL(sbi_clear_ipi);
/**
- * sbi_set_timer_v01() - Program the timer for next timer event.
+ * __sbi_set_timer_v01() - Program the timer for next timer event.
* @stime_value: The value after which next timer event should fire.
*
* Return: None
@@ -547,6 +547,21 @@ static inline long sbi_get_firmware_version(void)
return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}
+long sbi_get_mvendorid(void)
+{
+ return __sbi_base_ecall(SBI_EXT_BASE_GET_MVENDORID);
+}
+
+long sbi_get_marchid(void)
+{
+ return __sbi_base_ecall(SBI_EXT_BASE_GET_MARCHID);
+}
+
+long sbi_get_mimpid(void)
+{
+ return __sbi_base_ecall(SBI_EXT_BASE_GET_MIMPID);
+}
+
static void sbi_send_cpumask_ipi(const struct cpumask *target)
{
struct cpumask hartid_mask;
@@ -556,7 +571,7 @@ static void sbi_send_cpumask_ipi(const struct cpumask *target)
sbi_send_ipi(cpumask_bits(&hartid_mask));
}
-static struct riscv_ipi_ops sbi_ipi_ops = {
+static const struct riscv_ipi_ops sbi_ipi_ops = {
.ipi_inject = sbi_send_cpumask_ipi
};
@@ -577,19 +592,19 @@ void __init sbi_init(void)
sbi_get_firmware_id(), sbi_get_firmware_version());
if (sbi_probe_extension(SBI_EXT_TIME) > 0) {
__sbi_set_timer = __sbi_set_timer_v02;
- pr_info("SBI v0.2 TIME extension detected\n");
+ pr_info("SBI TIME extension detected\n");
} else {
__sbi_set_timer = __sbi_set_timer_v01;
}
if (sbi_probe_extension(SBI_EXT_IPI) > 0) {
__sbi_send_ipi = __sbi_send_ipi_v02;
- pr_info("SBI v0.2 IPI extension detected\n");
+ pr_info("SBI IPI extension detected\n");
} else {
__sbi_send_ipi = __sbi_send_ipi_v01;
}
if (sbi_probe_extension(SBI_EXT_RFENCE) > 0) {
__sbi_rfence = __sbi_rfence_v02;
- pr_info("SBI v0.2 RFENCE extension detected\n");
+ pr_info("SBI RFENCE extension detected\n");
} else {
__sbi_rfence = __sbi_rfence_v01;
}
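
For context, the sbi_get_m*id() helpers added above all funnel into the SBI calling convention: the extension ID goes in a7, the function ID in a6, arguments in a0-a5, and the firmware returns an error code in a0 and a value in a1. A minimal sketch of a zero-argument call (a model, not the kernel's sbi_ecall()):

	struct sbiret_model { long error; long value; };

	static struct sbiret_model sbi_ecall0_model(int ext, int fid)
	{
		register unsigned long a0 __asm__("a0");
		register unsigned long a1 __asm__("a1");
		register unsigned long a6 __asm__("a6") = fid;
		register unsigned long a7 __asm__("a7") = ext;

		__asm__ __volatile__ ("ecall"
				      : "=r" (a0), "=r" (a1)
				      : "r" (a6), "r" (a7)
				      : "memory");
		return (struct sbiret_model){ .error = a0, .value = a1 };
	}
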
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index e85bacff1b50..9a1b7a0603b2 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -20,9 +20,11 @@
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/efi.h>
+#include <linux/crash_dump.h>
#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h>
+#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/sections.h>
@@ -50,7 +52,11 @@ struct screen_info screen_info __section(".data") = {
* This is used before the kernel initializes the BSS so it can't be in the
* BSS.
*/
-atomic_t hart_lottery __section(".sdata");
+atomic_t hart_lottery __section(".sdata")
+#ifdef CONFIG_XIP_KERNEL
+= ATOMIC_INIT(0xC001BEEF)
+#endif
+;
unsigned long boot_cpu_hartid;
static DEFINE_PER_CPU(struct cpu, cpu_devices);
@@ -60,10 +66,14 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
* also add "System RAM" regions for compatibility with other
* archs, and the rest of the known regions for completeness.
*/
+static struct resource kimage_res = { .name = "Kernel image", };
static struct resource code_res = { .name = "Kernel code", };
static struct resource data_res = { .name = "Kernel data", };
static struct resource rodata_res = { .name = "Kernel rodata", };
static struct resource bss_res = { .name = "Kernel bss", };
+#ifdef CONFIG_CRASH_DUMP
+static struct resource elfcorehdr_res = { .name = "ELF Core hdr", };
+#endif
static int __init add_resource(struct resource *parent,
struct resource *res)
@@ -80,45 +90,54 @@ static int __init add_resource(struct resource *parent,
return 1;
}
-static int __init add_kernel_resources(struct resource *res)
+static int __init add_kernel_resources(void)
{
int ret = 0;
/*
 * The memory region of the kernel image is contiguous and
- * was reserved on setup_bootmem, find it here and register
- * it as a resource, then register the various segments of
- * the image as child nodes
+ * was reserved on setup_bootmem, register it here as a
+ * resource, with the various segments of the image as
+ * child nodes.
*/
- if (!(res->start <= code_res.start && res->end >= data_res.end))
- return 0;
- res->name = "Kernel image";
- res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ code_res.start = __pa_symbol(_text);
+ code_res.end = __pa_symbol(_etext) - 1;
+ code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
- /*
- * We removed a part of this region on setup_bootmem so
- * we need to expand the resource for the bss to fit in.
- */
- res->end = bss_res.end;
+ rodata_res.start = __pa_symbol(__start_rodata);
+ rodata_res.end = __pa_symbol(__end_rodata) - 1;
+ rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+
+ data_res.start = __pa_symbol(_data);
+ data_res.end = __pa_symbol(_edata) - 1;
+ data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
- ret = add_resource(&iomem_resource, res);
+ bss_res.start = __pa_symbol(__bss_start);
+ bss_res.end = __pa_symbol(__bss_stop) - 1;
+ bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+
+ kimage_res.start = code_res.start;
+ kimage_res.end = bss_res.end;
+ kimage_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+
+ ret = add_resource(&iomem_resource, &kimage_res);
if (ret < 0)
return ret;
- ret = add_resource(res, &code_res);
+ ret = add_resource(&kimage_res, &code_res);
if (ret < 0)
return ret;
- ret = add_resource(res, &rodata_res);
+ ret = add_resource(&kimage_res, &rodata_res);
if (ret < 0)
return ret;
- ret = add_resource(res, &data_res);
+ ret = add_resource(&kimage_res, &data_res);
if (ret < 0)
return ret;
- ret = add_resource(res, &bss_res);
+ ret = add_resource(&kimage_res, &bss_res);
return ret;
}
@@ -129,53 +148,59 @@ static void __init init_resources(void)
struct resource *res = NULL;
struct resource *mem_res = NULL;
size_t mem_res_sz = 0;
- int ret = 0, i = 0;
-
- code_res.start = __pa_symbol(_text);
- code_res.end = __pa_symbol(_etext) - 1;
- code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
-
- rodata_res.start = __pa_symbol(__start_rodata);
- rodata_res.end = __pa_symbol(__end_rodata) - 1;
- rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
-
- data_res.start = __pa_symbol(_data);
- data_res.end = __pa_symbol(_edata) - 1;
- data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ int num_resources = 0, res_idx = 0;
+ int ret = 0;
- bss_res.start = __pa_symbol(__bss_start);
- bss_res.end = __pa_symbol(__bss_stop) - 1;
- bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ /* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
+ num_resources = memblock.memory.cnt + memblock.reserved.cnt + 1;
+ res_idx = num_resources - 1;
- mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt) * sizeof(*mem_res);
+ mem_res_sz = num_resources * sizeof(*mem_res);
mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
if (!mem_res)
panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);
+
/*
* Start by adding the reserved regions, if they overlap
* with /memory regions, insert_resource later on will take
* care of it.
*/
+ ret = add_kernel_resources();
+ if (ret < 0)
+ goto error;
+
+#ifdef CONFIG_KEXEC_CORE
+ if (crashk_res.start != crashk_res.end) {
+ ret = add_resource(&iomem_resource, &crashk_res);
+ if (ret < 0)
+ goto error;
+ }
+#endif
+
+#ifdef CONFIG_CRASH_DUMP
+ if (elfcorehdr_size > 0) {
+ elfcorehdr_res.start = elfcorehdr_addr;
+ elfcorehdr_res.end = elfcorehdr_addr + elfcorehdr_size - 1;
+ elfcorehdr_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ add_resource(&iomem_resource, &elfcorehdr_res);
+ }
+#endif
+
for_each_reserved_mem_region(region) {
- res = &mem_res[i++];
+ res = &mem_res[res_idx--];
res->name = "Reserved";
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
- ret = add_kernel_resources(res);
- if (ret < 0)
- goto error;
- else if (ret)
- continue;
-
/*
* Ignore any other reserved regions within
* system memory.
*/
if (memblock_is_memory(res->start)) {
- memblock_free((phys_addr_t) res, sizeof(struct resource));
+ /* Re-use this pre-allocated resource */
+ res_idx++;
continue;
}
@@ -186,7 +211,7 @@ static void __init init_resources(void)
/* Add /memory regions to the resource tree */
for_each_mem_region(region) {
- res = &mem_res[i++];
+ res = &mem_res[res_idx--];
if (unlikely(memblock_is_nomap(region))) {
res->name = "Reserved";
@@ -204,12 +229,15 @@ static void __init init_resources(void)
goto error;
}
+ /* Clean-up any unused pre-allocated resources */
+ mem_res_sz = (num_resources - res_idx + 1) * sizeof(*mem_res);
+ memblock_free(__pa(mem_res), mem_res_sz);
return;
error:
/* Better an empty resource tree than an inconsistent one */
release_child_resources(&iomem_resource);
- memblock_free((phys_addr_t) mem_res, mem_res_sz);
+ memblock_free(__pa(mem_res), mem_res_sz);
}
@@ -250,21 +278,24 @@ void __init setup_arch(char **cmdline_p)
efi_init();
setup_bootmem();
paging_init();
- init_resources();
#if IS_ENABLED(CONFIG_BUILTIN_DTB)
unflatten_and_copy_device_tree();
#else
- if (early_init_dt_verify(__va(dtb_early_pa)))
+ if (early_init_dt_verify(__va(XIP_FIXUP(dtb_early_pa))))
unflatten_device_tree();
else
pr_err("No DTB found in kernel mappings\n");
#endif
misc_mem_init();
+ init_resources();
sbi_init();
- if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
+ if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
protect_kernel_text_data();
+ protect_kernel_linear_mapping_text_rodata();
+ }
+
#ifdef CONFIG_SWIOTLB
swiotlb_init(1);
#endif
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index ea028d9e0d24..921d9d7df400 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -9,6 +9,7 @@
*/
#include <linux/cpu.h>
+#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/profile.h>
@@ -27,10 +28,11 @@ enum ipi_message_type {
IPI_CALL_FUNC,
IPI_CPU_STOP,
IPI_IRQ_WORK,
+ IPI_TIMER,
IPI_MAX
};
-unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
+unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
[0 ... NR_CPUS-1] = INVALID_HARTID
};
@@ -54,7 +56,7 @@ int riscv_hartid_to_cpuid(int hartid)
return i;
pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
- return i;
+ return -ENOENT;
}
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
@@ -85,9 +87,9 @@ static void ipi_stop(void)
wait_for_interrupt();
}
-static struct riscv_ipi_ops *ipi_ops;
+static const struct riscv_ipi_ops *ipi_ops __ro_after_init;
-void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
{
ipi_ops = ops;
}
@@ -176,6 +178,12 @@ void handle_IPI(struct pt_regs *regs)
irq_work_run();
}
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+ if (ops & (1 << IPI_TIMER)) {
+ stats[IPI_TIMER]++;
+ tick_receive_broadcast();
+ }
+#endif
BUG_ON((ops >> IPI_MAX) != 0);
/* Order data access and bit testing. */
@@ -192,6 +200,7 @@ static const char * const ipi_names[] = {
[IPI_CALL_FUNC] = "Function call interrupts",
[IPI_CPU_STOP] = "CPU stop interrupts",
[IPI_IRQ_WORK] = "IRQ work interrupts",
+ [IPI_TIMER] = "Timer broadcast interrupts",
};
void show_ipi_stats(struct seq_file *p, int prec)
@@ -217,6 +226,13 @@ void arch_send_call_function_single_ipi(int cpu)
send_ipi_single(cpu, IPI_CALL_FUNC);
}
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+void tick_broadcast(const struct cpumask *mask)
+{
+ send_ipi_mask(mask, IPI_TIMER);
+}
+#endif
+
void smp_send_stop(void)
{
unsigned long timeout;
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 5e276c25646f..bd82375db51a 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -32,6 +32,7 @@
#include <asm/sections.h>
#include <asm/sbi.h>
#include <asm/smp.h>
+#include <asm/alternative.h>
#include "head.h"
@@ -40,6 +41,9 @@ static DECLARE_COMPLETION(cpu_running);
void __init smp_prepare_boot_cpu(void)
{
init_cpu_topology();
+#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE
+ apply_boot_alternatives();
+#endif
}
void __init smp_prepare_cpus(unsigned int max_cpus)
@@ -176,7 +180,6 @@ asmlinkage __visible void smp_callin(void)
* Disable preemption before enabling interrupts, so we don't try to
* schedule a CPU that hasn't actually started yet.
*/
- preempt_disable();
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 3f893c9d9d85..ff467b98c3e3 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -14,7 +14,7 @@
#include <asm/stacktrace.h>
-register const unsigned long sp_in_global __asm__("sp");
+register unsigned long sp_in_global __asm__("sp");
#ifdef CONFIG_FRAME_POINTER
@@ -27,10 +27,10 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = frame_pointer(regs);
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
- } else if (task == NULL || task == current) {
- fp = (unsigned long)__builtin_frame_address(0);
- sp = sp_in_global;
- pc = (unsigned long)walk_stackframe;
+ } else if (task == current) {
+ fp = (unsigned long)__builtin_frame_address(1);
+ sp = (unsigned long)__builtin_frame_address(0);
+ pc = (unsigned long)__builtin_return_address(0);
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
@@ -106,15 +106,15 @@ static bool print_trace_address(void *arg, unsigned long pc)
return true;
}
-void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
+noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
const char *loglvl)
{
- pr_cont("%sCall Trace:\n", loglvl);
walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
}
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
+ pr_cont("%sCall Trace:\n", loglvl);
dump_backtrace(NULL, task, loglvl);
}
@@ -132,14 +132,14 @@ unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
- if (likely(task && task != current && task->state != TASK_RUNNING))
+ if (likely(task && task != current && !task_is_running(task)))
walk_stackframe(task, NULL, save_wchan, &pc);
return pc;
}
#ifdef CONFIG_STACKTRACE
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
struct task_struct *task, struct pt_regs *regs)
{
walk_stackframe(task, regs, consume_entry, cookie);
diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c
index f1ead9df96ca..a63c667c27b3 100644
--- a/arch/riscv/kernel/syscall_table.c
+++ b/arch/riscv/kernel/syscall_table.c
@@ -13,7 +13,7 @@
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),
-void *sys_call_table[__NR_syscalls] = {
+void * const sys_call_table[__NR_syscalls] = {
[0 ... __NR_syscalls - 1] = sys_ni_syscall,
#include <asm/unistd.h>
};
diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
index 8a5cf99c0776..8217b0f67c6c 100644
--- a/arch/riscv/kernel/time.c
+++ b/arch/riscv/kernel/time.c
@@ -9,8 +9,9 @@
#include <linux/delay.h>
#include <asm/sbi.h>
#include <asm/processor.h>
+#include <asm/timex.h>
-unsigned long riscv_timebase;
+unsigned long riscv_timebase __ro_after_init;
EXPORT_SYMBOL_GPL(riscv_timebase);
void __init time_init(void)
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 3ed2c23601a0..7bc88d8aab97 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/irq.h>
+#include <asm/asm-prototypes.h>
#include <asm/bug.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
@@ -24,8 +25,6 @@
int show_unhandled_signals = 1;
-extern asmlinkage void handle_exception(void);
-
static DEFINE_SPINLOCK(die_lock);
void die(struct pt_regs *regs, const char *str)
@@ -87,8 +86,13 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
}
}
+#if defined (CONFIG_XIP_KERNEL) && defined (CONFIG_RISCV_ERRATA_ALTERNATIVE)
+#define __trap_section __section(".xip.traps")
+#else
+#define __trap_section
+#endif
#define DO_ERROR_INFO(name, signo, code, str) \
-asmlinkage __visible void name(struct pt_regs *regs) \
+asmlinkage __visible __trap_section void name(struct pt_regs *regs) \
{ \
do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \
}
@@ -112,7 +116,7 @@ DO_ERROR_INFO(do_trap_store_misaligned,
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);
-asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_load_misaligned(struct pt_regs *regs)
{
if (!handle_misaligned_load(regs))
return;
@@ -120,7 +124,7 @@ asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
"Oops - load address misaligned");
}
-asmlinkage void do_trap_store_misaligned(struct pt_regs *regs)
+asmlinkage void __trap_section do_trap_store_misaligned(struct pt_regs *regs)
{
if (!handle_misaligned_store(regs))
return;
@@ -147,7 +151,7 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
return GET_INSN_LENGTH(insn);
}
-asmlinkage __visible void do_trap_break(struct pt_regs *regs)
+asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
{
#ifdef CONFIG_KPROBES
if (kprobe_single_step_handler(regs))
@@ -177,6 +181,7 @@ asmlinkage __visible void do_trap_break(struct pt_regs *regs)
else
die(regs, "Kernel BUG");
}
+NOKPROBE_SYMBOL(do_trap_break);
#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long pc)
@@ -195,6 +200,6 @@ int is_valid_bugaddr(unsigned long pc)
#endif /* CONFIG_GENERIC_BUG */
/* stvec & scratch are already set from head.S */
-void trap_init(void)
+void __init trap_init(void)
{
}
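
For illustration, roughly what one DO_ERROR_INFO() instance from this same file expands to once __trap_section resolves to the XIP section attribute (kernel-context sketch, not standalone code):

/* Expansion sketch of
 *   DO_ERROR_INFO(do_trap_insn_illegal, SIGILL, ILL_ILLOPC, "illegal instruction")
 * under CONFIG_XIP_KERNEL && CONFIG_RISCV_ERRATA_ALTERNATIVE: */
asmlinkage __visible __section(".xip.traps")
void do_trap_insn_illegal(struct pt_regs *regs)
{
        do_trap_error(regs, SIGILL, ILL_ILLOPC, regs->epc,
                      "Oops - illegal instruction");
}

Grouping the handlers into one .xip.traps section lets the linker script keep them in the RAM-resident part of the image, where the errata-alternative machinery can patch them, rather than in unpatchable flash.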
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
index 3f1d35e7c98a..25a3b8849599 100644
--- a/arch/riscv/kernel/vdso.c
+++ b/arch/riscv/kernel/vdso.c
@@ -20,8 +20,8 @@
extern char vdso_start[], vdso_end[];
-static unsigned int vdso_pages;
-static struct page **vdso_pagelist;
+static unsigned int vdso_pages __ro_after_init;
+static struct page **vdso_pagelist __ro_after_init;
/*
* The vDSO data page.
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 71a315e73cbe..24d936c147cd 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -23,7 +23,7 @@ ifneq ($(c-gettimeofday-y),)
endif
# Build rules
-targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
+targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
obj-y += vdso.o vdso-syms.o
@@ -41,11 +41,10 @@ KASAN_SANITIZE := n
$(obj)/vdso.o: $(obj)/vdso.so
# link rule for the .so file, .lds has to be first
-SYSCFLAGS_vdso.so.dbg = $(c_flags)
-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
+$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
-SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
- -Wl,--build-id=sha1 -Wl,--hash-style=both
+LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \
+ --build-id=sha1 --hash-style=both --eh-frame-hdr
# We also create a special relocatable object that should mirror the symbol
# table and layout of the linked DSO. With ld --just-symbols we can then
@@ -60,13 +59,10 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
# actual build commands
# The DSO images are built using a special linker script
-# Add -lgcc so rv32 gets static muldi3 and lshrdi3 definitions.
# Make sure only to export the intended __vdso_xxx symbol offsets.
quiet_cmd_vdsold = VDSOLD $@
- cmd_vdsold = $(CC) $(KBUILD_CFLAGS) $(call cc-option, -no-pie) -nostdlib -nostartfiles $(SYSCFLAGS_$(@F)) \
- -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp && \
- $(CROSS_COMPILE)objcopy \
- $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
+ cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \
+ $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
rm $@.tmp
# Extracts symbol offsets from the VDSO, converting them into an assembly file
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
new file mode 100644
index 000000000000..a3ff09c4c3f9
--- /dev/null
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ * Copyright (C) 2020 Vitaly Wool, Konsulko AB
+ */
+
+#include <asm/pgtable.h>
+#define LOAD_OFFSET KERNEL_LINK_ADDR
+/* No __ro_after_init data in the .rodata section - which will always be ro */
+#define RO_AFTER_INIT_DATA
+
+#include <asm/vmlinux.lds.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+
+OUTPUT_ARCH(riscv)
+ENTRY(_start)
+
+jiffies = jiffies_64;
+
+SECTIONS
+{
+ /* Beginning of code and text segment */
+ . = LOAD_OFFSET;
+ _xiprom = .;
+ _start = .;
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ /* we have to discard exit text and such at runtime, not link time */
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
+
+ .text : {
+ _text = .;
+ _stext = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ ENTRY_TEXT
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ *(.fixup)
+ _etext = .;
+ }
+ RO_DATA(L1_CACHE_BYTES)
+ .srodata : {
+ *(.srodata*)
+ }
+ .init.rodata : {
+ INIT_SETUP(16)
+ INIT_CALLS
+ CON_INITCALL
+ INIT_RAM_FS
+ }
+ _exiprom = .; /* End of XIP ROM area */
+
+
+/*
+ * From this point, stuff is considered writable and will be copied to RAM
+ */
+ __data_loc = ALIGN(16); /* location in file */
+ . = LOAD_OFFSET + XIP_OFFSET; /* location in memory */
+
+ _sdata = .; /* Start of data section */
+ _data = .;
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
+ __start_ro_after_init = .;
+ .data.ro_after_init : AT(ADDR(.data.ro_after_init) - LOAD_OFFSET) {
+ *(.data..ro_after_init)
+ }
+ __end_ro_after_init = .;
+
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
+ .init.data : {
+ INIT_DATA
+ }
+ .exit.data : {
+ EXIT_DATA
+ }
+ . = ALIGN(8);
+ __soc_early_init_table : {
+ __soc_early_init_table_start = .;
+ KEEP(*(__soc_early_init_table))
+ __soc_early_init_table_end = .;
+ }
+ __soc_builtin_dtb_table : {
+ __soc_builtin_dtb_table_start = .;
+ KEEP(*(__soc_builtin_dtb_table))
+ __soc_builtin_dtb_table_end = .;
+ }
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
+ . = ALIGN(8);
+ .alternative : {
+ __alt_start = .;
+ *(.alternative)
+ __alt_end = .;
+ }
+ __init_end = .;
+
+ . = ALIGN(16);
+ .xip.traps : {
+ __xip_traps_start = .;
+ *(.xip.traps)
+ __xip_traps_end = .;
+ }
+
+ . = ALIGN(PAGE_SIZE);
+ .sdata : {
+ __global_pointer$ = . + 0x800;
+ *(.sdata*)
+ *(.sbss*)
+ }
+
+ BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
+ EXCEPTION_TABLE(0x10)
+
+ .rel.dyn : AT(ADDR(.rel.dyn) - LOAD_OFFSET) {
+ *(.rel.dyn*)
+ }
+
+ /*
+ * End of copied data. We need a dummy section to get its LMA.
+ * Also located before the final ALIGN(), as trailing padding is not
+ * stored in the resulting binary file and would be useless to copy.
+ */
+ .data.endmark : AT(ADDR(.data.endmark) - LOAD_OFFSET) { }
+ _edata_loc = LOADADDR(.data.endmark);
+
+ . = ALIGN(PAGE_SIZE);
+ _end = .;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+
+ DISCARDS
+}
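
The .data.endmark trick gives the copy loop a precise LMA bound. As a usage sketch (an assumption about how such symbols are consumed; the kernel's actual copy is __copy_data() in arch/riscv/mm/init.c further down, which works from _sdata/_end and CONFIG_PHYS_RAM_BASE):

/* Sketch: generic XIP ROM-to-RAM copy driven by the symbols above.
 * __data_loc/_edata_loc are load (flash) addresses, _sdata is the
 * run-time RAM address of the same bytes. */
extern char __data_loc[], _edata_loc[], _sdata[];

static void xip_copy_data_to_ram(void)
{
        char *src = __data_loc;         /* data as stored in flash */
        char *dst = _sdata;             /* where writable data must live */

        while (src < _edata_loc)        /* endmark LMA bounds the copy */
                *dst++ = *src++;
}
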
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index de03cb22d0e9..891742ff75a7 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -4,7 +4,13 @@
* Copyright (C) 2017 SiFive
*/
-#define LOAD_OFFSET PAGE_OFFSET
+#ifdef CONFIG_XIP_KERNEL
+#include "vmlinux-xip.lds.S"
+#else
+
+#include <asm/pgtable.h>
+#define LOAD_OFFSET KERNEL_LINK_ADDR
+
#include <asm/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/cache.h>
@@ -90,6 +96,13 @@ SECTIONS
}
__init_data_end = .;
+
+ . = ALIGN(8);
+ .alternative : {
+ __alt_start = .;
+ *(.alternative)
+ __alt_end = .;
+ }
__init_end = .;
/* Start of data section */
@@ -132,3 +145,4 @@ SECTIONS
DISCARDS
}
+#endif /* CONFIG_XIP_KERNEL */
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 8f17519208c7..096463cc6fff 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -231,6 +231,19 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
return;
}
+#ifdef CONFIG_64BIT
+ /*
+ * Modules in 64bit kernels lie in their own virtual region which is not
+ * in the vmalloc region, but dealing with page faults in this region
+ * or the vmalloc region amounts to doing the same thing: checking that
+ * the mapping exists in init_mm.pgd and updating the user page table, so
+ * just use vmalloc_fault.
+ */
+ if (unlikely(addr >= MODULES_VADDR && addr < MODULES_END)) {
+ vmalloc_fault(regs, code, addr);
+ return;
+ }
+#endif
/* Enable interrupts if they were enabled in the parent context. */
if (likely(regs->status & SR_PIE))
local_irq_enable();
@@ -328,3 +341,4 @@ good_area:
}
return;
}
+NOKPROBE_SYMBOL(do_page_fault);
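
vmalloc_fault() itself is untouched by this hunk; for context, a simplified kernel-style sketch of the synchronization it performs (not the literal function):

/* Sketch: lazily copy the missing top-level entry for a shared kernel
 * region (vmalloc, and now modules) from init_mm's master page table
 * into the current page table. */
static void vmalloc_fault_sketch(unsigned long addr)
{
        pgd_t *pgd = pgd_offset(current->active_mm, addr);      /* faulting table */
        pgd_t *pgd_k = pgd_offset_k(addr);                      /* init_mm master */

        if (!pgd_present(*pgd_k))
                return;                 /* truly bad access: fall through to oops */

        set_pgd(pgd, *pgd_k);           /* install the shared kernel entry */
        local_flush_tlb_page(addr);
}
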
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 067583ab1bd7..4c4c92ce0bb8 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -2,6 +2,8 @@
/*
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2020 FORTH-ICS/CARV
+ * Nick Kossifidis <mick@ics.forth.gr>
*/
#include <linux/init.h>
@@ -11,9 +13,11 @@
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>
+#include <linux/crash_dump.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
@@ -25,14 +29,20 @@
#include "../kernel/head.h"
+unsigned long kernel_virt_addr = KERNEL_LINK_ADDR;
+EXPORT_SYMBOL(kernel_virt_addr);
+#ifdef CONFIG_XIP_KERNEL
+#define kernel_virt_addr (*((unsigned long *)XIP_FIXUP(&kernel_virt_addr)))
+#endif
+
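
The #define-shadowing idiom above makes every later reference to kernel_virt_addr load through its RAM copy. A sketch of the translation XIP_FIXUP performs (the real macro lives in asm/pgtable.h; XIP_FLASH_BASE, XIP_FLASH_END and XIP_RAM_BASE are stand-in names):

/* Illustrative only: while executing from flash with the MMU off,
 * redirect a link-time address into the writable RAM copy of .data;
 * addresses outside the flash window pass through unchanged. */
#define XIP_FIXUP_SKETCH(addr) ({                                       \
        uintptr_t __a = (uintptr_t)(addr);                              \
        (__a >= XIP_FLASH_BASE && __a < XIP_FLASH_END) ?                \
                __a - XIP_FLASH_BASE + XIP_RAM_BASE : __a;              \
})
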
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
extern char _start[];
#define DTB_EARLY_BASE_VA PGDIR_SIZE
-void *dtb_early_va __initdata;
-uintptr_t dtb_early_pa __initdata;
+void *_dtb_early_va __initdata;
+uintptr_t _dtb_early_pa __initdata;
struct pt_alloc_ops {
pte_t *(*get_pte_virt)(phys_addr_t pa);
@@ -57,7 +67,7 @@ static void __init zone_sizes_init(void)
free_area_init(max_zone_pfns);
}
-static void setup_zero_page(void)
+static void __init setup_zero_page(void)
{
memset((void *)empty_zero_page, 0, PAGE_SIZE);
}
@@ -75,7 +85,7 @@ static inline void print_mlm(char *name, unsigned long b, unsigned long t)
(((t) - (b)) >> 20));
}
-static void print_vm_layout(void)
+static void __init print_vm_layout(void)
{
pr_notice("Virtual kernel memory layout:\n");
print_mlk("fixmap", (unsigned long)FIXADDR_START,
@@ -88,6 +98,10 @@ static void print_vm_layout(void)
(unsigned long)VMALLOC_END);
print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
(unsigned long)high_memory);
+#ifdef CONFIG_64BIT
+ print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
+ (unsigned long)ADDRESS_SPACE_END);
+#endif
}
#else
static void print_vm_layout(void) { }
@@ -102,7 +116,6 @@ void __init mem_init(void)
high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
memblock_free_all();
- mem_init_print_info(NULL);
print_vm_layout();
}
@@ -113,10 +126,24 @@ void __init setup_bootmem(void)
phys_addr_t dram_end = memblock_end_of_DRAM();
phys_addr_t max_mapped_addr = __pa(~(ulong)0);
+#ifdef CONFIG_XIP_KERNEL
+ vmlinux_start = __pa_symbol(&_sdata);
+#endif
+
/* The maximal physical memory size is -PAGE_OFFSET. */
memblock_enforce_memory_limit(-PAGE_OFFSET);
- /* Reserve from the start of the kernel to the end of the kernel */
+ /*
+ * Reserve from the start of the kernel to the end of the kernel
+ */
+#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
+ /*
+ * Make sure we align the reservation on PMD_SIZE since we will
+ * map the kernel in the linear mapping as read-only: we do not want
+ * any allocation to happen between _end and the next pmd aligned page.
+ */
+ vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
+#endif
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
/*
@@ -128,8 +155,9 @@ void __init setup_bootmem(void)
if (max_mapped_addr == (dram_end - 1))
memblock_set_current_limit(max_mapped_addr - 4096);
- max_pfn = PFN_DOWN(dram_end);
- max_low_pfn = max_pfn;
+ min_low_pfn = PFN_UP(memblock_start_of_DRAM());
+ max_low_pfn = max_pfn = PFN_DOWN(dram_end);
+
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
@@ -148,12 +176,42 @@ void __init setup_bootmem(void)
memblock_allow_resize();
}
+#ifdef CONFIG_XIP_KERNEL
+
+extern char _xiprom[], _exiprom[];
+extern char _sdata[], _edata[];
+
+#endif /* CONFIG_XIP_KERNEL */
+
#ifdef CONFIG_MMU
-static struct pt_alloc_ops pt_ops;
+static struct pt_alloc_ops _pt_ops __ro_after_init;
-unsigned long va_pa_offset;
+#ifdef CONFIG_XIP_KERNEL
+#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops))
+#else
+#define pt_ops _pt_ops
+#endif
+
+/* Offset between linear mapping virtual address and kernel load address */
+unsigned long va_pa_offset __ro_after_init;
EXPORT_SYMBOL(va_pa_offset);
-unsigned long pfn_base;
+#ifdef CONFIG_XIP_KERNEL
+#define va_pa_offset (*((unsigned long *)XIP_FIXUP(&va_pa_offset)))
+#endif
+/* Offset between kernel mapping virtual address and kernel load address */
+#ifdef CONFIG_64BIT
+unsigned long va_kernel_pa_offset;
+EXPORT_SYMBOL(va_kernel_pa_offset);
+#endif
+#ifdef CONFIG_XIP_KERNEL
+#define va_kernel_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_pa_offset)))
+#endif
+unsigned long va_kernel_xip_pa_offset;
+EXPORT_SYMBOL(va_kernel_xip_pa_offset);
+#ifdef CONFIG_XIP_KERNEL
+#define va_kernel_xip_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_xip_pa_offset)))
+#endif
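
With two offsets in play, physical-to-virtual translation splits by region. A minimal sketch of the resulting helpers (an assumption about the shape of __va() and kernel_mapping_pa_to_va(), based on how the offsets are computed in setup_vm() below):

static inline void *linear_pa_to_va(unsigned long pa)
{
        return (void *)(pa + va_pa_offset);             /* PAGE_OFFSET region */
}

static inline void *kernel_pa_to_va(unsigned long pa)
{
        return (void *)(pa + va_kernel_pa_offset);      /* kernel link region */
}
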
+unsigned long pfn_base __ro_after_init;
EXPORT_SYMBOL(pfn_base);
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
@@ -162,6 +220,12 @@ pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+#ifdef CONFIG_XIP_KERNEL
+#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
+#define fixmap_pte ((pte_t *)XIP_FIXUP(fixmap_pte))
+#define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir))
+#endif /* CONFIG_XIP_KERNEL */
+
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
unsigned long addr = __fix_to_virt(idx);
@@ -213,8 +277,8 @@ static phys_addr_t alloc_pte_late(uintptr_t va)
unsigned long vaddr;
vaddr = __get_free_page(GFP_KERNEL);
- if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)))
- BUG();
+ BUG_ON(!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)));
+
return __pa(vaddr);
}
@@ -237,6 +301,12 @@ pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
+#ifdef CONFIG_XIP_KERNEL
+#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
+#define fixmap_pmd ((pmd_t *)XIP_FIXUP(fixmap_pmd))
+#define early_pmd ((pmd_t *)XIP_FIXUP(early_pmd))
+#endif /* CONFIG_XIP_KERNEL */
+
static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
/* Before MMU is enabled */
@@ -256,7 +326,7 @@ static pmd_t *get_pmd_virt_late(phys_addr_t pa)
static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
- BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT);
+ BUG_ON((va - kernel_virt_addr) >> PGDIR_SHIFT);
return (uintptr_t)early_pmd;
}
@@ -353,6 +423,19 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
return PMD_SIZE;
}
+#ifdef CONFIG_XIP_KERNEL
+/* called from head.S with MMU off */
+asmlinkage void __init __copy_data(void)
+{
+ void *from = (void *)(&_sdata);
+ void *end = (void *)(&_end);
+ void *to = (void *)CONFIG_PHYS_RAM_BASE;
+ size_t sz = (size_t)(end - from + 1);
+
+ memcpy(to, from, sz);
+}
+#endif
+
/*
* setup_vm() is called from head.S with MMU-off.
*
@@ -371,17 +454,74 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif
+uintptr_t load_pa, load_sz;
+#ifdef CONFIG_XIP_KERNEL
+#define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa)))
+#define load_sz (*((uintptr_t *)XIP_FIXUP(&load_sz)))
+#endif
+
+#ifdef CONFIG_XIP_KERNEL
+uintptr_t xiprom, xiprom_sz;
+#define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
+#define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom)))
+
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+{
+ uintptr_t va, end_va;
+
+ /* Map the flash resident part */
+ end_va = kernel_virt_addr + xiprom_sz;
+ for (va = kernel_virt_addr; va < end_va; va += map_size)
+ create_pgd_mapping(pgdir, va,
+ xiprom + (va - kernel_virt_addr),
+ map_size, PAGE_KERNEL_EXEC);
+
+ /* Map the data in RAM */
+ end_va = kernel_virt_addr + XIP_OFFSET + load_sz;
+ for (va = kernel_virt_addr + XIP_OFFSET; va < end_va; va += map_size)
+ create_pgd_mapping(pgdir, va,
+ load_pa + (va - (kernel_virt_addr + XIP_OFFSET)),
+ map_size, PAGE_KERNEL);
+}
+#else
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+{
+ uintptr_t va, end_va;
+
+ end_va = kernel_virt_addr + load_sz;
+ for (va = kernel_virt_addr; va < end_va; va += map_size)
+ create_pgd_mapping(pgdir, va,
+ load_pa + (va - kernel_virt_addr),
+ map_size, PAGE_KERNEL_EXEC);
+}
+#endif
+
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
- uintptr_t va, pa, end_va;
- uintptr_t load_pa = (uintptr_t)(&_start);
- uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
+ uintptr_t __maybe_unused pa;
uintptr_t map_size;
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif
+#ifdef CONFIG_XIP_KERNEL
+ xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
+ xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
+
+ load_pa = (uintptr_t)CONFIG_PHYS_RAM_BASE;
+ load_sz = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
+
+ va_kernel_xip_pa_offset = kernel_virt_addr - xiprom;
+#else
+ load_pa = (uintptr_t)(&_start);
+ load_sz = (uintptr_t)(&_end) - load_pa;
+#endif
+
va_pa_offset = PAGE_OFFSET - load_pa;
+#ifdef CONFIG_64BIT
+ va_kernel_pa_offset = kernel_virt_addr - load_pa;
+#endif
+
pfn_base = PFN_DOWN(load_pa);
/*
@@ -409,26 +549,27 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
create_pmd_mapping(fixmap_pmd, FIXADDR_START,
(uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
/* Setup trampoline PGD and PMD */
- create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
+ create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
(uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
- create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
+#ifdef CONFIG_XIP_KERNEL
+ create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
+ xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
+#else
+ create_pmd_mapping(trampoline_pmd, kernel_virt_addr,
load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
+#endif
#else
/* Setup trampoline PGD */
- create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
+ create_pgd_mapping(trampoline_pg_dir, kernel_virt_addr,
load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif
/*
- * Setup early PGD covering entire kernel which will allows
+ * Setup early PGD covering entire kernel which will allow
* us to reach paging_init(). We map all memory banks later
* in setup_vm_final() below.
*/
- end_va = PAGE_OFFSET + load_sz;
- for (va = PAGE_OFFSET; va < end_va; va += map_size)
- create_pgd_mapping(early_pg_dir, va,
- load_pa + (va - PAGE_OFFSET),
- map_size, PAGE_KERNEL_EXEC);
+ create_kernel_page_table(early_pg_dir, map_size);
#ifndef __PAGETABLE_PMD_FOLDED
/* Setup early PMD for DTB */
@@ -443,7 +584,16 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
+#ifdef CONFIG_64BIT
+ /*
+ * __va can't be used since it would return a linear mapping address
+ * whereas dtb_early_va will be used before setup_vm_final installs
+ * the linear mapping.
+ */
+ dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
+#else
dtb_early_va = __va(dtb_pa);
+#endif /* CONFIG_64BIT */
#endif /* CONFIG_BUILTIN_DTB */
#else
#ifndef CONFIG_BUILTIN_DTB
@@ -455,7 +605,11 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
#else /* CONFIG_BUILTIN_DTB */
+#ifdef CONFIG_64BIT
+ dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
+#else
dtb_early_va = __va(dtb_pa);
+#endif /* CONFIG_64BIT */
#endif /* CONFIG_BUILTIN_DTB */
#endif
dtb_early_pa = dtb_pa;
@@ -491,6 +645,22 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#endif
}
+#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
+void protect_kernel_linear_mapping_text_rodata(void)
+{
+ unsigned long text_start = (unsigned long)lm_alias(_start);
+ unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
+ unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
+ unsigned long data_start = (unsigned long)lm_alias(_data);
+
+ set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
+ set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
+
+ set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
+ set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
+}
+#endif
+
static void __init setup_vm_final(void)
{
uintptr_t va, map_size;
@@ -512,7 +682,7 @@ static void __init setup_vm_final(void)
__pa_symbol(fixmap_pgd_next),
PGDIR_SIZE, PAGE_TABLE);
- /* Map all memory banks */
+ /* Map all memory banks in the linear mapping */
for_each_mem_range(i, &start, &end) {
if (start >= end)
break;
@@ -524,10 +694,22 @@ static void __init setup_vm_final(void)
for (pa = start; pa < end; pa += map_size) {
va = (uintptr_t)__va(pa);
create_pgd_mapping(swapper_pg_dir, va, pa,
- map_size, PAGE_KERNEL_EXEC);
+ map_size,
+#ifdef CONFIG_64BIT
+ PAGE_KERNEL
+#else
+ PAGE_KERNEL_EXEC
+#endif
+ );
}
}
+#ifdef CONFIG_64BIT
+ /* Map the kernel */
+ create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
+#endif
+
/* Clear fixmap PTE and PMD mappings */
clear_fixmap(FIX_PTE);
clear_fixmap(FIX_PMD);
@@ -557,21 +739,25 @@ static inline void setup_vm_final(void)
#endif /* CONFIG_MMU */
#ifdef CONFIG_STRICT_KERNEL_RWX
-void protect_kernel_text_data(void)
+void __init protect_kernel_text_data(void)
{
unsigned long text_start = (unsigned long)_start;
unsigned long init_text_start = (unsigned long)__init_text_begin;
unsigned long init_data_start = (unsigned long)__init_data_begin;
unsigned long rodata_start = (unsigned long)__start_rodata;
unsigned long data_start = (unsigned long)_data;
- unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
+ unsigned long end_va = kernel_virt_addr + load_sz;
+#else
+ unsigned long end_va = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+#endif
set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
/* rodata section is marked readonly in mark_rodata_ro */
set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
- set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
+ set_memory_nx(data_start, (end_va - data_start) >> PAGE_SHIFT);
}
void mark_rodata_ro(void)
@@ -585,6 +771,103 @@ void mark_rodata_ro(void)
}
#endif
+#ifdef CONFIG_KEXEC_CORE
+/*
+ * reserve_crashkernel() - reserves memory for crash kernel
+ *
+ * This function reserves the memory area given by the "crashkernel="
+ * kernel command line parameter. The reserved memory is used by the dump
+ * capture kernel when the primary kernel crashes.
+ */
+static void __init reserve_crashkernel(void)
+{
+ unsigned long long crash_base = 0;
+ unsigned long long crash_size = 0;
+ unsigned long search_start = memblock_start_of_DRAM();
+ unsigned long search_end = memblock_end_of_DRAM();
+
+ int ret = 0;
+
+ /*
+ * Don't reserve a region for a crash kernel when we are already
+ * running in a crash (kdump) kernel: it doesn't make much sense
+ * and memory resources are limited.
+ */
+#ifdef CONFIG_CRASH_DUMP
+ if (is_kdump_kernel()) {
+ pr_info("crashkernel: ignoring reservation request\n");
+ return;
+ }
+#endif
+
+ ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ &crash_size, &crash_base);
+ if (ret || !crash_size)
+ return;
+
+ crash_size = PAGE_ALIGN(crash_size);
+
+ if (crash_base == 0) {
+ /*
+ * Current riscv boot protocol requires 2MB alignment for
+ * RV64 and 4MB alignment for RV32 (hugepage size)
+ */
+ crash_base = memblock_find_in_range(search_start, search_end,
+ crash_size, PMD_SIZE);
+
+ if (crash_base == 0) {
+ pr_warn("crashkernel: couldn't allocate %lldKB\n",
+ crash_size >> 10);
+ return;
+ }
+ } else {
+ /* User specifies base address explicitly. */
+ if (!memblock_is_region_memory(crash_base, crash_size)) {
+ pr_warn("crashkernel: requested region is not memory\n");
+ return;
+ }
+
+ if (memblock_is_region_reserved(crash_base, crash_size)) {
+ pr_warn("crashkernel: requested region is reserved\n");
+ return;
+ }
+
+ if (!IS_ALIGNED(crash_base, PMD_SIZE)) {
+ pr_warn("crashkernel: requested region is misaligned\n");
+ return;
+ }
+ }
+ memblock_reserve(crash_base, crash_size);
+
+ pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
+ crash_base, crash_base + crash_size, crash_size >> 20);
+
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+}
+#endif /* CONFIG_KEXEC_CORE */
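
A standalone illustration of the alignment rule enforced above (PMD_SIZE here is the RV64 value; the base address is hypothetical):

#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE (2UL * 1024 * 1024)    /* RV64 hugepage size */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
        uint64_t base = 0x80200000;     /* e.g. crashkernel=256M@0x80200000 */

        printf("base %#llx is %sPMD-aligned\n", (unsigned long long)base,
               IS_ALIGNED(base, PMD_SIZE) ? "" : "not ");
        return 0;
}
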
+
+#ifdef CONFIG_CRASH_DUMP
+/*
+ * We keep track of the ELF core header of the crashed
+ * kernel via a reserved-memory region whose compatible
+ * string is "linux,elfcorehdr". Here we register a callback
+ * to populate elfcorehdr_addr/size when this region is
+ * present. Note that this region will be marked as
+ * reserved once we call early_init_fdt_scan_reserved_mem()
+ * later on.
+ */
+static int elfcore_hdr_setup(struct reserved_mem *rmem)
+{
+ elfcorehdr_addr = rmem->base;
+ elfcorehdr_size = rmem->size;
+ return 0;
+}
+
+RESERVEDMEM_OF_DECLARE(elfcorehdr, "linux,elfcorehdr", elfcore_hdr_setup);
+#endif
+
void __init paging_init(void)
{
setup_vm_final();
@@ -593,9 +876,13 @@ void __init paging_init(void)
void __init misc_mem_init(void)
{
+ early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
arch_numa_init();
sparse_init();
zone_sizes_init();
+#ifdef CONFIG_KEXEC_CORE
+ reserve_crashkernel();
+#endif
memblock_dump_all();
}
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 3fc18f469efb..d7189c8714a9 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -11,18 +11,6 @@
#include <asm/fixmap.h>
#include <asm/pgalloc.h>
-static __init void *early_alloc(size_t size, int node)
-{
- void *ptr = memblock_alloc_try_nid(size, size,
- __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
-
- if (!ptr)
- panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
- __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
-
- return ptr;
-}
-
extern pgd_t early_pg_dir[PTRS_PER_PGD];
asmlinkage void __init kasan_early_init(void)
{
@@ -60,7 +48,7 @@ asmlinkage void __init kasan_early_init(void)
local_flush_tlb_all();
}
-static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
+static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
phys_addr_t phys_addr;
pte_t *ptep, *base_pte;
@@ -82,7 +70,7 @@ static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long en
set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}
-static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
+static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
{
phys_addr_t phys_addr;
pmd_t *pmdp, *base_pmd;
@@ -117,7 +105,7 @@ static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long en
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}
-static void kasan_populate_pgd(unsigned long vaddr, unsigned long end)
+static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end)
{
phys_addr_t phys_addr;
pgd_t *pgdp = pgd_offset_k(vaddr);
@@ -155,45 +143,39 @@ static void __init kasan_populate(void *start, void *end)
memset(start, KASAN_SHADOW_INIT, end - start);
}
-void __init kasan_shallow_populate(void *start, void *end)
+static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
- unsigned long vaddr = (unsigned long)start & PAGE_MASK;
- unsigned long vend = PAGE_ALIGN((unsigned long)end);
- unsigned long pfn;
- int index;
+ unsigned long next;
void *p;
- pud_t *pud_dir, *pud_k;
- pgd_t *pgd_dir, *pgd_k;
- p4d_t *p4d_dir, *p4d_k;
-
- while (vaddr < vend) {
- index = pgd_index(vaddr);
- pfn = csr_read(CSR_SATP) & SATP_PPN;
- pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index;
- pgd_k = init_mm.pgd + index;
- pgd_dir = pgd_offset_k(vaddr);
- set_pgd(pgd_dir, *pgd_k);
-
- p4d_dir = p4d_offset(pgd_dir, vaddr);
- p4d_k = p4d_offset(pgd_k, vaddr);
-
- vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
- pud_dir = pud_offset(p4d_dir, vaddr);
- pud_k = pud_offset(p4d_k, vaddr);
-
- if (pud_present(*pud_dir)) {
- p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
- pud_populate(&init_mm, pud_dir, p);
+ pgd_t *pgd_k = pgd_offset_k(vaddr);
+
+ do {
+ next = pgd_addr_end(vaddr, end);
+ if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
+ p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
- vaddr += PAGE_SIZE;
- }
+ } while (pgd_k++, vaddr = next, vaddr != end);
+}
+
+static void __init kasan_shallow_populate(void *start, void *end)
+{
+ unsigned long vaddr = (unsigned long)start & PAGE_MASK;
+ unsigned long vend = PAGE_ALIGN((unsigned long)end);
+
+ kasan_shallow_populate_pgd(vaddr, vend);
+ local_flush_tlb_all();
}
void __init kasan_init(void)
{
- phys_addr_t _start, _end;
+ phys_addr_t p_start, p_end;
u64 i;
+ /*
+ * Populate all kernel virtual address space with kasan_early_shadow_page
+ * except for the linear mapping and the modules/kernel/BPF mapping.
+ */
kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
(void *)kasan_mem_to_shadow((void *)
VMEMMAP_END));
@@ -206,15 +188,20 @@ void __init kasan_init(void)
(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
- for_each_mem_range(i, &_start, &_end) {
- void *start = (void *)__va(_start);
- void *end = (void *)__va(_end);
+ /* Populate the linear mapping */
+ for_each_mem_range(i, &p_start, &p_end) {
+ void *start = (void *)__va(p_start);
+ void *end = (void *)__va(p_end);
if (start >= end)
break;
kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
- };
+ }
+
+ /* Populate kernel, BPF, modules mapping */
+ kasan_populate(kasan_mem_to_shadow((const void *)MODULES_VADDR),
+ kasan_mem_to_shadow((const void *)MODULES_VADDR + SZ_2G));
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(&kasan_early_shadow_pte[i],
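
The rewritten loop follows the standard pgd_addr_end() idiom. A standalone demo of that stride-clamping pattern (stride and addresses are illustrative):

#include <stdio.h>

#define PGDIR_SIZE (1UL << 30)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))

/* Next PGDIR_SIZE boundary, clamped so the final partial stride stops
 * exactly at end (the -1 dance avoids overflow at the address-space top). */
static unsigned long pgd_addr_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;

        return boundary - 1 < end - 1 ? boundary : end;
}

int main(void)
{
        unsigned long vaddr = 0x7000000000UL, end = 0x70c0000000UL, next;

        do {
                next = pgd_addr_end(vaddr, end);
                printf("populate [%#lx, %#lx)\n", vaddr, next);
        } while (vaddr = next, vaddr != end);
        return 0;
}
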
diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c
index e8e4dcd39fed..35703d5ef5fd 100644
--- a/arch/riscv/mm/physaddr.c
+++ b/arch/riscv/mm/physaddr.c
@@ -23,7 +23,7 @@ EXPORT_SYMBOL(__virt_to_phys);
phys_addr_t __phys_addr_symbol(unsigned long x)
{
- unsigned long kernel_start = (unsigned long)PAGE_OFFSET;
+ unsigned long kernel_start = (unsigned long)kernel_virt_addr;
unsigned long kernel_end = (unsigned long)_end;
/*
diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
index ace74dec7492..0536ac84b730 100644
--- a/arch/riscv/mm/ptdump.c
+++ b/arch/riscv/mm/ptdump.c
@@ -58,29 +58,56 @@ struct ptd_mm_info {
unsigned long end;
};
+enum address_markers_idx {
+#ifdef CONFIG_KASAN
+ KASAN_SHADOW_START_NR,
+ KASAN_SHADOW_END_NR,
+#endif
+ FIXMAP_START_NR,
+ FIXMAP_END_NR,
+ PCI_IO_START_NR,
+ PCI_IO_END_NR,
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ VMEMMAP_START_NR,
+ VMEMMAP_END_NR,
+#endif
+ VMALLOC_START_NR,
+ VMALLOC_END_NR,
+ PAGE_OFFSET_NR,
+#ifdef CONFIG_64BIT
+ MODULES_MAPPING_NR,
+ KERNEL_MAPPING_NR,
+#endif
+ END_OF_SPACE_NR
+};
+
static struct addr_marker address_markers[] = {
#ifdef CONFIG_KASAN
- {KASAN_SHADOW_START, "Kasan shadow start"},
- {KASAN_SHADOW_END, "Kasan shadow end"},
+ {0, "Kasan shadow start"},
+ {0, "Kasan shadow end"},
#endif
- {FIXADDR_START, "Fixmap start"},
- {FIXADDR_TOP, "Fixmap end"},
- {PCI_IO_START, "PCI I/O start"},
- {PCI_IO_END, "PCI I/O end"},
+ {0, "Fixmap start"},
+ {0, "Fixmap end"},
+ {0, "PCI I/O start"},
+ {0, "PCI I/O end"},
#ifdef CONFIG_SPARSEMEM_VMEMMAP
- {VMEMMAP_START, "vmemmap start"},
- {VMEMMAP_END, "vmemmap end"},
+ {0, "vmemmap start"},
+ {0, "vmemmap end"},
+#endif
+ {0, "vmalloc() area"},
+ {0, "vmalloc() end"},
+ {0, "Linear mapping"},
+#ifdef CONFIG_64BIT
+ {0, "Modules mapping"},
+ {0, "Kernel mapping (kernel, BPF)"},
#endif
- {VMALLOC_START, "vmalloc() area"},
- {VMALLOC_END, "vmalloc() end"},
- {PAGE_OFFSET, "Linear mapping"},
{-1, NULL},
};
static struct ptd_mm_info kernel_ptd_info = {
.mm = &init_mm,
.markers = address_markers,
- .base_addr = KERN_VIRT_START,
+ .base_addr = 0,
.end = ULONG_MAX,
};
@@ -331,10 +358,32 @@ static int ptdump_show(struct seq_file *m, void *v)
DEFINE_SHOW_ATTRIBUTE(ptdump);
-static int ptdump_init(void)
+static int __init ptdump_init(void)
{
unsigned int i, j;
+#ifdef CONFIG_KASAN
+ address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
+ address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
+#endif
+ address_markers[FIXMAP_START_NR].start_address = FIXADDR_START;
+ address_markers[FIXMAP_END_NR].start_address = FIXADDR_TOP;
+ address_markers[PCI_IO_START_NR].start_address = PCI_IO_START;
+ address_markers[PCI_IO_END_NR].start_address = PCI_IO_END;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
+ address_markers[VMEMMAP_END_NR].start_address = VMEMMAP_END;
+#endif
+ address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
+ address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
+ address_markers[PAGE_OFFSET_NR].start_address = PAGE_OFFSET;
+#ifdef CONFIG_64BIT
+ address_markers[MODULES_MAPPING_NR].start_address = MODULES_VADDR;
+ address_markers[KERNEL_MAPPING_NR].start_address = kernel_virt_addr;
+#endif
+
+ kernel_ptd_info.base_addr = KERN_VIRT_START;
+
for (i = 0; i < ARRAY_SIZE(pg_level); i++)
for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
pg_level[i].mask |= pte_bits[j].mask;
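
The reshuffle exists because values such as KASAN_SHADOW_START and kernel_virt_addr are no longer compile-time constants on riscv. A minimal sketch of the zero-init-then-bind pattern:

struct addr_marker {
        unsigned long start_address;
        const char *name;
};

enum { DEMO_LINEAR_NR, DEMO_KERNEL_NR, DEMO_END_NR };

static struct addr_marker demo_markers[] = {
        [DEMO_LINEAR_NR] = { 0, "Linear mapping" },     /* bound at init time */
        [DEMO_KERNEL_NR] = { 0, "Kernel mapping" },
        [DEMO_END_NR]    = { -1UL, NULL },
};

static void demo_markers_init(unsigned long page_offset, unsigned long kva)
{
        demo_markers[DEMO_LINEAR_NR].start_address = page_offset;
        demo_markers[DEMO_KERNEL_NR].start_address = kva;  /* e.g. kernel_virt_addr */
}
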
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index b44ff52f84a6..87e3bf5b9086 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -1148,16 +1148,3 @@ void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
{
__build_epilogue(false, ctx);
}
-
-void *bpf_jit_alloc_exec(unsigned long size)
-{
- return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
- BPF_JIT_REGION_END, GFP_KERNEL,
- PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
-}
-
-void bpf_jit_free_exec(void *addr)
-{
- return vfree(addr);
-}
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
index 3630d447352c..fed86f42dfbe 100644
--- a/arch/riscv/net/bpf_jit_core.c
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -152,6 +152,7 @@ skip_init_ctx:
bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
if (!prog->is_func || extra_pass) {
+ bpf_jit_binary_lock_ro(jit_data->header);
out_offset:
kfree(ctx->offset);
kfree(jit_data);
@@ -164,3 +165,16 @@ out:
tmp : orig_prog);
return prog;
}
+
+void *bpf_jit_alloc_exec(unsigned long size)
+{
+ return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
+ BPF_JIT_REGION_END, GFP_KERNEL,
+ PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+}
+
+void bpf_jit_free_exec(void *addr)
+{
+ return vfree(addr);
+}
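
Allocating the JIT region PAGE_KERNEL (writable, non-executable) and sealing it afterwards with bpf_jit_binary_lock_ro() means the buffer is never writable and executable at the same time. A userspace analogue of that W^X discipline, with hand-assembled riscv64 instructions purely for illustration:

#include <string.h>
#include <sys/mman.h>

typedef int (*jit_fn)(void);

static jit_fn jit_ret42(void)
{
        /* riscv64 machine code for "return 42" */
        static const unsigned char insns[] = {
                0x13, 0x05, 0xa0, 0x02, /* addi a0, zero, 42 */
                0x67, 0x80, 0x00, 0x00, /* jalr zero, 0(ra) -- ret */
        };
        void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return NULL;
        memcpy(buf, insns, sizeof(insns));              /* write phase: RW, never X */
        if (mprotect(buf, 4096, PROT_READ | PROT_EXEC)) /* seal: RX, never W */
                return NULL;
        __builtin___clear_cache((char *)buf, (char *)buf + sizeof(insns));
        return (jit_fn)buf;
}

int main(void)
{
        jit_fn fn = jit_ret42();

        return fn ? fn() : 1;   /* exits with status 42 on riscv64 */
}
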
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index c1ff874e6c2e..c448567b18ca 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -2,9 +2,6 @@
config MMU
def_bool y
-config ZONE_DMA
- def_bool y
-
config CPU_BIG_ENDIAN
def_bool y
@@ -60,6 +57,9 @@ config S390
imply IMA_SECURE_AND_OR_TRUSTED_BOOT
select ARCH_32BIT_USTAT_F_TINODE
select ARCH_BINFMT_ELF_STATE
+ select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM
+ select ARCH_ENABLE_MEMORY_HOTREMOVE
+ select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_WX
select ARCH_HAS_DEVMEM_IS_ALLOWED
@@ -114,6 +114,7 @@ config S390
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ select ARCH_WANTS_NO_INSTR
select ARCH_WANT_DEFAULT_BPF_JIT
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_TABLE_SORT
@@ -137,6 +138,7 @@ config S390
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_VMALLOC
+ select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_SOFT_DIRTY
select HAVE_ARCH_TRACEHOOK
@@ -206,6 +208,7 @@ config S390
select THREAD_INFO_IN_TASK
select TTY
select VIRT_CPU_ACCOUNTING
+ select ZONE_DMA
# Note: keep the above list sorted alphabetically
config SCHED_OMIT_FRAME_POINTER
@@ -471,7 +474,7 @@ config NUMA
config NODES_SHIFT
int
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
default "1"
config SCHED_SMT
@@ -626,15 +629,6 @@ config ARCH_SPARSEMEM_ENABLE
config ARCH_SPARSEMEM_DEFAULT
def_bool y
-config ARCH_ENABLE_MEMORY_HOTPLUG
- def_bool y if SPARSEMEM
-
-config ARCH_ENABLE_MEMORY_HOTREMOVE
- def_bool y
-
-config ARCH_ENABLE_SPLIT_PMD_PTLOCK
- def_bool y
-
config MAX_PHYSMEM_BITS
int "Maximum size of supported physical memory in bits (42-53)"
range 42 53
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index ef96c25fa921..9ea6e61d5858 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -15,3 +15,11 @@ config DEBUG_ENTRY
exits or otherwise impact performance.
If unsure, say N.
+
+config CIO_INJECT
+ bool "CIO Inject interfaces"
+ depends on DEBUG_KERNEL && DEBUG_FS
+ help
+ This option provides a debugging facility to inject certain artificial events
+ and instruction responses into the CIO layer of the Linux kernel. The newly
+ created debugfs interfaces will be available under /sys/kernel/debug/s390/cio/*
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 02056b024091..86afcc6b56bf 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -275,9 +275,9 @@ CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_TWOS=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
-CONFIG_NF_TABLES_IPV4=y
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_IP_NF_IPTABLES=m
@@ -298,7 +298,6 @@ CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_TABLES_IPV6=y
CONFIG_NFT_FIB_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
@@ -388,6 +387,7 @@ CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_PCI=y
+CONFIG_PCI_IOV=y
# CONFIG_PCIEASPM is not set
CONFIG_PCI_DEBUG=y
CONFIG_HOTPLUG_PCI=y
@@ -481,7 +481,6 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
-# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CADENCE is not set
@@ -550,7 +549,7 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
-CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_CONSOLE=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
@@ -581,7 +580,6 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
-# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
@@ -635,6 +633,7 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
@@ -714,12 +713,8 @@ CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
@@ -731,7 +726,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
@@ -778,7 +772,6 @@ CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_GDB_SCRIPTS=y
-CONFIG_FRAME_WARN=1024
CONFIG_HEADERS_INSTALL=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
@@ -796,12 +789,9 @@ CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
CONFIG_SLUB_DEBUG_ON=y
CONFIG_SLUB_STATS=y
-CONFIG_DEBUG_KMEMLEAK=y
-CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_VMACACHE=y
-CONFIG_DEBUG_VM_RB=y
CONFIG_DEBUG_VM_PGFLAGS=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
@@ -838,6 +828,8 @@ CONFIG_BPF_KPROBE_OVERRIDE=y
CONFIG_HIST_TRIGGERS=y
CONFIG_FTRACE_STARTUP_TEST=y
# CONFIG_EVENT_TRACE_STARTUP_TEST is not set
+CONFIG_DEBUG_ENTRY=y
+CONFIG_CIO_INJECT=y
CONFIG_NOTIFIER_ERROR_INJECTION=m
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
@@ -861,4 +853,3 @@ CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BITOPS=m
CONFIG_TEST_BPF=m
-CONFIG_DEBUG_ENTRY=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index bac721a501da..71b49ea5b058 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -266,9 +266,9 @@ CONFIG_IP_VS_DH=m
CONFIG_IP_VS_SH=m
CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
+CONFIG_IP_VS_TWOS=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
-CONFIG_NF_TABLES_IPV4=y
CONFIG_NFT_FIB_IPV4=m
CONFIG_NF_TABLES_ARP=y
CONFIG_IP_NF_IPTABLES=m
@@ -289,7 +289,6 @@ CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_TABLES_IPV6=y
CONFIG_NFT_FIB_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
@@ -378,6 +377,7 @@ CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
CONFIG_PCI=y
+CONFIG_PCI_IOV=y
# CONFIG_PCIEASPM is not set
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
@@ -473,7 +473,6 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
-# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CADENCE is not set
@@ -542,7 +541,7 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
-CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO_CONSOLE=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
@@ -573,7 +572,6 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
-# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
@@ -623,6 +621,7 @@ CONFIG_NTFS_RW=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_INODE64=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=m
CONFIG_ECRYPT_FS=m
@@ -703,12 +702,8 @@ CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
CONFIG_CRYPTO_BLAKE2S=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
@@ -720,7 +715,6 @@ CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
@@ -763,7 +757,6 @@ CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_GDB_SCRIPTS=y
-CONFIG_FRAME_WARN=1024
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_WX=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index acf982a2ae4c..76123a4b26ab 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -26,7 +26,6 @@ CONFIG_CRASH_DUMP=y
# CONFIG_SECCOMP is not set
# CONFIG_GCC_PLUGINS is not set
CONFIG_PARTITION_ADVANCED=y
-CONFIG_IBM_PARTITION=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set
@@ -61,11 +60,9 @@ CONFIG_RAW_DRIVER=y
# CONFIG_HID is not set
# CONFIG_VIRTIO_MENU is not set
# CONFIG_VHOST_MENU is not set
-# CONFIG_SURFACE_PLATFORMS is not set
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
-CONFIG_CONFIGFS_FS=y
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_LSM="yama,loadpin,safesetid,integrity"
diff --git a/arch/s390/crypto/arch_random.c b/arch/s390/crypto/arch_random.c
index 7b947728d57e..56007c763902 100644
--- a/arch/s390/crypto/arch_random.c
+++ b/arch/s390/crypto/arch_random.c
@@ -54,6 +54,10 @@ static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);
bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
{
+ /* max hunk is ARCH_RNG_BUF_SIZE */
+ if (nbytes > ARCH_RNG_BUF_SIZE)
+ return false;
+
/* lock rng buffer */
if (!spin_trylock(&arch_rng_lock))
return false;
diff --git a/arch/s390/crypto/crc32be-vx.S b/arch/s390/crypto/crc32be-vx.S
index 0099044e2c86..6b3d1009c392 100644
--- a/arch/s390/crypto/crc32be-vx.S
+++ b/arch/s390/crypto/crc32be-vx.S
@@ -32,7 +32,7 @@
* process particular chunks of the input data stream in parallel.
*
* For the CRC-32 variants, the constants are precomputed according to
- * these defintions:
+ * these definitions:
*
* R1 = x4*128+64 mod P(x)
* R2 = x4*128 mod P(x)
@@ -189,7 +189,7 @@ ENTRY(crc32_be_vgfm_16)
* Note: To compensate the division by x^32, use the vector unpack
* instruction to move the leftmost word into the leftmost doubleword
* of the vector register. The rightmost doubleword is multiplied
- * with zero to not contribute to the intermedate results.
+ * with zero to not contribute to the intermediate results.
*/
/* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 5860ae790f2d..7138d189cc42 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,48 +15,46 @@
#include <asm/barrier.h>
#include <asm/cmpxchg.h>
-static inline int atomic_read(const atomic_t *v)
+static inline int arch_atomic_read(const atomic_t *v)
{
- int c;
-
- asm volatile(
- " l %0,%1\n"
- : "=d" (c) : "Q" (v->counter));
- return c;
+ return __atomic_read(v);
}
+#define arch_atomic_read arch_atomic_read
-static inline void atomic_set(atomic_t *v, int i)
+static inline void arch_atomic_set(atomic_t *v, int i)
{
- asm volatile(
- " st %1,%0\n"
- : "=Q" (v->counter) : "d" (i));
+ __atomic_set(v, i);
}
+#define arch_atomic_set arch_atomic_set
-static inline int atomic_add_return(int i, atomic_t *v)
+static inline int arch_atomic_add_return(int i, atomic_t *v)
{
return __atomic_add_barrier(i, &v->counter) + i;
}
+#define arch_atomic_add_return arch_atomic_add_return
-static inline int atomic_fetch_add(int i, atomic_t *v)
+static inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return __atomic_add_barrier(i, &v->counter);
}
+#define arch_atomic_fetch_add arch_atomic_fetch_add
-static inline void atomic_add(int i, atomic_t *v)
+static inline void arch_atomic_add(int i, atomic_t *v)
{
__atomic_add(i, &v->counter);
}
+#define arch_atomic_add arch_atomic_add
-#define atomic_sub(_i, _v) atomic_add(-(int)(_i), _v)
-#define atomic_sub_return(_i, _v) atomic_add_return(-(int)(_i), _v)
-#define atomic_fetch_sub(_i, _v) atomic_fetch_add(-(int)(_i), _v)
+#define arch_atomic_sub(_i, _v) arch_atomic_add(-(int)(_i), _v)
+#define arch_atomic_sub_return(_i, _v) arch_atomic_add_return(-(int)(_i), _v)
+#define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v)
#define ATOMIC_OPS(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
__atomic_##op(i, &v->counter); \
} \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
return __atomic_##op##_barrier(i, &v->counter); \
}
@@ -67,60 +65,67 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OPS
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_and arch_atomic_and
+#define arch_atomic_or arch_atomic_or
+#define arch_atomic_xor arch_atomic_xor
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
return __atomic_cmpxchg(&v->counter, old, new);
}
+#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#define ATOMIC64_INIT(i) { (i) }
-static inline s64 atomic64_read(const atomic64_t *v)
+static inline s64 arch_atomic64_read(const atomic64_t *v)
{
- s64 c;
-
- asm volatile(
- " lg %0,%1\n"
- : "=d" (c) : "Q" (v->counter));
- return c;
+ return __atomic64_read(v);
}
+#define arch_atomic64_read arch_atomic64_read
-static inline void atomic64_set(atomic64_t *v, s64 i)
+static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
- asm volatile(
- " stg %1,%0\n"
- : "=Q" (v->counter) : "d" (i));
+ __atomic64_set(v, i);
}
+#define arch_atomic64_set arch_atomic64_set
-static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
+static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}
+#define arch_atomic64_add_return arch_atomic64_add_return
-static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
+static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
return __atomic64_add_barrier(i, (long *)&v->counter);
}
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
-static inline void atomic64_add(s64 i, atomic64_t *v)
+static inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
__atomic64_add(i, (long *)&v->counter);
}
+#define arch_atomic64_add arch_atomic64_add
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
return __atomic64_cmpxchg((long *)&v->counter, old, new);
}
+#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
#define ATOMIC64_OPS(op) \
-static inline void atomic64_##op(s64 i, atomic64_t *v) \
+static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
__atomic64_##op(i, (long *)&v->counter); \
} \
-static inline long atomic64_fetch_##op(s64 i, atomic64_t *v) \
+static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
{ \
return __atomic64_##op##_barrier(i, (long *)&v->counter); \
}
@@ -131,8 +136,15 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OPS
-#define atomic64_sub_return(_i, _v) atomic64_add_return(-(s64)(_i), _v)
-#define atomic64_fetch_sub(_i, _v) atomic64_fetch_add(-(s64)(_i), _v)
-#define atomic64_sub(_i, _v) atomic64_add(-(s64)(_i), _v)
+#define arch_atomic64_and arch_atomic64_and
+#define arch_atomic64_or arch_atomic64_or
+#define arch_atomic64_xor arch_atomic64_xor
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+
+#define arch_atomic64_sub_return(_i, _v) arch_atomic64_add_return(-(s64)(_i), _v)
+#define arch_atomic64_fetch_sub(_i, _v) arch_atomic64_fetch_add(-(s64)(_i), _v)
+#define arch_atomic64_sub(_i, _v) arch_atomic64_add(-(s64)(_i), _v)
#endif /* __ARCH_S390_ATOMIC__ */
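
The arch_ prefix is what allows the generic instrumented-atomics layer to wrap these primitives with sanitizer hooks. Roughly (a simplified sketch of the asm-generic instrumented wrapper, kernel context assumed):

static __always_inline int atomic_add_return(int i, atomic_t *v)
{
        instrument_atomic_read_write(v, sizeof(*v));    /* KASAN/KCSAN hook */
        return arch_atomic_add_return(i, v);            /* s390 primitive above */
}
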
diff --git a/arch/s390/include/asm/atomic_ops.h b/arch/s390/include/asm/atomic_ops.h
index 61467b9eecc7..50510e08b893 100644
--- a/arch/s390/include/asm/atomic_ops.h
+++ b/arch/s390/include/asm/atomic_ops.h
@@ -8,6 +8,40 @@
#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__
+static inline int __atomic_read(const atomic_t *v)
+{
+ int c;
+
+ asm volatile(
+ " l %0,%1\n"
+ : "=d" (c) : "R" (v->counter));
+ return c;
+}
+
+static inline void __atomic_set(atomic_t *v, int i)
+{
+ asm volatile(
+ " st %1,%0\n"
+ : "=R" (v->counter) : "d" (i));
+}
+
+static inline s64 __atomic64_read(const atomic64_t *v)
+{
+ s64 c;
+
+ asm volatile(
+ " lg %0,%1\n"
+ : "=d" (c) : "RT" (v->counter));
+ return c;
+}
+
+static inline void __atomic64_set(atomic64_t *v, s64 i)
+{
+ asm volatile(
+ " stg %1,%0\n"
+ : "=RT" (v->counter) : "d" (i));
+}
+
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
@@ -18,7 +52,7 @@ static inline op_type op_name(op_type val, op_type *ptr) \
asm volatile( \
op_string " %[old],%[val],%[ptr]\n" \
op_barrier \
- : [old] "=d" (old), [ptr] "+Q" (*ptr) \
+ : [old] "=d" (old), [ptr] "+QS" (*ptr) \
: [val] "d" (val) : "cc", "memory"); \
return old; \
} \
@@ -46,7 +80,7 @@ static __always_inline void op_name(op_type val, op_type *ptr) \
asm volatile( \
op_string " %[ptr],%[val]\n" \
op_barrier \
- : [ptr] "+Q" (*ptr) : [val] "i" (val) : "cc", "memory");\
+ : [ptr] "+QS" (*ptr) : [val] "i" (val) : "cc", "memory");\
}
#define __ATOMIC_CONST_OPS(op_name, op_type, op_string) \
@@ -97,7 +131,7 @@ static inline long op_name(long val, long *ptr) \
op_string " %[new],%[val]\n" \
" csg %[old],%[new],%[ptr]\n" \
" jl 0b" \
- : [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
+ : [old] "=d" (old), [new] "=&d" (new), [ptr] "+QS" (*ptr)\
: [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
return old; \
}
@@ -122,22 +156,46 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
- return __sync_val_compare_and_swap(ptr, old, new);
+ asm volatile(
+ " cs %[old],%[new],%[ptr]"
+ : [old] "+d" (old), [ptr] "+Q" (*ptr)
+ : [new] "d" (new)
+ : "cc", "memory");
+ return old;
}
-static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new)
+static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
- return __sync_bool_compare_and_swap(ptr, old, new);
+ int old_expected = old;
+
+ asm volatile(
+ " cs %[old],%[new],%[ptr]"
+ : [old] "+d" (old), [ptr] "+Q" (*ptr)
+ : [new] "d" (new)
+ : "cc", "memory");
+ return old == old_expected;
}
static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
- return __sync_val_compare_and_swap(ptr, old, new);
+ asm volatile(
+ " csg %[old],%[new],%[ptr]"
+ : [old] "+d" (old), [ptr] "+QS" (*ptr)
+ : [new] "d" (new)
+ : "cc", "memory");
+ return old;
}
-static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
{
- return __sync_bool_compare_and_swap(ptr, old, new);
+ long old_expected = old;
+
+ asm volatile(
+ " csg %[old],%[new],%[ptr]"
+ : [old] "+d" (old), [ptr] "+QS" (*ptr)
+ : [new] "d" (new)
+ : "cc", "memory");
+ return old == old_expected;
}
#endif /* __ARCH_S390_ATOMIC_OPS__ */
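
Usage sketch for the open-coded cmpxchg helpers: like the CS/CSG instructions they wrap, they return the value actually found at *ptr, so a caller retries until that matches what it expected. A hypothetical saturating add built on __atomic_cmpxchg() (kernel context assumed):

static int atomic_add_saturated(int *ptr, int inc, int limit)
{
        int old, new, cur = *ptr;

        do {
                old = cur;
                new = old + inc > limit ? limit : old + inc;
                cur = __atomic_cmpxchg(ptr, old, new);
        } while (cur != old);   /* lost a race: retry with the fresh value */
        return new;
}
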
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 31121d32f81d..68da67d2c4c9 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -42,7 +42,7 @@
#define __BITOPS_WORDS(bits) (((bits) + BITS_PER_LONG - 1) / BITS_PER_LONG)
static inline unsigned long *
-__bitops_word(unsigned long nr, volatile unsigned long *ptr)
+__bitops_word(unsigned long nr, const volatile unsigned long *ptr)
{
unsigned long addr;
@@ -50,37 +50,33 @@ __bitops_word(unsigned long nr, volatile unsigned long *ptr)
return (unsigned long *)addr;
}
-static inline unsigned char *
-__bitops_byte(unsigned long nr, volatile unsigned long *ptr)
+static inline unsigned long __bitops_mask(unsigned long nr)
{
- return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
+ return 1UL << (nr & (BITS_PER_LONG - 1));
}
static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask;
+ unsigned long mask = __bitops_mask(nr);
- mask = 1UL << (nr & (BITS_PER_LONG - 1));
__atomic64_or(mask, (long *)addr);
}
static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask;
+ unsigned long mask = __bitops_mask(nr);
- mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- __atomic64_and(mask, (long *)addr);
+ __atomic64_and(~mask, (long *)addr);
}
static __always_inline void arch_change_bit(unsigned long nr,
volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long mask;
+ unsigned long mask = __bitops_mask(nr);
- mask = 1UL << (nr & (BITS_PER_LONG - 1));
__atomic64_xor(mask, (long *)addr);
}
@@ -88,99 +84,104 @@ static inline bool arch_test_and_set_bit(unsigned long nr,
volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long old, mask;
+ unsigned long mask = __bitops_mask(nr);
+ unsigned long old;
- mask = 1UL << (nr & (BITS_PER_LONG - 1));
old = __atomic64_or_barrier(mask, (long *)addr);
- return (old & mask) != 0;
+ return old & mask;
}
static inline bool arch_test_and_clear_bit(unsigned long nr,
volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long old, mask;
+ unsigned long mask = __bitops_mask(nr);
+ unsigned long old;
- mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- old = __atomic64_and_barrier(mask, (long *)addr);
- return (old & ~mask) != 0;
+ old = __atomic64_and_barrier(~mask, (long *)addr);
+ return old & mask;
}
static inline bool arch_test_and_change_bit(unsigned long nr,
volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
- unsigned long old, mask;
+ unsigned long mask = __bitops_mask(nr);
+ unsigned long old;
- mask = 1UL << (nr & (BITS_PER_LONG - 1));
old = __atomic64_xor_barrier(mask, (long *)addr);
- return (old & mask) != 0;
+ return old & mask;
}
static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask = __bitops_mask(nr);
- *addr |= 1 << (nr & 7);
+ *addr |= mask;
}
static inline void arch___clear_bit(unsigned long nr,
volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask = __bitops_mask(nr);
- *addr &= ~(1 << (nr & 7));
+ *addr &= ~mask;
}
static inline void arch___change_bit(unsigned long nr,
volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask = __bitops_mask(nr);
- *addr ^= 1 << (nr & 7);
+ *addr ^= mask;
}
static inline bool arch___test_and_set_bit(unsigned long nr,
volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
- unsigned char ch;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask = __bitops_mask(nr);
+ unsigned long old;
- ch = *addr;
- *addr |= 1 << (nr & 7);
- return (ch >> (nr & 7)) & 1;
+ old = *addr;
+ *addr |= mask;
+ return old & mask;
}
static inline bool arch___test_and_clear_bit(unsigned long nr,
volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
- unsigned char ch;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask = __bitops_mask(nr);
+ unsigned long old;
- ch = *addr;
- *addr &= ~(1 << (nr & 7));
- return (ch >> (nr & 7)) & 1;
+ old = *addr;
+ *addr &= ~mask;
+ return old & mask;
}
static inline bool arch___test_and_change_bit(unsigned long nr,
volatile unsigned long *ptr)
{
- unsigned char *addr = __bitops_byte(nr, ptr);
- unsigned char ch;
+ unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask = __bitops_mask(nr);
+ unsigned long old;
- ch = *addr;
- *addr ^= 1 << (nr & 7);
- return (ch >> (nr & 7)) & 1;
+ old = *addr;
+ *addr ^= mask;
+ return old & mask;
}
static inline bool arch_test_bit(unsigned long nr,
const volatile unsigned long *ptr)
{
- const volatile unsigned char *addr;
+ const volatile unsigned long *addr = __bitops_word(nr, ptr);
+ unsigned long mask = __bitops_mask(nr);
- addr = ((const volatile unsigned char *)ptr);
- addr += (nr ^ (BITS_PER_LONG - 8)) >> 3;
- return (*addr >> (nr & 7)) & 1;
+ return *addr & mask;
}
static inline bool arch_test_and_set_bit_lock(unsigned long nr,
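
All of the bitops variants above now funnel through the same pair of helpers: one maps a bit number to the unsigned long that holds it, the other builds the in-word mask. A self-contained sketch of that arithmetic (plain pointers, no volatile or atomicity, 64-bit host assumed, so it only mirrors the addressing math):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Bit nr lives in word nr / BITS_PER_LONG, at position nr % BITS_PER_LONG. */
static unsigned long *bitops_word(unsigned long nr, unsigned long *base)
{
	return base + (nr / BITS_PER_LONG);
}

static unsigned long bitops_mask(unsigned long nr)
{
	return 1UL << (nr & (BITS_PER_LONG - 1));
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };

	*bitops_word(65, map) |= bitops_mask(65);	/* sets bit 1 of map[1] */
	printf("%lx %lx\n", map[0], map[1]);		/* 0 2 */
	return 0;
}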
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 778247bb1d61..d4e90f2ba77e 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -152,9 +152,6 @@ extern struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
* when new devices for its type pop up */
extern int ccw_driver_register (struct ccw_driver *driver);
extern void ccw_driver_unregister (struct ccw_driver *driver);
-
-struct ccw1;
-
extern int ccw_device_set_options_mask(struct ccw_device *, unsigned long);
extern int ccw_device_set_options(struct ccw_device *, unsigned long);
extern void ccw_device_clear_options(struct ccw_device *, unsigned long);
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index af99c1f66f12..1960a7295ae5 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -12,27 +12,163 @@
#include <linux/types.h>
#include <linux/bug.h>
-#define cmpxchg(ptr, o, n) \
+void __xchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long __xchg(unsigned long x,
+ unsigned long address, int size)
+{
+ unsigned long old;
+ int shift;
+
+ switch (size) {
+ case 1:
+ shift = (3 ^ (address & 3)) << 3;
+ address ^= address & 3;
+ asm volatile(
+ " l %0,%1\n"
+ "0: lr 0,%0\n"
+ " nr 0,%3\n"
+ " or 0,%2\n"
+ " cs %0,0,%1\n"
+ " jl 0b\n"
+ : "=&d" (old), "+Q" (*(int *) address)
+ : "d" ((x & 0xff) << shift), "d" (~(0xff << shift))
+ : "memory", "cc", "0");
+ return old >> shift;
+ case 2:
+ shift = (2 ^ (address & 2)) << 3;
+ address ^= address & 2;
+ asm volatile(
+ " l %0,%1\n"
+ "0: lr 0,%0\n"
+ " nr 0,%3\n"
+ " or 0,%2\n"
+ " cs %0,0,%1\n"
+ " jl 0b\n"
+ : "=&d" (old), "+Q" (*(int *) address)
+ : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift))
+ : "memory", "cc", "0");
+ return old >> shift;
+ case 4:
+ asm volatile(
+ " l %0,%1\n"
+ "0: cs %0,%2,%1\n"
+ " jl 0b\n"
+ : "=&d" (old), "+Q" (*(int *) address)
+ : "d" (x)
+ : "memory", "cc");
+ return old;
+ case 8:
+ asm volatile(
+ " lg %0,%1\n"
+ "0: csg %0,%2,%1\n"
+ " jl 0b\n"
+ : "=&d" (old), "+QS" (*(long *) address)
+ : "d" (x)
+ : "memory", "cc");
+ return old;
+ }
+ __xchg_called_with_bad_pointer();
+ return x;
+}
+
+#define arch_xchg(ptr, x) \
({ \
- __typeof__(*(ptr)) __o = (o); \
- __typeof__(*(ptr)) __n = (n); \
- (__typeof__(*(ptr))) __sync_val_compare_and_swap((ptr),__o,__n);\
+ __typeof__(*(ptr)) __ret; \
+ \
+ __ret = (__typeof__(*(ptr))) \
+ __xchg((unsigned long)(x), (unsigned long)(ptr), \
+ sizeof(*(ptr))); \
+ __ret; \
})
-#define cmpxchg64 cmpxchg
-#define cmpxchg_local cmpxchg
-#define cmpxchg64_local cmpxchg
+void __cmpxchg_called_with_bad_pointer(void);
+
+static __always_inline unsigned long __cmpxchg(unsigned long address,
+ unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev, tmp;
+ int shift;
+
+ switch (size) {
+ case 1:
+ shift = (3 ^ (address & 3)) << 3;
+ address ^= address & 3;
+ asm volatile(
+ " l %0,%2\n"
+ "0: nr %0,%5\n"
+ " lr %1,%0\n"
+ " or %0,%3\n"
+ " or %1,%4\n"
+ " cs %0,%1,%2\n"
+ " jnl 1f\n"
+ " xr %1,%0\n"
+ " nr %1,%5\n"
+ " jnz 0b\n"
+ "1:"
+ : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) address)
+ : "d" ((old & 0xff) << shift),
+ "d" ((new & 0xff) << shift),
+ "d" (~(0xff << shift))
+ : "memory", "cc");
+ return prev >> shift;
+ case 2:
+ shift = (2 ^ (address & 2)) << 3;
+ address ^= address & 2;
+ asm volatile(
+ " l %0,%2\n"
+ "0: nr %0,%5\n"
+ " lr %1,%0\n"
+ " or %0,%3\n"
+ " or %1,%4\n"
+ " cs %0,%1,%2\n"
+ " jnl 1f\n"
+ " xr %1,%0\n"
+ " nr %1,%5\n"
+ " jnz 0b\n"
+ "1:"
+ : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) address)
+ : "d" ((old & 0xffff) << shift),
+ "d" ((new & 0xffff) << shift),
+ "d" (~(0xffff << shift))
+ : "memory", "cc");
+ return prev >> shift;
+ case 4:
+ asm volatile(
+ " cs %0,%3,%1\n"
+ : "=&d" (prev), "+Q" (*(int *) address)
+ : "0" (old), "d" (new)
+ : "memory", "cc");
+ return prev;
+ case 8:
+ asm volatile(
+ " csg %0,%3,%1\n"
+ : "=&d" (prev), "+QS" (*(long *) address)
+ : "0" (old), "d" (new)
+ : "memory", "cc");
+ return prev;
+ }
+ __cmpxchg_called_with_bad_pointer();
+ return old;
+}
-#define xchg(ptr, x) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old; \
- do { \
- __old = *__ptr; \
- } while (!__sync_bool_compare_and_swap(__ptr, __old, x)); \
- __old; \
+ __typeof__(*(ptr)) __ret; \
+ \
+ __ret = (__typeof__(*(ptr))) \
+ __cmpxchg((unsigned long)(ptr), (unsigned long)(o), \
+ (unsigned long)(n), sizeof(*(ptr))); \
+ __ret; \
})
+#define arch_cmpxchg64 arch_cmpxchg
+#define arch_cmpxchg_local arch_cmpxchg
+#define arch_cmpxchg64_local arch_cmpxchg
+
+#define system_has_cmpxchg_double() 1
+
#define __cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
register __typeof__(*(p1)) __old1 asm("2") = (o1); \
@@ -51,7 +187,7 @@
!cc; \
})
-#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
+#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
__typeof__(p1) __p1 = (p1); \
__typeof__(p2) __p2 = (p2); \
@@ -61,6 +197,4 @@
__cmpxchg_double(__p1, __p2, o1, o2, n1, n2); \
})
-#define system_has_cmpxchg_double() 1
-
#endif /* __ASM_CMPXCHG_H */
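
For 1- and 2-byte operands there is no native compare-and-swap, so __cmpxchg() aligns down to the containing 32-bit word and loops on a word-sized cs that replaces only the target byte. A hedged userspace sketch of the same technique built on the generic __atomic builtins — the little-endian branch exists only so the sketch also runs on non-s390 hosts:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t cmpxchg_u8(uint8_t *p, uint8_t old, uint8_t new)
{
	uintptr_t addr = (uintptr_t)p;
	uint32_t *word = (uint32_t *)(addr & ~(uintptr_t)3);
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	int shift = (3 ^ (addr & 3)) << 3;	/* s390 layout: byte 0 is MSB */
#else
	int shift = (addr & 3) << 3;
#endif
	uint32_t mask = (uint32_t)0xff << shift;
	uint32_t cur = __atomic_load_n(word, __ATOMIC_RELAXED);

	for (;;) {
		uint8_t seen = (cur & mask) >> shift;
		uint32_t desired;

		if (seen != old)
			return seen;	/* target byte changed: report it */
		desired = (cur & ~mask) | ((uint32_t)new << shift);
		if (__atomic_compare_exchange_n(word, &cur, desired, false,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			return old;	/* swapped: previous value was old */
		/* cur was refreshed with the current word contents; retry */
	}
}

int main(void)
{
	uint32_t buf = 0;
	uint8_t *bytes = (uint8_t *)&buf;

	printf("%u\n", cmpxchg_u8(&bytes[1], 0, 42));	/* 0: success */
	printf("%u\n", cmpxchg_u8(&bytes[1], 0, 7));	/* 42: mismatch */
	printf("%u\n", bytes[1]);			/* still 42 */
	return 0;
}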
diff --git a/arch/s390/include/asm/cpu_mcf.h b/arch/s390/include/asm/cpu_mcf.h
index 649b9fc60685..3e4cbcb7c4cc 100644
--- a/arch/s390/include/asm/cpu_mcf.h
+++ b/arch/s390/include/asm/cpu_mcf.h
@@ -123,4 +123,6 @@ static inline int stccm_avail(void)
return test_facility(142);
}
+size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset,
+ struct cpumf_ctr_info *info);
#endif /* _ASM_S390_CPU_MCF_H */
diff --git a/arch/s390/include/asm/entry-common.h b/arch/s390/include/asm/entry-common.h
index 75cebc80474e..baa8005090c3 100644
--- a/arch/s390/include/asm/entry-common.h
+++ b/arch/s390/include/asm/entry-common.h
@@ -4,9 +4,11 @@
#include <linux/sched.h>
#include <linux/audit.h>
+#include <linux/randomize_kstack.h>
#include <linux/tracehook.h>
#include <linux/processor.h>
#include <linux/uaccess.h>
+#include <asm/timex.h>
#include <asm/fpu/api.h>
#define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP)
@@ -14,10 +16,6 @@
void do_per_trap(struct pt_regs *regs);
void do_syscall(struct pt_regs *regs);
-typedef void (*pgm_check_func)(struct pt_regs *regs);
-
-extern pgm_check_func pgm_check_table[128];
-
#ifdef CONFIG_DEBUG_ENTRY
static __always_inline void arch_check_user_regs(struct pt_regs *regs)
{
@@ -52,6 +50,14 @@ static __always_inline void arch_exit_to_user_mode(void)
#define arch_exit_to_user_mode arch_exit_to_user_mode
+static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ unsigned long ti_work)
+{
+ choose_random_kstack_offset(get_tod_clock_fast() & 0xff);
+}
+
+#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
+
static inline bool on_thread_stack(void)
{
return !(((unsigned long)(current->stack) ^ current_stack_pointer()) & ~(THREAD_SIZE - 1));
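
The on_thread_stack() check in the context above relies on THREAD_SIZE being a power of two: two addresses lie in the same THREAD_SIZE-aligned block exactly when they agree in every bit above the block offset. A small sketch of that identity, with the THREAD_SIZE value assumed for illustration:

#include <stdio.h>

#define THREAD_SIZE 16384UL	/* assumed; must be a power of two */

static int same_stack(unsigned long base, unsigned long sp)
{
	return !((base ^ sp) & ~(THREAD_SIZE - 1));
}

int main(void)
{
	printf("%d\n", same_stack(0x40000, 0x43ff8));	/* 1: same 16K block */
	printf("%d\n", same_stack(0x40000, 0x44010));	/* 0: next block */
	return 0;
}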
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
index b04f6a794cdf..5cea629c548e 100644
--- a/arch/s390/include/asm/idle.h
+++ b/arch/s390/include/asm/idle.h
@@ -14,12 +14,12 @@
struct s390_idle_data {
seqcount_t seqcount;
- unsigned long long idle_count;
- unsigned long long idle_time;
- unsigned long long clock_idle_enter;
- unsigned long long clock_idle_exit;
- unsigned long long timer_idle_enter;
- unsigned long long timer_idle_exit;
+ unsigned long idle_count;
+ unsigned long idle_time;
+ unsigned long clock_idle_enter;
+ unsigned long clock_idle_exit;
+ unsigned long timer_idle_enter;
+ unsigned long timer_idle_exit;
unsigned long mt_cycles_enter[8];
};
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 28664ee0abc1..e3882b012bfa 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -20,11 +20,6 @@ void *xlate_dev_mem_ptr(phys_addr_t phys);
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
#define IO_SPACE_LIMIT 0
void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 6bcfc5614bbc..9b4473f76e56 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -361,6 +361,7 @@ struct sie_page {
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
u64 exit_userspace;
u64 exit_null;
u64 exit_external_request;
@@ -370,13 +371,7 @@ struct kvm_vcpu_stat {
u64 exit_validity;
u64 exit_instruction;
u64 exit_pei;
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_invalid;
u64 halt_no_poll_steal;
- u64 halt_wakeup;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
u64 instruction_lctl;
u64 instruction_lctlg;
u64 instruction_stctl;
@@ -454,6 +449,7 @@ struct kvm_vcpu_stat {
u64 diagnose_44;
u64 diagnose_9c;
u64 diagnose_9c_ignored;
+ u64 diagnose_9c_forward;
u64 diagnose_258;
u64 diagnose_308;
u64 diagnose_500;
@@ -700,6 +696,10 @@ struct kvm_hw_bp_info_arch {
#define guestdbg_exit_pending(vcpu) (guestdbg_enabled(vcpu) && \
(vcpu->guest_debug & KVM_GUESTDBG_EXIT_PENDING))
+#define KVM_GUESTDBG_VALID_MASK \
+ (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP |\
+ KVM_GUESTDBG_USE_HW_BP | KVM_GUESTDBG_EXIT_PENDING)
+
struct kvm_guestdbg_info_arch {
unsigned long cr0;
unsigned long cr9;
@@ -750,12 +750,12 @@ struct kvm_vcpu_arch {
};
struct kvm_vm_stat {
+ struct kvm_vm_stat_generic generic;
u64 inject_io;
u64 inject_float_mchk;
u64 inject_pfault_done;
u64 inject_service_signal;
u64 inject_virtio;
- u64 remote_tlb_flush;
};
struct kvm_arch_memory_slot {
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index cc98f9b78fd4..479dc76e0eca 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -68,9 +68,9 @@ static inline void copy_page(void *to, void *from)
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/*
* These are used to make use of C type-checking..
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 053fe8b8dec7..10b67f8aab99 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -85,7 +85,6 @@ enum zpci_state {
ZPCI_FN_STATE_STANDBY = 0,
ZPCI_FN_STATE_CONFIGURED = 1,
ZPCI_FN_STATE_RESERVED = 2,
- ZPCI_FN_STATE_ONLINE = 3,
};
struct zpci_bar_struct {
@@ -131,9 +130,10 @@ struct zpci_dev {
u8 port;
u8 rid_available : 1;
u8 has_hp_slot : 1;
+ u8 has_resources : 1;
u8 is_physfn : 1;
u8 util_str_avail : 1;
- u8 reserved : 4;
+ u8 reserved : 3;
unsigned int devfn; /* DEVFN part of the RID*/
struct mutex lock;
@@ -201,10 +201,12 @@ extern unsigned int s390_pci_no_rid;
/* -----------------------------------------------------------------------------
  Prototypes
----------------------------------------------------------------------------- */
/* Base stuff */
-int zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
-void zpci_remove_device(struct zpci_dev *zdev);
+struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state);
int zpci_enable_device(struct zpci_dev *);
int zpci_disable_device(struct zpci_dev *);
+int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh);
+int zpci_deconfigure_device(struct zpci_dev *zdev);
+
int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
int zpci_unregister_ioat(struct zpci_dev *, u8);
void zpci_remove_reserved_devices(void);
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 6b187cd72251..f14a555eff74 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -134,9 +134,6 @@ static inline void pmd_populate(struct mm_struct *mm,
#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
-#define pmd_pgtable(pmd) \
- ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
-
/*
* page table entry allocation/free routines.
*/
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 29c7ecd5ad1d..1f8f5da53262 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -65,8 +65,6 @@ extern unsigned long zero_page_mask;
/* TODO: s390 cannot support io_remap_pfn_range... */
-#define FIRST_USER_ADDRESS 0UL
-
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
@@ -344,8 +342,6 @@ static inline int is_module_addr(void *addr)
#define PTRS_PER_P4D _CRST_ENTRIES
#define PTRS_PER_PGD _CRST_ENTRIES
-#define MAX_PTRS_PER_P4D PTRS_PER_P4D
-
/*
* Segment table and region3 table entry encoding
* (R = read-only, I = invalid, y = young bit):
@@ -1713,4 +1709,7 @@ extern void s390_reset_cmma(struct mm_struct *mm);
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+#define pmd_pgtable(pmd) \
+ ((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
+
#endif /* _S390_PAGE_H */
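
The relocated pmd_pgtable() macro hides a compact piece of arithmetic: with 8-byte PTEs and 256 entries per page table, the negated product -sizeof(pte_t)*PTRS_PER_PTE is an all-ones value with the low 11 bits clear, i.e. an alignment mask for the 2 KB table origin. A quick check:

#include <stdio.h>

#define PTRS_PER_PTE 256UL

int main(void)
{
	unsigned long pte_size = 8;	/* sizeof(pte_t) on s390 */
	unsigned long mask = -pte_size * PTRS_PER_PTE;
	unsigned long pmd_val = 0x12345abcUL;	/* hypothetical entry */

	printf("mask  = %#lx\n", mask);			/* 0xfffffffffffff800 */
	printf("table = %#lx\n", pmd_val & mask);	/* 0x12345800 */
	return 0;
}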
diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h
index b49e0492842c..23ff51be7e29 100644
--- a/arch/s390/include/asm/preempt.h
+++ b/arch/s390/include/asm/preempt.h
@@ -32,7 +32,7 @@ static inline void preempt_count_set(int pc)
#define init_task_preempt_count(p) do { } while (0)
#define init_idle_preempt_count(p, cpu) do { \
- S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+ S390_lowcore.preempt_count = PREEMPT_DISABLED; \
} while (0)
static inline void set_preempt_need_resched(void)
@@ -91,7 +91,7 @@ static inline void preempt_count_set(int pc)
#define init_task_preempt_count(p) do { } while (0)
#define init_idle_preempt_count(p, cpu) do { \
- S390_lowcore.preempt_count = PREEMPT_ENABLED; \
+ S390_lowcore.preempt_count = PREEMPT_DISABLED; \
} while (0)
static inline void set_preempt_need_resched(void)
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index d9215c7106f0..cb4f73c7228d 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -137,7 +137,6 @@ struct slibe {
* @user0: user definable value
* @res4: reserved parameter
* @user1: user definable value
- * @user2: user definable value
*/
struct qaob {
u64 res0[6];
@@ -152,8 +151,7 @@ struct qaob {
u16 dcount[QDIO_MAX_ELEMENTS_PER_BUFFER];
u64 user0;
u64 res4[2];
- u64 user1;
- u64 user2;
+ u8 user1[16];
} __attribute__ ((packed, aligned(256)));
/**
@@ -246,21 +244,8 @@ struct slsb {
u8 val[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed, aligned(256)));
-/**
- * struct qdio_outbuf_state - SBAL related asynchronous operation information
- * (for communication with upper layer programs)
- * (only required for use with completion queues)
- * @user: pointer to upper layer program's state information related to SBAL
- * (stored in user1 data of QAOB)
- */
-struct qdio_outbuf_state {
- void *user;
-};
-
-#define CHSC_AC1_INITIATE_INPUTQ 0x80
-
-
/* qdio adapter-characteristics-1 flag */
+#define CHSC_AC1_INITIATE_INPUTQ 0x80
#define AC1_SIGA_INPUT_NEEDED 0x40 /* process input queues */
#define AC1_SIGA_OUTPUT_NEEDED 0x20 /* process output queues */
#define AC1_SIGA_SYNC_NEEDED 0x10 /* ask hypervisor to sync */
@@ -338,7 +323,6 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
* @int_parm: interruption parameter
* @input_sbal_addr_array: per-queue array, each element points to 128 SBALs
* @output_sbal_addr_array: per-queue array, each element points to 128 SBALs
- * @output_sbal_state_array: no_output_qs * 128 state info (for CQ or NULL)
*/
struct qdio_initialize {
unsigned char q_format;
@@ -357,7 +341,6 @@ struct qdio_initialize {
unsigned long int_parm;
struct qdio_buffer ***input_sbal_addr_array;
struct qdio_buffer ***output_sbal_addr_array;
- struct qdio_outbuf_state *output_sbal_state_array;
};
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
@@ -378,9 +361,10 @@ extern int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
extern int qdio_establish(struct ccw_device *cdev,
struct qdio_initialize *init_data);
extern int qdio_activate(struct ccw_device *);
+extern struct qaob *qdio_allocate_aob(void);
extern void qdio_release_aob(struct qaob *);
-extern int do_QDIO(struct ccw_device *, unsigned int, int, unsigned int,
- unsigned int);
+extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int q_nr,
+ unsigned int bufnr, unsigned int count, struct qaob *aob);
extern int qdio_start_irq(struct ccw_device *cdev);
extern int qdio_stop_irq(struct ccw_device *cdev);
extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 01e360004481..e317fd4866c1 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -63,5 +63,6 @@ extern void __noreturn cpu_die(void);
extern void __cpu_die(unsigned int cpu);
extern int __cpu_disable(void);
extern void schedule_mcck_handler(void);
+void notrace smp_yield_cpu(int cpu);
#endif /* __ASM_SMP_H */
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 3a37172d5398..ef59588a3042 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -88,7 +88,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lp)
asm_inline volatile(
ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */
" sth %1,%0\n"
- : "=Q" (((unsigned short *) &lp->lock)[1])
+ : "=R" (((unsigned short *) &lp->lock)[1])
: "d" (0) : "cc", "memory");
}
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index cfed272e4fd5..a2bbfd7df85f 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -8,7 +8,7 @@
typedef struct {
int lock;
-} __attribute__ ((aligned (4))) arch_spinlock_t;
+} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index ee056f4a4fa3..76c6034428be 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -12,6 +12,7 @@ enum stack_type {
STACK_TYPE_IRQ,
STACK_TYPE_NODAT,
STACK_TYPE_RESTART,
+ STACK_TYPE_MCCK,
};
struct stack_info {
@@ -90,12 +91,16 @@ struct stack_frame {
CALL_ARGS_4(arg1, arg2, arg3, arg4); \
register unsigned long r4 asm("6") = (unsigned long)(arg5)
-#define CALL_FMT_0 "=&d" (r2) :
-#define CALL_FMT_1 "+&d" (r2) :
-#define CALL_FMT_2 CALL_FMT_1 "d" (r3),
-#define CALL_FMT_3 CALL_FMT_2 "d" (r4),
-#define CALL_FMT_4 CALL_FMT_3 "d" (r5),
-#define CALL_FMT_5 CALL_FMT_4 "d" (r6),
+/*
+ * To keep this simple mark register 2-6 as being changed (volatile)
+ * by the called function, even though register 6 is saved/nonvolatile.
+ */
+#define CALL_FMT_0 "=&d" (r2)
+#define CALL_FMT_1 "+&d" (r2)
+#define CALL_FMT_2 CALL_FMT_1, "+&d" (r3)
+#define CALL_FMT_3 CALL_FMT_2, "+&d" (r4)
+#define CALL_FMT_4 CALL_FMT_3, "+&d" (r5)
+#define CALL_FMT_5 CALL_FMT_4, "+&d" (r6)
#define CALL_CLOBBER_5 "0", "1", "14", "cc", "memory"
#define CALL_CLOBBER_4 CALL_CLOBBER_5
@@ -117,7 +122,7 @@ struct stack_frame {
" brasl 14,%[_fn]\n" \
" la 15,0(%[_prev])\n" \
: [_prev] "=&a" (prev), CALL_FMT_##nr \
- [_stack] "R" (stack), \
+ : [_stack] "R" (stack), \
[_bc] "i" (offsetof(struct stack_frame, back_chain)), \
[_frame] "d" (frame), \
[_fn] "X" (fn) : CALL_CLOBBER_##nr); \
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index c4e23e925665..f6326c6d2abe 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -98,10 +98,10 @@ extern unsigned char ptff_function_mask[16];
/* Query TOD offset result */
struct ptff_qto {
- unsigned long long physical_clock;
- unsigned long long tod_offset;
- unsigned long long logical_tod_offset;
- unsigned long long tod_epoch_difference;
+ unsigned long physical_clock;
+ unsigned long tod_offset;
+ unsigned long logical_tod_offset;
+ unsigned long tod_epoch_difference;
} __packed;
static inline int ptff_query(unsigned int nr)
@@ -151,9 +151,9 @@ struct ptff_qui {
rc; \
})
-static inline unsigned long long local_tick_disable(void)
+static inline unsigned long local_tick_disable(void)
{
- unsigned long long old;
+ unsigned long old;
old = S390_lowcore.clock_comparator;
S390_lowcore.clock_comparator = clock_comparator_max;
@@ -161,7 +161,7 @@ static inline unsigned long long local_tick_disable(void)
return old;
}
-static inline void local_tick_enable(unsigned long long comp)
+static inline void local_tick_enable(unsigned long comp)
{
S390_lowcore.clock_comparator = comp;
set_clock_comparator(S390_lowcore.clock_comparator);
@@ -169,9 +169,9 @@ static inline void local_tick_enable(unsigned long long comp)
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
-typedef unsigned long long cycles_t;
+typedef unsigned long cycles_t;
-static inline unsigned long long get_tod_clock(void)
+static inline unsigned long get_tod_clock(void)
{
union tod_clock clk;
@@ -179,10 +179,10 @@ static inline unsigned long long get_tod_clock(void)
return clk.tod;
}
-static inline unsigned long long get_tod_clock_fast(void)
+static inline unsigned long get_tod_clock_fast(void)
{
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
- unsigned long long clk;
+ unsigned long clk;
asm volatile("stckf %0" : "=Q" (clk) : : "cc");
return clk;
@@ -208,9 +208,9 @@ extern union tod_clock tod_clock_base;
* Therefore preemption must be disabled, otherwise the returned
* value is not guaranteed to be monotonic.
*/
-static inline unsigned long long get_tod_clock_monotonic(void)
+static inline unsigned long get_tod_clock_monotonic(void)
{
- unsigned long long tod;
+ unsigned long tod;
preempt_disable_notrace();
tod = get_tod_clock() - tod_clock_base.tod;
@@ -237,7 +237,7 @@ static inline unsigned long long get_tod_clock_monotonic(void)
* -> ns = (th * 125) + ((tl * 125) >> 9);
*
*/
-static inline unsigned long long tod_to_ns(unsigned long long todval)
+static inline unsigned long tod_to_ns(unsigned long todval)
{
return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}
@@ -249,10 +249,10 @@ static inline unsigned long long tod_to_ns(unsigned long long todval)
*
* Returns: true if a is later than b
*/
-static inline int tod_after(unsigned long long a, unsigned long long b)
+static inline int tod_after(unsigned long a, unsigned long b)
{
if (MACHINE_HAS_SCC)
- return (long long) a > (long long) b;
+ return (long) a > (long) b;
return a > b;
}
@@ -263,10 +263,10 @@ static inline int tod_after(unsigned long long a, unsigned long long b)
*
* Returns: true if a is later than b
*/
-static inline int tod_after_eq(unsigned long long a, unsigned long long b)
+static inline int tod_after_eq(unsigned long a, unsigned long b)
{
if (MACHINE_HAS_SCC)
- return (long long) a >= (long long) b;
+ return (long) a >= (long) b;
return a >= b;
}
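
tod_to_ns() in the surrounding context converts TOD units (1/4096 us each) to nanoseconds as tod * 125 / 512, split so the multiplication cannot overflow 64 bits: (tod >> 9) * 125 + ((tod & 0x1ff) * 125 >> 9) is an exact rewrite of the flooring division. A check against 128-bit arithmetic (GCC/Clang __int128, 64-bit host assumed):

#include <stdio.h>

static unsigned long tod_to_ns(unsigned long todval)
{
	return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9);
}

int main(void)
{
	unsigned long samples[] = { 0, 511, 512, 4096, 0x123456789abcdefUL };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned long tod = samples[i];
		unsigned long ref =
			(unsigned long)(((unsigned __int128)tod * 125) >> 9);

		printf("%#lx -> %lu ns (ref %lu)\n", tod, tod_to_ns(tod), ref);
	}
	return 0;
}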
diff --git a/arch/s390/include/asm/vdso/data.h b/arch/s390/include/asm/vdso/data.h
index 7b3cdb4a5f48..73ee89142666 100644
--- a/arch/s390/include/asm/vdso/data.h
+++ b/arch/s390/include/asm/vdso/data.h
@@ -6,7 +6,7 @@
#include <vdso/datapage.h>
struct arch_vdso_data {
- __u64 tod_steering_delta;
+ __s64 tod_steering_delta;
__u64 tod_steering_end;
};
diff --git a/arch/s390/include/asm/vdso/gettimeofday.h b/arch/s390/include/asm/vdso/gettimeofday.h
index ed89ef742530..383c53c3dddd 100644
--- a/arch/s390/include/asm/vdso/gettimeofday.h
+++ b/arch/s390/include/asm/vdso/gettimeofday.h
@@ -68,7 +68,8 @@ long clock_getres_fallback(clockid_t clkid, struct __kernel_timespec *ts)
}
#ifdef CONFIG_TIME_NS
-static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
return _timens_data;
}
diff --git a/arch/s390/include/uapi/asm/perf_cpum_cf_diag.h b/arch/s390/include/uapi/asm/hwctrset.h
index 3d8284b95f87..3d8284b95f87 100644
--- a/arch/s390/include/uapi/asm/perf_cpum_cf_diag.h
+++ b/arch/s390/include/uapi/asm/hwctrset.h
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index c97818a382f3..68ca1834316f 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -36,7 +36,7 @@ CFLAGS_unwind_bc.o += -fno-optimize-sibling-calls
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o syscall.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o
-obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o pgm_check.o
+obj-y += sysinfo.o lgr.o os_info.o machine_kexec.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o guarded_storage.o sthyi.o
obj-y += entry.o reipl.o relocate_kernel.o kdebugfs.o alternative.o
obj-y += nospec-branch.o ipl_vmparm.o machine_kexec_reloc.o unwind_bc.o
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index af013b4244d3..2da027359798 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -37,10 +37,12 @@ static int diag8_noresponse(int cmdlen)
static int diag8_response(int cmdlen, char *response, int *rlen)
{
+ unsigned long _cmdlen = cmdlen | 0x40000000L;
+ unsigned long _rlen = *rlen;
register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
register unsigned long reg3 asm ("3") = (addr_t) response;
- register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
- register unsigned long reg5 asm ("5") = *rlen;
+ register unsigned long reg4 asm ("4") = _cmdlen;
+ register unsigned long reg5 asm ("5") = _rlen;
asm volatile(
" diag %2,%0,0x8\n"
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index a7eab7be4db0..5412efe328f8 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -563,7 +563,7 @@ void show_code(struct pt_regs *regs)
void print_fn_code(unsigned char *code, unsigned long len)
{
- char buffer[64], *ptr;
+ char buffer[128], *ptr;
int opsize, i;
while (len) {
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 0dc4b258b98d..db1bc00229ca 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -79,6 +79,15 @@ static bool in_nodat_stack(unsigned long sp, struct stack_info *info)
return in_stack(sp, info, STACK_TYPE_NODAT, top - THREAD_SIZE, top);
}
+static bool in_mcck_stack(unsigned long sp, struct stack_info *info)
+{
+ unsigned long frame_size, top;
+
+ frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+ top = S390_lowcore.mcck_stack + frame_size;
+ return in_stack(sp, info, STACK_TYPE_MCCK, top - THREAD_SIZE, top);
+}
+
static bool in_restart_stack(unsigned long sp, struct stack_info *info)
{
unsigned long frame_size, top;
@@ -108,7 +117,8 @@ int get_stack_info(unsigned long sp, struct task_struct *task,
/* Check per-cpu stacks */
if (!in_irq_stack(sp, info) &&
!in_nodat_stack(sp, info) &&
- !in_restart_stack(sp, info))
+ !in_restart_stack(sp, info) &&
+ !in_mcck_stack(sp, info))
goto unknown;
recursion_check:
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index c10b9f31eef7..e84f495e7eb2 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -401,15 +401,13 @@ ENTRY(\name)
brasl %r14,.Lcleanup_sie_int
#endif
0: CHECK_STACK __LC_SAVE_AREA_ASYNC
- lgr %r11,%r15
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- stg %r11,__SF_BACKCHAIN(%r15)
j 2f
1: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
lctlg %c1,%c1,__LC_KERNEL_ASCE
lg %r15,__LC_KERNEL_STACK
- xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
-2: la %r11,STACK_FRAME_OVERHEAD(%r15)
+2: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
# clear user controlled registers to prevent speculative use
xgr %r0,%r0
@@ -420,6 +418,7 @@ ENTRY(\name)
xgr %r6,%r6
xgr %r7,%r7
xgr %r10,%r10
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
tm %r8,0x0001 # coming from user space?
@@ -445,6 +444,7 @@ INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq
* Load idle PSW.
*/
ENTRY(psw_idle)
+ stg %r14,(__SF_GPRS+8*8)(%r15)
stg %r3,__SF_EMPTY(%r15)
larl %r1,psw_idle_exit
stg %r1,__SF_EMPTY+8(%r15)
@@ -652,9 +652,9 @@ ENDPROC(stack_overflow)
.Lcleanup_sie_mcck:
larl %r13,.Lsie_entry
slgr %r9,%r13
- larl %r13,.Lsie_skip
+ lghi %r13,.Lsie_skip - .Lsie_entry
clgr %r9,%r13
- jh .Lcleanup_sie_int
+ jhe .Lcleanup_sie_int
oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
.Lcleanup_sie_int:
BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 3d0c0ac5c20e..1ab33465382f 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -26,29 +26,6 @@ void do_dat_exception(struct pt_regs *regs);
void do_secure_storage_access(struct pt_regs *regs);
void do_non_secure_storage_access(struct pt_regs *regs);
void do_secure_storage_violation(struct pt_regs *regs);
-
-void addressing_exception(struct pt_regs *regs);
-void data_exception(struct pt_regs *regs);
-void default_trap_handler(struct pt_regs *regs);
-void divide_exception(struct pt_regs *regs);
-void execute_exception(struct pt_regs *regs);
-void hfp_divide_exception(struct pt_regs *regs);
-void hfp_overflow_exception(struct pt_regs *regs);
-void hfp_significance_exception(struct pt_regs *regs);
-void hfp_sqrt_exception(struct pt_regs *regs);
-void hfp_underflow_exception(struct pt_regs *regs);
-void illegal_op(struct pt_regs *regs);
-void operand_exception(struct pt_regs *regs);
-void overflow_exception(struct pt_regs *regs);
-void privileged_op(struct pt_regs *regs);
-void space_switch_exception(struct pt_regs *regs);
-void special_op_exception(struct pt_regs *regs);
-void specification_exception(struct pt_regs *regs);
-void transaction_exception(struct pt_regs *regs);
-void translation_exception(struct pt_regs *regs);
-void vector_exception(struct pt_regs *regs);
-void monitor_event_exception(struct pt_regs *regs);
-
void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str);
void kernel_stack_overflow(struct pt_regs * regs);
void do_signal(struct pt_regs *regs);
@@ -59,7 +36,7 @@ void do_notify_resume(struct pt_regs *regs);
void __init init_IRQ(void);
void do_io_irq(struct pt_regs *regs);
void do_ext_irq(struct pt_regs *regs);
-void do_restart(void);
+void do_restart(void *arg);
void __init startup_init(void);
void die(struct pt_regs *regs, const char *str);
int setup_profiling_timer(unsigned int multiplier);
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index 812073ea073e..4bf1ee293f2b 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -47,7 +47,7 @@ void account_idle_time_irq(void)
void arch_cpu_idle(void)
{
struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
- unsigned long long idle_time;
+ unsigned long idle_time;
unsigned long psw_mask;
/* Wait for external, I/O or machine check interrupt. */
@@ -73,7 +73,7 @@ static ssize_t show_idle_count(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
- unsigned long long idle_count;
+ unsigned long idle_count;
unsigned int seq;
do {
@@ -82,14 +82,14 @@ static ssize_t show_idle_count(struct device *dev,
if (READ_ONCE(idle->clock_idle_enter))
idle_count++;
} while (read_seqcount_retry(&idle->seqcount, seq));
- return sprintf(buf, "%llu\n", idle_count);
+ return sprintf(buf, "%lu\n", idle_count);
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
static ssize_t show_idle_time(struct device *dev,
struct device_attribute *attr, char *buf)
{
- unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
+ unsigned long now, idle_time, idle_enter, idle_exit, in_idle;
struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
unsigned int seq;
@@ -109,14 +109,14 @@ static ssize_t show_idle_time(struct device *dev,
}
}
idle_time += in_idle;
- return sprintf(buf, "%llu\n", idle_time >> 12);
+ return sprintf(buf, "%lu\n", idle_time >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
u64 arch_cpu_idle_time(int cpu)
{
struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
- unsigned long long now, idle_enter, idle_exit, in_idle;
+ unsigned long now, idle_enter, idle_exit, in_idle;
unsigned int seq;
do {
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 7a21eca498aa..36f870dc944f 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/delay.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/ctype.h>
#include <linux/fs.h>
@@ -1849,12 +1850,12 @@ static void __do_restart(void *ignore)
stop_run(&on_restart_trigger);
}
-void do_restart(void)
+void do_restart(void *arg)
{
tracing_off();
debug_locks_off();
lgr_info_log();
- smp_call_online_cpu(__do_restart, NULL);
+ smp_call_online_cpu(__do_restart, arg);
}
/* on halt */
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 601c21791338..714269e10eec 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -174,7 +174,7 @@ void noinstr do_ext_irq(struct pt_regs *regs)
memcpy(&regs->int_code, &S390_lowcore.ext_cpu_addr, 4);
regs->int_parm = S390_lowcore.ext_params;
- regs->int_parm_long = *(unsigned long *)S390_lowcore.ext_params2;
+ regs->int_parm_long = S390_lowcore.ext_params2;
from_idle = !user_mode(regs) && regs->psw.addr == (unsigned long)psw_idle_exit;
if (from_idle)
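
The irq.c hunk above fixes a classic value-versus-address mixup: ext_params2 already holds the interrupt parameter by value, yet the removed line cast it to a pointer and loaded through it. A miniature of the bug class, with made-up field names:

#include <stdio.h>

/* Made-up stand-in for the lowcore slot: the hardware stores the external
 * interrupt parameter *by value* here. */
struct lowcore_model { unsigned long ext_params2; };

int main(void)
{
	struct lowcore_model lc = { .ext_params2 = 0xdeadbeef };
	unsigned long right = lc.ext_params2;	/* what the fixed code does */

	/* The removed line was equivalent to this: treat the value as an
	 * address and dereference it -- a wild load for anything that is
	 * not actually a pointer:
	 *   unsigned long wrong = *(unsigned long *)lc.ext_params2;
	 */
	printf("%#lx\n", right);
	return 0;
}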
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index aae24dc75df6..528bb31815c3 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -44,11 +44,6 @@ void *alloc_insn_page(void)
return page;
}
-void free_insn_page(void *page)
-{
- module_memfree(page);
-}
-
static void *alloc_s390_insn_page(void)
{
if (xchg(&insn_page_in_use, 1) == 1)
@@ -446,23 +441,6 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(p);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (p->fault_handler && p->fault_handler(p, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
index 0a5e4bafb6ad..5a7420b23aa8 100644
--- a/arch/s390/kernel/os_info.c
+++ b/arch/s390/kernel/os_info.c
@@ -52,7 +52,7 @@ void os_info_entry_add(int nr, void *ptr, u64 size)
}
/*
- * Initialize OS info struture and set lowcore pointer
+ * Initialize OS info structure and set lowcore pointer
*/
void __init os_info_init(void)
{
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 0eb1d1cc53a8..31a605bcbc6e 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -230,9 +230,7 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
/* No support for kernel space counters only */
} else if (!attr->exclude_kernel && attr->exclude_user) {
return -EOPNOTSUPP;
-
- /* Count user and kernel space */
- } else {
+ } else { /* Count user and kernel space */
if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
return -EOPNOTSUPP;
ev = cpumf_generic_events_basic[ev];
@@ -269,7 +267,7 @@ static int __hw_perf_event_init(struct perf_event *event, unsigned int type)
case CPUMF_CTR_SET_MAX:
/* The counter could not be associated to a counter set */
return -EINVAL;
- };
+ }
/* Initialize for using the CPU-measurement counter facility */
if (!atomic_inc_not_zero(&num_events)) {
@@ -402,12 +400,12 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags)
*/
if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base]))
ctr_set_stop(&cpuhw->state, hwc->config_base);
- event->hw.state |= PERF_HES_STOPPED;
+ hwc->state |= PERF_HES_STOPPED;
}
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
hw_perf_event_update(event);
- event->hw.state |= PERF_HES_UPTODATE;
+ hwc->state |= PERF_HES_UPTODATE;
}
}
@@ -430,8 +428,6 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
if (flags & PERF_EF_START)
cpumf_pmu_start(event, PERF_EF_RELOAD);
- perf_event_update_userpage(event);
-
return 0;
}
@@ -451,8 +447,6 @@ static void cpumf_pmu_del(struct perf_event *event, int flags)
*/
if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base]))
ctr_set_disable(&cpuhw->state, event->hw.config_base);
-
- perf_event_update_userpage(event);
}
/*
diff --git a/arch/s390/kernel/perf_cpum_cf_common.c b/arch/s390/kernel/perf_cpum_cf_common.c
index 3bced89caffb..6d53215c8484 100644
--- a/arch/s390/kernel/perf_cpum_cf_common.c
+++ b/arch/s390/kernel/perf_cpum_cf_common.c
@@ -170,6 +170,52 @@ static int cpum_cf_offline_cpu(unsigned int cpu)
return cpum_cf_setup(cpu, PMC_RELEASE);
}
+/* Return the maximum possible counter set size (in number of 8 byte counters)
+ * depending on type and model number.
+ */
+size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset,
+ struct cpumf_ctr_info *info)
+{
+ size_t ctrset_size = 0;
+
+ switch (ctrset) {
+ case CPUMF_CTR_SET_BASIC:
+ if (info->cfvn >= 1)
+ ctrset_size = 6;
+ break;
+ case CPUMF_CTR_SET_USER:
+ if (info->cfvn == 1)
+ ctrset_size = 6;
+ else if (info->cfvn >= 3)
+ ctrset_size = 2;
+ break;
+ case CPUMF_CTR_SET_CRYPTO:
+ if (info->csvn >= 1 && info->csvn <= 5)
+ ctrset_size = 16;
+ else if (info->csvn == 6)
+ ctrset_size = 20;
+ break;
+ case CPUMF_CTR_SET_EXT:
+ if (info->csvn == 1)
+ ctrset_size = 32;
+ else if (info->csvn == 2)
+ ctrset_size = 48;
+ else if (info->csvn >= 3 && info->csvn <= 5)
+ ctrset_size = 128;
+ else if (info->csvn == 6)
+ ctrset_size = 160;
+ break;
+ case CPUMF_CTR_SET_MT_DIAG:
+ if (info->csvn > 3)
+ ctrset_size = 48;
+ break;
+ case CPUMF_CTR_SET_MAX:
+ break;
+ }
+
+ return ctrset_size;
+}
+
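
With the sizing helper now shared, callers total the per-set counts to size their buffers, as cf_diag_ctrset_maxsize() does in the follow-on hunks. A self-contained sketch of that pattern — the switch mirrors the function added above, while the enum shape and the version numbers in main() are made-up sample values:

#include <stddef.h>
#include <stdio.h>

enum ctr_set { SET_BASIC, SET_USER, SET_CRYPTO, SET_EXT, SET_MT_DIAG, SET_MAX };
struct ctr_info { unsigned int cfvn, csvn; };

static size_t ctrset_size(enum ctr_set set, struct ctr_info *info)
{
	size_t n = 0;

	switch (set) {
	case SET_BASIC:
		if (info->cfvn >= 1)
			n = 6;
		break;
	case SET_USER:
		if (info->cfvn == 1)
			n = 6;
		else if (info->cfvn >= 3)
			n = 2;
		break;
	case SET_CRYPTO:
		if (info->csvn >= 1 && info->csvn <= 5)
			n = 16;
		else if (info->csvn == 6)
			n = 20;
		break;
	case SET_EXT:
		if (info->csvn == 1)
			n = 32;
		else if (info->csvn == 2)
			n = 48;
		else if (info->csvn >= 3 && info->csvn <= 5)
			n = 128;
		else if (info->csvn == 6)
			n = 160;
		break;
	case SET_MT_DIAG:
		if (info->csvn > 3)
			n = 48;
		break;
	default:
		break;
	}
	return n;
}

int main(void)
{
	struct ctr_info info = { .cfvn = 3, .csvn = 6 };	/* sample values */
	size_t counters = 0;
	int i;

	for (i = SET_BASIC; i < SET_MAX; i++)
		counters += ctrset_size(i, &info);
	/* 6 + 2 + 20 + 160 + 48 = 236 counters, 1888 bytes */
	printf("%zu counters, %zu bytes\n", counters, counters * 8);
	return 0;
}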
static int __init cpum_cf_init(void)
{
int rc;
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index db4877bbb9aa..08c985c1097c 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -26,12 +26,10 @@
#include <asm/timex.h>
#include <asm/debug.h>
-#include <asm/perf_cpum_cf_diag.h>
+#include <asm/hwctrset.h>
#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */
-#define CF_DIAG_MIN_INTERVAL 60 /* Minimum counter set read */
- /* interval in seconds */
-static unsigned long cf_diag_interval = CF_DIAG_MIN_INTERVAL;
static unsigned int cf_diag_cpu_speed;
static debug_info_t *cf_diag_dbg;
@@ -318,52 +316,6 @@ static void cf_diag_read(struct perf_event *event)
debug_sprintf_event(cf_diag_dbg, 5, "%s event %p\n", __func__, event);
}
-/* Return the maximum possible counter set size (in number of 8 byte counters)
- * depending on type and model number.
- */
-static size_t cf_diag_ctrset_size(enum cpumf_ctr_set ctrset,
- struct cpumf_ctr_info *info)
-{
- size_t ctrset_size = 0;
-
- switch (ctrset) {
- case CPUMF_CTR_SET_BASIC:
- if (info->cfvn >= 1)
- ctrset_size = 6;
- break;
- case CPUMF_CTR_SET_USER:
- if (info->cfvn == 1)
- ctrset_size = 6;
- else if (info->cfvn >= 3)
- ctrset_size = 2;
- break;
- case CPUMF_CTR_SET_CRYPTO:
- if (info->csvn >= 1 && info->csvn <= 5)
- ctrset_size = 16;
- else if (info->csvn == 6)
- ctrset_size = 20;
- break;
- case CPUMF_CTR_SET_EXT:
- if (info->csvn == 1)
- ctrset_size = 32;
- else if (info->csvn == 2)
- ctrset_size = 48;
- else if (info->csvn >= 3 && info->csvn <= 5)
- ctrset_size = 128;
- else if (info->csvn == 6)
- ctrset_size = 160;
- break;
- case CPUMF_CTR_SET_MT_DIAG:
- if (info->csvn > 3)
- ctrset_size = 48;
- break;
- case CPUMF_CTR_SET_MAX:
- break;
- }
-
- return ctrset_size;
-}
-
/* Calculate memory needed to store all counter sets together with header and
* trailer data. This is independent of the counter set authorization which
* can vary depending on the configuration.
@@ -374,7 +326,7 @@ static size_t cf_diag_ctrset_maxsize(struct cpumf_ctr_info *info)
enum cpumf_ctr_set i;
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
- size_t size = cf_diag_ctrset_size(i, info);
+ size_t size = cpum_cf_ctrset_size(i, info);
if (size)
max_size += size * sizeof(u64) +
@@ -407,7 +359,7 @@ static size_t cf_diag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
ctrdata->def = CF_DIAG_CTRSET_DEF;
ctrdata->set = ctrset;
ctrdata->res1 = 0;
- ctrset_size = cf_diag_ctrset_size(ctrset, &cpuhw->info);
+ ctrset_size = cpum_cf_ctrset_size(ctrset, &cpuhw->info);
if (ctrset_size) { /* Save data */
need = ctrset_size * sizeof(u64) + sizeof(*ctrdata);
@@ -729,7 +681,6 @@ static DEFINE_MUTEX(cf_diag_ctrset_mutex);
static struct cf_diag_ctrset {
unsigned long ctrset; /* Bit mask of counter set to read */
cpumask_t mask; /* CPU mask to read from */
- time64_t lastread; /* Epoch counter set last read */
} cf_diag_ctrset;
static void cf_diag_ctrset_clear(void)
@@ -848,7 +799,7 @@ static void cf_diag_cpu_read(void *parm)
if (!(p->sets & cpumf_ctr_ctl[set]))
continue; /* Counter set not in list */
- set_size = cf_diag_ctrset_size(set, &cpuhw->info);
+ set_size = cpum_cf_ctrset_size(set, &cpuhw->info);
space = sizeof(csd->data) - csd->used;
space = cf_diag_cpuset_read(sp, set, set_size, space);
if (space) {
@@ -866,27 +817,16 @@ static int cf_diag_all_read(unsigned long arg)
{
struct cf_diag_call_on_cpu_parm p;
cpumask_var_t mask;
- time64_t now;
- int rc = 0;
+ int rc;
debug_sprintf_event(cf_diag_dbg, 5, "%s\n", __func__);
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
return -ENOMEM;
- now = ktime_get_seconds();
- if (cf_diag_ctrset.lastread + cf_diag_interval > now) {
- debug_sprintf_event(cf_diag_dbg, 5, "%s now %lld "
- " lastread %lld\n", __func__, now,
- cf_diag_ctrset.lastread);
- rc = -EAGAIN;
- goto out;
- } else {
- cf_diag_ctrset.lastread = now;
- }
+
p.sets = cf_diag_ctrset.ctrset;
cpumask_and(mask, &cf_diag_ctrset.mask, cpu_online_mask);
on_each_cpu_mask(mask, cf_diag_cpu_read, &p, 1);
rc = cf_diag_all_copy(arg, mask);
-out:
free_cpumask_var(mask);
debug_sprintf_event(cf_diag_dbg, 5, "%s rc %d\n", __func__, rc);
return rc;
@@ -982,14 +922,14 @@ static int cf_diag_all_start(void)
*/
static size_t cf_diag_needspace(unsigned int sets)
{
- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ struct cpu_cf_events *cpuhw = get_cpu_ptr(&cpu_cf_events);
size_t bytes = 0;
int i;
for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
if (!(sets & cpumf_ctr_ctl[i]))
continue;
- bytes += cf_diag_ctrset_size(i, &cpuhw->info) * sizeof(u64) +
+ bytes += cpum_cf_ctrset_size(i, &cpuhw->info) * sizeof(u64) +
sizeof(((struct s390_ctrset_setdata *)0)->set) +
sizeof(((struct s390_ctrset_setdata *)0)->no_cnts);
}
@@ -998,6 +938,7 @@ static size_t cf_diag_needspace(unsigned int sets)
sizeof(((struct s390_ctrset_cpudata *)0)->no_sets));
debug_sprintf_event(cf_diag_dbg, 5, "%s bytes %ld\n", __func__,
bytes);
+ put_cpu_ptr(&cpu_cf_events);
return bytes;
}
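
The last hunk swaps this_cpu_ptr() for the get_cpu_ptr()/put_cpu_ptr() pair, which disables preemption for as long as the per-CPU pointer is live, so the task cannot migrate while it dereferences the structure. A toy userspace model of the pattern (the "CPU" is a plain index and the names only loosely mirror the kernel API):

#include <stdio.h>

#define NR_CPUS 4

static int preempt_count;
static int current_cpu;		/* changed by the scheduler in the kernel */

struct cpu_cf_events { unsigned int counters; };
static struct cpu_cf_events cpu_cf_events[NR_CPUS];

static struct cpu_cf_events *get_cpu_ptr_model(void)
{
	preempt_count++;	/* preempt_disable(): pins us to this CPU */
	return &cpu_cf_events[current_cpu];
}

static void put_cpu_ptr_model(void)
{
	preempt_count--;	/* preempt_enable() */
}

int main(void)
{
	struct cpu_cf_events *cpuhw = get_cpu_ptr_model();

	/* ...size computations using cpuhw stay on one CPU's data... */
	printf("cpu %d pinned, preempt_count=%d\n", current_cpu, preempt_count);
	put_cpu_ptr_model();
	return 0;
}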
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index 1e75cc983546..ea7729bebaa0 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -23,27 +23,6 @@
#include <asm/sysinfo.h>
#include <asm/unwind.h>
-const char *perf_pmu_name(void)
-{
- if (cpum_cf_avail() || cpum_sf_avail())
- return "CPU-Measurement Facilities (CPU-MF)";
- return "pmu";
-}
-EXPORT_SYMBOL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
- int num = 0;
-
- if (cpum_cf_avail())
- num += PERF_CPUM_CF_MAX_CTR;
- if (cpum_sf_avail())
- num += PERF_CPUM_SF_MAX_CTR;
-
- return num;
-}
-EXPORT_SYMBOL(perf_num_counters);
-
static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
{
struct stack_frame *stack = (struct stack_frame *) regs->gprs[15];
diff --git a/arch/s390/kernel/pgm_check.S b/arch/s390/kernel/pgm_check.S
deleted file mode 100644
index 9a92638360ee..000000000000
--- a/arch/s390/kernel/pgm_check.S
+++ /dev/null
@@ -1,147 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Program check table.
- *
- * Copyright IBM Corp. 2012
- */
-
-#include <linux/linkage.h>
-
-#define PGM_CHECK(handler) .quad handler
-#define PGM_CHECK_DEFAULT PGM_CHECK(default_trap_handler)
-
-/*
- * The program check table contains exactly 128 (0x00-0x7f) entries. Each
- * line defines the function to be called corresponding to the program check
- * interruption code.
- */
-.section .rodata, "a"
-ENTRY(pgm_check_table)
-PGM_CHECK_DEFAULT /* 00 */
-PGM_CHECK(illegal_op) /* 01 */
-PGM_CHECK(privileged_op) /* 02 */
-PGM_CHECK(execute_exception) /* 03 */
-PGM_CHECK(do_protection_exception) /* 04 */
-PGM_CHECK(addressing_exception) /* 05 */
-PGM_CHECK(specification_exception) /* 06 */
-PGM_CHECK(data_exception) /* 07 */
-PGM_CHECK(overflow_exception) /* 08 */
-PGM_CHECK(divide_exception) /* 09 */
-PGM_CHECK(overflow_exception) /* 0a */
-PGM_CHECK(divide_exception) /* 0b */
-PGM_CHECK(hfp_overflow_exception) /* 0c */
-PGM_CHECK(hfp_underflow_exception) /* 0d */
-PGM_CHECK(hfp_significance_exception) /* 0e */
-PGM_CHECK(hfp_divide_exception) /* 0f */
-PGM_CHECK(do_dat_exception) /* 10 */
-PGM_CHECK(do_dat_exception) /* 11 */
-PGM_CHECK(translation_exception) /* 12 */
-PGM_CHECK(special_op_exception) /* 13 */
-PGM_CHECK_DEFAULT /* 14 */
-PGM_CHECK(operand_exception) /* 15 */
-PGM_CHECK_DEFAULT /* 16 */
-PGM_CHECK_DEFAULT /* 17 */
-PGM_CHECK(transaction_exception) /* 18 */
-PGM_CHECK_DEFAULT /* 19 */
-PGM_CHECK_DEFAULT /* 1a */
-PGM_CHECK(vector_exception) /* 1b */
-PGM_CHECK(space_switch_exception) /* 1c */
-PGM_CHECK(hfp_sqrt_exception) /* 1d */
-PGM_CHECK_DEFAULT /* 1e */
-PGM_CHECK_DEFAULT /* 1f */
-PGM_CHECK_DEFAULT /* 20 */
-PGM_CHECK_DEFAULT /* 21 */
-PGM_CHECK_DEFAULT /* 22 */
-PGM_CHECK_DEFAULT /* 23 */
-PGM_CHECK_DEFAULT /* 24 */
-PGM_CHECK_DEFAULT /* 25 */
-PGM_CHECK_DEFAULT /* 26 */
-PGM_CHECK_DEFAULT /* 27 */
-PGM_CHECK_DEFAULT /* 28 */
-PGM_CHECK_DEFAULT /* 29 */
-PGM_CHECK_DEFAULT /* 2a */
-PGM_CHECK_DEFAULT /* 2b */
-PGM_CHECK_DEFAULT /* 2c */
-PGM_CHECK_DEFAULT /* 2d */
-PGM_CHECK_DEFAULT /* 2e */
-PGM_CHECK_DEFAULT /* 2f */
-PGM_CHECK_DEFAULT /* 30 */
-PGM_CHECK_DEFAULT /* 31 */
-PGM_CHECK_DEFAULT /* 32 */
-PGM_CHECK_DEFAULT /* 33 */
-PGM_CHECK_DEFAULT /* 34 */
-PGM_CHECK_DEFAULT /* 35 */
-PGM_CHECK_DEFAULT /* 36 */
-PGM_CHECK_DEFAULT /* 37 */
-PGM_CHECK(do_dat_exception) /* 38 */
-PGM_CHECK(do_dat_exception) /* 39 */
-PGM_CHECK(do_dat_exception) /* 3a */
-PGM_CHECK(do_dat_exception) /* 3b */
-PGM_CHECK_DEFAULT /* 3c */
-PGM_CHECK(do_secure_storage_access) /* 3d */
-PGM_CHECK(do_non_secure_storage_access) /* 3e */
-PGM_CHECK(do_secure_storage_violation) /* 3f */
-PGM_CHECK(monitor_event_exception) /* 40 */
-PGM_CHECK_DEFAULT /* 41 */
-PGM_CHECK_DEFAULT /* 42 */
-PGM_CHECK_DEFAULT /* 43 */
-PGM_CHECK_DEFAULT /* 44 */
-PGM_CHECK_DEFAULT /* 45 */
-PGM_CHECK_DEFAULT /* 46 */
-PGM_CHECK_DEFAULT /* 47 */
-PGM_CHECK_DEFAULT /* 48 */
-PGM_CHECK_DEFAULT /* 49 */
-PGM_CHECK_DEFAULT /* 4a */
-PGM_CHECK_DEFAULT /* 4b */
-PGM_CHECK_DEFAULT /* 4c */
-PGM_CHECK_DEFAULT /* 4d */
-PGM_CHECK_DEFAULT /* 4e */
-PGM_CHECK_DEFAULT /* 4f */
-PGM_CHECK_DEFAULT /* 50 */
-PGM_CHECK_DEFAULT /* 51 */
-PGM_CHECK_DEFAULT /* 52 */
-PGM_CHECK_DEFAULT /* 53 */
-PGM_CHECK_DEFAULT /* 54 */
-PGM_CHECK_DEFAULT /* 55 */
-PGM_CHECK_DEFAULT /* 56 */
-PGM_CHECK_DEFAULT /* 57 */
-PGM_CHECK_DEFAULT /* 58 */
-PGM_CHECK_DEFAULT /* 59 */
-PGM_CHECK_DEFAULT /* 5a */
-PGM_CHECK_DEFAULT /* 5b */
-PGM_CHECK_DEFAULT /* 5c */
-PGM_CHECK_DEFAULT /* 5d */
-PGM_CHECK_DEFAULT /* 5e */
-PGM_CHECK_DEFAULT /* 5f */
-PGM_CHECK_DEFAULT /* 60 */
-PGM_CHECK_DEFAULT /* 61 */
-PGM_CHECK_DEFAULT /* 62 */
-PGM_CHECK_DEFAULT /* 63 */
-PGM_CHECK_DEFAULT /* 64 */
-PGM_CHECK_DEFAULT /* 65 */
-PGM_CHECK_DEFAULT /* 66 */
-PGM_CHECK_DEFAULT /* 67 */
-PGM_CHECK_DEFAULT /* 68 */
-PGM_CHECK_DEFAULT /* 69 */
-PGM_CHECK_DEFAULT /* 6a */
-PGM_CHECK_DEFAULT /* 6b */
-PGM_CHECK_DEFAULT /* 6c */
-PGM_CHECK_DEFAULT /* 6d */
-PGM_CHECK_DEFAULT /* 6e */
-PGM_CHECK_DEFAULT /* 6f */
-PGM_CHECK_DEFAULT /* 70 */
-PGM_CHECK_DEFAULT /* 71 */
-PGM_CHECK_DEFAULT /* 72 */
-PGM_CHECK_DEFAULT /* 73 */
-PGM_CHECK_DEFAULT /* 74 */
-PGM_CHECK_DEFAULT /* 75 */
-PGM_CHECK_DEFAULT /* 76 */
-PGM_CHECK_DEFAULT /* 77 */
-PGM_CHECK_DEFAULT /* 78 */
-PGM_CHECK_DEFAULT /* 79 */
-PGM_CHECK_DEFAULT /* 7a */
-PGM_CHECK_DEFAULT /* 7b */
-PGM_CHECK_DEFAULT /* 7c */
-PGM_CHECK_DEFAULT /* 7d */
-PGM_CHECK_DEFAULT /* 7e */
-PGM_CHECK_DEFAULT /* 7f */
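
With the assembly table gone, the dispatch can live in C. A minimal sketch of a designated-initializer handler table, reusing handler names from the deleted file; this illustrates the shape, not necessarily the layout the replacement code uses:

#include <stdio.h>

struct pt_regs;		/* opaque here */

/* Two stand-in handlers; the real table has many more entries. */
static void default_trap_handler(struct pt_regs *regs) { (void)regs; puts("default"); }
static void illegal_op(struct pt_regs *regs)           { (void)regs; puts("illegal op"); }

/* Designated initializers cover the interesting codes; every other slot
 * stays NULL and falls back at dispatch time. */
static void (*const pgm_check_table[128])(struct pt_regs *) = {
	[0x01] = illegal_op,
	/* ... further entries elided ... */
};

static void dispatch_pgm_check(unsigned int code, struct pt_regs *regs)
{
	void (*handler)(struct pt_regs *) = pgm_check_table[code & 0x7f];

	(handler ? handler : default_trap_handler)(regs);
}

int main(void)
{
	dispatch_pgm_check(0x01, NULL);	/* illegal op */
	dispatch_pgm_check(0x14, NULL);	/* default */
	return 0;
}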
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index e20bed1ed34a..7ae5dde9c54d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -180,7 +180,7 @@ unsigned long get_wchan(struct task_struct *p)
struct unwind_state state;
unsigned long ip = 0;
- if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
+ if (!p || p == current || task_is_running(p) || !task_stack_page(p))
return 0;
if (!try_get_task_stack(p))
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 60da976eee6f..5aab59ad5688 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -354,7 +354,7 @@ static int __init stack_realloc(void)
if (!new)
panic("Couldn't allocate machine check stack");
WRITE_ONCE(S390_lowcore.mcck_stack, new + STACK_INIT_OFFSET);
- memblock_free(old, THREAD_SIZE);
+ memblock_free_late(old, THREAD_SIZE);
return 0;
}
early_initcall(stack_realloc);
@@ -937,9 +937,9 @@ static int __init setup_hwcaps(void)
if (MACHINE_HAS_VX) {
elf_hwcap |= HWCAP_S390_VXRS;
if (test_facility(134))
- elf_hwcap |= HWCAP_S390_VXRS_EXT;
- if (test_facility(135))
elf_hwcap |= HWCAP_S390_VXRS_BCD;
+ if (test_facility(135))
+ elf_hwcap |= HWCAP_S390_VXRS_EXT;
if (test_facility(148))
elf_hwcap |= HWCAP_S390_VXRS_EXT2;
if (test_facility(152))
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 90163e6184f5..080e7aed181f 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -512,7 +512,6 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
/* No handlers present - check for system call restart */
clear_pt_regs_flag(regs, PIF_SYSCALL);
- clear_pt_regs_flag(regs, PIF_SYSCALL_RESTART);
if (current->thread.system_call) {
regs->int_code = current->thread.system_call;
switch (regs->gprs[2]) {
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 58c8afa3da65..111909aeb8d2 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -429,6 +429,7 @@ void notrace smp_yield_cpu(int cpu)
asm volatile("diag %0,0,0x9c"
: : "d" (pcpu_devices[cpu].address));
}
+EXPORT_SYMBOL_GPL(smp_yield_cpu);
/*
* Send cpus emergency shutdown signal. This gives the cpus the
@@ -877,7 +878,6 @@ static void smp_init_secondary(void)
restore_access_regs(S390_lowcore.access_regs_save_area);
cpu_init();
rcu_cpu_starting(cpu);
- preempt_disable();
init_cpu_timer();
vtime_init();
vdso_getcpu_init();
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 7f1266c24f6b..101477b3e263 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -24,12 +24,6 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
}
}
-/*
- * This function returns an error if it detects any unreliable features of the
- * stack. Otherwise it guarantees that the stack trace is reliable.
- *
- * If the task is not 'current', the caller *must* ensure the task is inactive.
- */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task)
{
diff --git a/arch/s390/kernel/syscall.c b/arch/s390/kernel/syscall.c
index bc8e650e377d..4e5cc7d2364e 100644
--- a/arch/s390/kernel/syscall.c
+++ b/arch/s390/kernel/syscall.c
@@ -142,6 +142,7 @@ void do_syscall(struct pt_regs *regs)
void noinstr __do_syscall(struct pt_regs *regs, int per_trap)
{
+ add_random_kstack_offset();
enter_from_user_mode(regs);
memcpy(&regs->gprs[8], S390_lowcore.save_area_sync, 8 * sizeof(unsigned long));
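
add_random_kstack_offset() consumes an offset that the exit path banked on the previous return to user space (see the entry-common.h hunk earlier), so the randomness is never generated inside the window being protected. A userspace model of the pairing; the names only loosely mirror the kernel API and the cap value is illustrative:

#include <alloca.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t kstack_offset;		/* per-CPU in the kernel */

#define add_random_kstack_offset_model() do {				\
	volatile char *sp__ = alloca((kstack_offset & 0x3ff) + 1);	\
	sp__[0] = 0;		/* keep the allocation alive */		\
} while (0)

static void choose_random_kstack_offset_model(uint32_t rand)
{
	kstack_offset = rand;
}

static void probe(void)
{
	int marker;

	printf("handler frame at %p\n", (void *)&marker);
}

static void syscall_model(uint32_t tod_entropy)
{
	add_random_kstack_offset_model();	/* entry: consume the offset */
	probe();				/* frame depth now varies */
	choose_random_kstack_offset_model(tod_entropy & 0xff);	/* exit */
}

int main(void)
{
	syscall_model(0x2a);
	syscall_model(0x91);	/* second frame lands at a different depth */
	return 0;
}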
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 3abef2144dac..64d51ab5a8b4 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -445,3 +445,7 @@
440 common process_madvise sys_process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
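The four new table entries wire up quotactl_fd and the Landlock system calls for s390. A minimal sketch probing the Landlock ABI level through the freshly assigned number 444; the fallback define is only needed while <sys/syscall.h> predates these entries:

/*
 * Passing a NULL attribute with size 0 plus the VERSION flag is the
 * documented way to query the Landlock ABI level without creating a
 * ruleset.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_landlock_create_ruleset
#define __NR_landlock_create_ruleset 444	/* from the table above */
#endif
#define LANDLOCK_CREATE_RULESET_VERSION (1U << 0)

int main(void)
{
	long abi = syscall(__NR_landlock_create_ruleset, NULL, 0,
			   LANDLOCK_CREATE_RULESET_VERSION);

	if (abi < 0)
		perror("landlock_create_ruleset");
	else
		printf("Landlock ABI version: %ld\n", abi);
	return 0;
}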
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 06bcfa636638..326cb8f75f58 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -68,10 +68,10 @@ EXPORT_SYMBOL(s390_epoch_delta_notifier);
unsigned char ptff_function_mask[16];
-static unsigned long long lpar_offset;
-static unsigned long long initial_leap_seconds;
-static unsigned long long tod_steering_end;
-static long long tod_steering_delta;
+static unsigned long lpar_offset;
+static unsigned long initial_leap_seconds;
+static unsigned long tod_steering_end;
+static long tod_steering_delta;
/*
* Get time offsets with PTFF
@@ -80,10 +80,12 @@ void __init time_early_init(void)
{
struct ptff_qto qto;
struct ptff_qui qui;
+ int cs;
/* Initialize TOD steering parameters */
tod_steering_end = tod_clock_base.tod;
- vdso_data->arch_data.tod_steering_end = tod_steering_end;
+ for (cs = 0; cs < CS_BASES; cs++)
+ vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
if (!test_facility(28))
return;
@@ -96,7 +98,7 @@ void __init time_early_init(void)
/* get initial leap seconds */
if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
- initial_leap_seconds = (unsigned long long)
+ initial_leap_seconds = (unsigned long)
((long) qui.old_leap * 4096000000L);
}
@@ -222,7 +224,7 @@ void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
static u64 read_tod_clock(struct clocksource *cs)
{
- unsigned long long now, adj;
+ unsigned long now, adj;
preempt_disable(); /* protect from changes to steering parameters */
now = get_tod_clock();
@@ -362,10 +364,11 @@ static inline int check_sync_clock(void)
* Apply clock delta to the global data structures.
* This is called once on the CPU that performed the clock sync.
*/
-static void clock_sync_global(unsigned long long delta)
+static void clock_sync_global(unsigned long delta)
{
unsigned long now, adj;
struct ptff_qto qto;
+ int cs;
/* Fixup the monotonic sched clock. */
tod_clock_base.eitod += delta;
@@ -378,10 +381,13 @@ static void clock_sync_global(unsigned long long delta)
-(adj >> 15) : (adj >> 15);
tod_steering_delta += delta;
if ((abs(tod_steering_delta) >> 48) != 0)
- panic("TOD clock sync offset %lli is too large to drift\n",
+ panic("TOD clock sync offset %li is too large to drift\n",
tod_steering_delta);
tod_steering_end = now + (abs(tod_steering_delta) << 15);
- vdso_data->arch_data.tod_steering_end = tod_steering_end;
+ for (cs = 0; cs < CS_BASES; cs++) {
+ vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
+ vdso_data[cs].arch_data.tod_steering_delta = tod_steering_delta;
+ }
/* Update LPAR offset. */
if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
@@ -394,7 +400,7 @@ static void clock_sync_global(unsigned long long delta)
* Apply clock delta to the per-CPU data structures of this CPU.
* This is called for each online CPU after the call to clock_sync_global.
*/
-static void clock_sync_local(unsigned long long delta)
+static void clock_sync_local(unsigned long delta)
{
/* Add the delta to the clock comparator. */
if (S390_lowcore.clock_comparator != clock_comparator_max) {
@@ -418,7 +424,7 @@ static void __init time_init_wq(void)
struct clock_sync_data {
atomic_t cpus;
int in_sync;
- unsigned long long clock_delta;
+ unsigned long clock_delta;
};
/*
@@ -538,7 +544,7 @@ static int stpinfo_valid(void)
static int stp_sync_clock(void *data)
{
struct clock_sync_data *sync = data;
- unsigned long long clock_delta, flags;
+ u64 clock_delta, flags;
static int first;
int rc;
@@ -720,8 +726,8 @@ static ssize_t ctn_id_show(struct device *dev,
mutex_lock(&stp_mutex);
if (stpinfo_valid())
- ret = sprintf(buf, "%016llx\n",
- *(unsigned long long *) stp_info.ctnid);
+ ret = sprintf(buf, "%016lx\n",
+ *(unsigned long *) stp_info.ctnid);
mutex_unlock(&stp_mutex);
return ret;
}
@@ -794,7 +800,7 @@ static ssize_t leap_seconds_scheduled_show(struct device *dev,
if (!stzi.lsoib.p)
return sprintf(buf, "0,0\n");
- return sprintf(buf, "%llu,%d\n",
+ return sprintf(buf, "%lu,%d\n",
tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC,
stzi.lsoib.nlso - stzi.lsoib.also);
}
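The long-long-to-long churn in time.c is safe because s390x is an LP64 target: unsigned long is already 64 bits wide, so it can carry a full TOD value and the %lli/%llu format strings shrink to %li/%lu. A one-line compile-time check of that assumption:

#include <stdint.h>

/* Holds on any LP64 target such as s390x; it would fail on 32-bit. */
_Static_assert(sizeof(unsigned long) == sizeof(uint64_t),
	       "unsigned long must be wide enough for a TOD value");

int main(void)
{
	return 0;
}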
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index e7ce447651b9..26aa2614ee35 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -66,7 +66,10 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
{
static cpumask_t mask;
- cpumask_copy(&mask, cpumask_of(cpu));
+ cpumask_clear(&mask);
+ if (!cpu_online(cpu))
+ goto out;
+ cpumask_set_cpu(cpu, &mask);
switch (topology_mode) {
case TOPOLOGY_MODE_HW:
while (info) {
@@ -76,8 +79,6 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
}
info = info->next;
}
- if (cpumask_empty(&mask))
- cpumask_copy(&mask, cpumask_of(cpu));
break;
case TOPOLOGY_MODE_PACKAGE:
cpumask_copy(&mask, cpu_present_mask);
@@ -85,10 +86,10 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
default:
fallthrough;
case TOPOLOGY_MODE_SINGLE:
- cpumask_copy(&mask, cpumask_of(cpu));
break;
}
cpumask_and(&mask, &mask, cpu_online_mask);
+out:
cpumask_copy(dst, &mask);
}
@@ -97,7 +98,10 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
static cpumask_t mask;
int i;
- cpumask_copy(&mask, cpumask_of(cpu));
+ cpumask_clear(&mask);
+ if (!cpu_online(cpu))
+ goto out;
+ cpumask_set_cpu(cpu, &mask);
if (topology_mode != TOPOLOGY_MODE_HW)
goto out;
cpu -= cpu % (smp_cpu_mtid + 1);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index db7dd59b570c..8dd23c703718 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -17,6 +17,7 @@
#include "asm/ptrace.h"
#include <linux/kprobes.h>
#include <linux/kdebug.h>
+#include <linux/randomize_kstack.h>
#include <linux/extable.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
@@ -79,7 +80,7 @@ void do_per_trap(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(do_per_trap);
-void default_trap_handler(struct pt_regs *regs)
+static void default_trap_handler(struct pt_regs *regs)
{
if (user_mode(regs)) {
report_user_fault(regs, SIGSEGV, 0);
@@ -89,7 +90,7 @@ void default_trap_handler(struct pt_regs *regs)
}
#define DO_ERROR_INFO(name, signr, sicode, str) \
-void name(struct pt_regs *regs) \
+static void name(struct pt_regs *regs) \
{ \
do_trap(regs, signr, sicode, str); \
}
@@ -141,13 +142,13 @@ static inline void do_fp_trap(struct pt_regs *regs, __u32 fpc)
do_trap(regs, SIGFPE, si_code, "floating point exception");
}
-void translation_exception(struct pt_regs *regs)
+static void translation_exception(struct pt_regs *regs)
{
/* May never happen. */
panic("Translation exception");
}
-void illegal_op(struct pt_regs *regs)
+static void illegal_op(struct pt_regs *regs)
{
__u8 opcode[6];
__u16 __user *location;
@@ -189,7 +190,7 @@ NOKPROBE_SYMBOL(illegal_op);
DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN,
"specification exception");
-void vector_exception(struct pt_regs *regs)
+static void vector_exception(struct pt_regs *regs)
{
int si_code, vic;
@@ -223,7 +224,7 @@ void vector_exception(struct pt_regs *regs)
do_trap(regs, SIGFPE, si_code, "vector exception");
}
-void data_exception(struct pt_regs *regs)
+static void data_exception(struct pt_regs *regs)
{
save_fpu_regs();
if (current->thread.fpu.fpc & FPC_DXC_MASK)
@@ -232,7 +233,7 @@ void data_exception(struct pt_regs *regs)
do_trap(regs, SIGILL, ILL_ILLOPN, "data exception");
}
-void space_switch_exception(struct pt_regs *regs)
+static void space_switch_exception(struct pt_regs *regs)
{
/* Set user psw back to home space mode. */
if (user_mode(regs))
@@ -241,7 +242,7 @@ void space_switch_exception(struct pt_regs *regs)
do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
}
-void monitor_event_exception(struct pt_regs *regs)
+static void monitor_event_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
@@ -293,12 +294,15 @@ void __init trap_init(void)
test_monitor_call();
}
+static void (*pgm_check_table[128])(struct pt_regs *regs);
+
void noinstr __do_pgm_check(struct pt_regs *regs)
{
unsigned long last_break = S390_lowcore.breaking_event_addr;
unsigned int trapnr, syscall_redirect = 0;
irqentry_state_t state;
+ add_random_kstack_offset();
regs->int_code = *(u32 *)&S390_lowcore.pgm_ilc;
regs->int_parm_long = S390_lowcore.trans_exc_code;
@@ -353,3 +357,61 @@ out:
exit_to_user_mode();
}
}
+
+/*
+ * The program check table contains exactly 128 (0x00-0x7f) entries. Each
+ * line defines the function to be called corresponding to the program check
+ * interruption code.
+ */
+static void (*pgm_check_table[128])(struct pt_regs *regs) = {
+ [0x00] = default_trap_handler,
+ [0x01] = illegal_op,
+ [0x02] = privileged_op,
+ [0x03] = execute_exception,
+ [0x04] = do_protection_exception,
+ [0x05] = addressing_exception,
+ [0x06] = specification_exception,
+ [0x07] = data_exception,
+ [0x08] = overflow_exception,
+ [0x09] = divide_exception,
+ [0x0a] = overflow_exception,
+ [0x0b] = divide_exception,
+ [0x0c] = hfp_overflow_exception,
+ [0x0d] = hfp_underflow_exception,
+ [0x0e] = hfp_significance_exception,
+ [0x0f] = hfp_divide_exception,
+ [0x10] = do_dat_exception,
+ [0x11] = do_dat_exception,
+ [0x12] = translation_exception,
+ [0x13] = special_op_exception,
+ [0x14] = default_trap_handler,
+ [0x15] = operand_exception,
+ [0x16] = default_trap_handler,
+ [0x17] = default_trap_handler,
+ [0x18] = transaction_exception,
+ [0x19] = default_trap_handler,
+ [0x1a] = default_trap_handler,
+ [0x1b] = vector_exception,
+ [0x1c] = space_switch_exception,
+ [0x1d] = hfp_sqrt_exception,
+ [0x1e ... 0x37] = default_trap_handler,
+ [0x38] = do_dat_exception,
+ [0x39] = do_dat_exception,
+ [0x3a] = do_dat_exception,
+ [0x3b] = do_dat_exception,
+ [0x3c] = default_trap_handler,
+ [0x3d] = do_secure_storage_access,
+ [0x3e] = do_non_secure_storage_access,
+ [0x3f] = do_secure_storage_violation,
+ [0x40] = monitor_event_exception,
+ [0x41 ... 0x7f] = default_trap_handler,
+};
+
+#define COND_TRAP(x) asm( \
+ ".weak " __stringify(x) "\n\t" \
+ ".set " __stringify(x) "," \
+ __stringify(default_trap_handler))
+
+COND_TRAP(do_secure_storage_access);
+COND_TRAP(do_non_secure_storage_access);
+COND_TRAP(do_secure_storage_violation);
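COND_TRAP() emits a weak definition so that a handler compiled out by Kconfig silently resolves to default_trap_handler at link time; a strong definition elsewhere (here the CONFIG_PGSTE handlers in mm/fault.c) simply overrides it. A hedged userspace analogue of the same linker trick, using GCC's alias attribute in place of hand-written .weak/.set directives:

#include <stdio.h>

void fallback(void)
{
	puts("fallback handler");
}

/*
 * Weak alias: if no strong definition of maybe_handler is linked in,
 * calls to it resolve to fallback -- the same mechanism COND_TRAP()
 * builds for the secure-storage handlers.
 */
void maybe_handler(void) __attribute__((weak, alias("fallback")));

int main(void)
{
	maybe_handler();
	return 0;
}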
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index b2d2ad153067..370f664580af 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -406,6 +406,41 @@ static struct attribute_group uv_query_attr_group = {
.attrs = uv_query_attrs,
};
+static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ int val = 0;
+
+#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
+ val = prot_virt_guest;
+#endif
+ return scnprintf(page, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
+ struct kobj_attribute *attr, char *page)
+{
+ int val = 0;
+
+#if IS_ENABLED(CONFIG_KVM)
+ val = prot_virt_host;
+#endif
+
+ return scnprintf(page, PAGE_SIZE, "%d\n", val);
+}
+
+static struct kobj_attribute uv_prot_virt_guest =
+ __ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);
+
+static struct kobj_attribute uv_prot_virt_host =
+ __ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);
+
+static const struct attribute *uv_prot_virt_attrs[] = {
+ &uv_prot_virt_guest.attr,
+ &uv_prot_virt_host.attr,
+ NULL,
+};
+
static struct kset *uv_query_kset;
static struct kobject *uv_kobj;
@@ -420,15 +455,23 @@ static int __init uv_info_init(void)
if (!uv_kobj)
return -ENOMEM;
- uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
- if (!uv_query_kset)
+ rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
+ if (rc)
goto out_kobj;
+ uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
+ if (!uv_query_kset) {
+ rc = -ENOMEM;
+ goto out_ind_files;
+ }
+
rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
if (!rc)
return 0;
kset_unregister(uv_query_kset);
+out_ind_files:
+ sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
kobject_del(uv_kobj);
kobject_put(uv_kobj);
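The two new attributes surface the protected-virtualization state to userspace. Given that uv_kobj is registered under firmware_kobj (not shown in this hunk), they should appear as /sys/firmware/uv/prot_virt_guest and /sys/firmware/uv/prot_virt_host; a small reader, with those paths taken as an assumption:

#include <stdio.h>

/* Paths assumed from the uv kobject registration; verify on a live system. */
static int read_flag(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("prot_virt_guest=%d prot_virt_host=%d\n",
	       read_flag("/sys/firmware/uv/prot_virt_guest"),
	       read_flag("/sys/firmware/uv/prot_virt_host"));
	return 0;
}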
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 73c7afcc0527..f216a1b2f825 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -214,7 +214,7 @@ void vtime_flush(struct task_struct *tsk)
avg_steal = S390_lowcore.avg_steal_timer / 2;
if ((s64) steal > 0) {
S390_lowcore.steal_timer = 0;
- account_steal_time(steal);
+ account_steal_time(cputime_to_nsecs(steal));
avg_steal += steal;
}
S390_lowcore.avg_steal_timer = avg_steal;
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index 12decca22e7c..b3aaadc60ead 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -4,7 +4,8 @@
# Copyright IBM Corp. 2008
KVM := ../../../virt/kvm
-common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o
+common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o \
+ $(KVM)/irqchip.o $(KVM)/vfio.o $(KVM)/binary_stats.o
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 5b8ec1c447e1..02c146f9e5cd 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -150,6 +150,19 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
return 0;
}
+static int forward_cnt;
+static unsigned long cur_slice;
+
+static int diag9c_forwarding_overrun(void)
+{
+ /* Reset the count on a new slice */
+ if (time_after(jiffies, cur_slice)) {
+ cur_slice = jiffies;
+ forward_cnt = diag9c_forwarding_hz / HZ;
+ }
+ return forward_cnt-- <= 0 ? 1 : 0;
+}
+
static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu *tcpu;
@@ -167,9 +180,21 @@ static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
if (!tcpu)
goto no_yield;
- /* target already running */
- if (READ_ONCE(tcpu->cpu) >= 0)
- goto no_yield;
+ /* target guest VCPU already running */
+ if (READ_ONCE(tcpu->cpu) >= 0) {
+ if (!diag9c_forwarding_hz || diag9c_forwarding_overrun())
+ goto no_yield;
+
+ /* target host CPU already running */
+ if (!vcpu_is_preempted(tcpu->cpu))
+ goto no_yield;
+ smp_yield_cpu(tcpu->cpu);
+ VCPU_EVENT(vcpu, 5,
+ "diag time slice end directed to %d: yield forwarded",
+ tid);
+ vcpu->stat.diagnose_9c_forward++;
+ return 0;
+ }
if (kvm_vcpu_yield_to(tcpu) <= 0)
goto no_yield;
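diag9c_forwarding_overrun() above implements a per-tick budget: each new jiffy refills forward_cnt from diag9c_forwarding_hz / HZ, and every forwarded yield consumes one unit, so forwarding degrades gracefully under a storm of DIAG 0x9c requests. A self-contained sketch of the same pattern, with one-second slices via time(2) standing in for jiffies:

#include <stdio.h>
#include <time.h>

static int budget;
static time_t cur_slice;

/* Returns 1 when the per-second budget is exhausted. */
static int overrun(int per_second)
{
	time_t now = time(NULL);

	if (now != cur_slice) {		/* new slice: refill the budget */
		cur_slice = now;
		budget = per_second;
	}
	return budget-- <= 0;
}

int main(void)
{
	int dropped = 0;

	for (int i = 0; i < 1000; i++)
		dropped += overrun(400);
	printf("dropped %d of 1000 events\n", dropped);
	return 0;
}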
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 6d6b57059493..b9f85b2dc053 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -976,7 +976,9 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
* kvm_s390_shadow_tables - walk the guest page table and create shadow tables
* @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap
- * @pgt: pointer to the page table address result
+ * @pgt: pointer to the beginning of the page table for the given address if
+ * successful (return value 0), or to the first invalid DAT entry in
+ * case of exceptions (return value > 0)
* @fake: pgt references contiguous guest memory block, not a pgtable
*/
static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
@@ -1034,6 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
rfte.val = ptr;
goto shadow_r2t;
}
+ *pgt = ptr + vaddr.rfx * 8;
rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
if (rc)
return rc;
@@ -1060,6 +1063,7 @@ shadow_r2t:
rste.val = ptr;
goto shadow_r3t;
}
+ *pgt = ptr + vaddr.rsx * 8;
rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
if (rc)
return rc;
@@ -1087,6 +1091,7 @@ shadow_r3t:
rtte.val = ptr;
goto shadow_sgt;
}
+ *pgt = ptr + vaddr.rtx * 8;
rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
if (rc)
return rc;
@@ -1123,6 +1128,7 @@ shadow_sgt:
ste.val = ptr;
goto shadow_pgt;
}
+ *pgt = ptr + vaddr.sx * 8;
rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
if (rc)
return rc;
@@ -1157,6 +1163,8 @@ shadow_pgt:
* @vcpu: virtual cpu
* @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap
+ * @datptr: will contain the address of the faulting DAT table entry, or of
+ * the valid leaf, plus some flags
*
* Returns: - 0 if the shadow fault was successfully resolved
* - > 0 (pgm exception code) on exceptions while faulting
@@ -1165,11 +1173,11 @@ shadow_pgt:
* - -ENOMEM if out of memory
*/
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
- unsigned long saddr)
+ unsigned long saddr, unsigned long *datptr)
{
union vaddress vaddr;
union page_table_entry pte;
- unsigned long pgt;
+ unsigned long pgt = 0;
int dat_protection, fake;
int rc;
@@ -1191,8 +1199,20 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
pte.val = pgt + vaddr.px * PAGE_SIZE;
goto shadow_page;
}
- if (!rc)
- rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
+
+ switch (rc) {
+ case PGM_SEGMENT_TRANSLATION:
+ case PGM_REGION_THIRD_TRANS:
+ case PGM_REGION_SECOND_TRANS:
+ case PGM_REGION_FIRST_TRANS:
+ pgt |= PEI_NOT_PTE;
+ break;
+ case 0:
+ pgt += vaddr.px * 8;
+ rc = gmap_read_table(sg->parent, pgt, &pte.val);
+ }
+ if (datptr)
+ *datptr = pgt | dat_protection * PEI_DAT_PROT;
if (!rc && pte.i)
rc = PGM_PAGE_TRANSLATION;
if (!rc && pte.z)
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index f4c51756c462..7c72a5e3449f 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -18,17 +18,14 @@
/**
* kvm_s390_real_to_abs - convert guest real address to guest absolute address
- * @vcpu - guest virtual cpu
+ * @prefix - guest prefix
* @gra - guest real address
*
* Returns the guest absolute address that corresponds to the passed guest real
- * address @gra of a virtual guest cpu by applying its prefix.
+ * address @gra by applying the given prefix.
*/
-static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
- unsigned long gra)
+static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
{
- unsigned long prefix = kvm_s390_get_prefix(vcpu);
-
if (gra < 2 * PAGE_SIZE)
gra += prefix;
else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
@@ -37,6 +34,43 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
}
/**
+ * kvm_s390_real_to_abs - convert guest real address to guest absolute address
+ * @vcpu - guest virtual cpu
+ * @gra - guest real address
+ *
+ * Returns the guest absolute address that corresponds to the passed guest real
+ * address @gra of a virtual guest cpu by applying its prefix.
+ */
+static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
+ unsigned long gra)
+{
+ return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
+}
+
+/**
+ * _kvm_s390_logical_to_effective - convert guest logical to effective address
+ * @psw: psw of the guest
+ * @ga: guest logical address
+ *
+ * Convert a guest logical address to an effective address by applying the
+ * rules of the addressing mode defined by bits 31 and 32 of the given PSW
+ * (extended/basic addressing mode).
+ *
+ * Depending on the addressing mode, the upper 40 bits (24 bit addressing
+ * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
+ * mode) of @ga will be zeroed and the remaining bits will be returned.
+ */
+static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
+ unsigned long ga)
+{
+ if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
+ return ga;
+ if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
+ return ga & ((1UL << 31) - 1);
+ return ga & ((1UL << 24) - 1);
+}
+
+/**
* kvm_s390_logical_to_effective - convert guest logical to effective address
* @vcpu: guest virtual cpu
* @ga: guest logical address
@@ -52,13 +86,7 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
unsigned long ga)
{
- psw_t *psw = &vcpu->arch.sie_block->gpsw;
-
- if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
- return ga;
- if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
- return ga & ((1UL << 31) - 1);
- return ga & ((1UL << 24) - 1);
+ return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
}
/*
@@ -359,7 +387,11 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
+/* MVPG PEI indication bits */
+#define PEI_DAT_PROT 2
+#define PEI_NOT_PTE 4
+
int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
- unsigned long saddr);
+ unsigned long saddr, unsigned long *datptr);
#endif /* __KVM_S390_GACCESS_H */
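_kvm_s390_logical_to_effective() masks off the bits that take no part in address formation for the current addressing mode. A stand-alone restatement with a few sanity checks (the enum names are illustrative, not from the source):

#include <assert.h>

enum amode { AMODE_24, AMODE_31, AMODE_64 };	/* illustrative names */

static unsigned long logical_to_effective(enum amode m, unsigned long ga)
{
	switch (m) {
	case AMODE_64:
		return ga;			/* all 64 bits participate */
	case AMODE_31:
		return ga & ((1UL << 31) - 1);	/* upper 33 bits zeroed */
	default:
		return ga & ((1UL << 24) - 1);	/* upper 40 bits zeroed */
	}
}

int main(void)
{
	assert(logical_to_effective(AMODE_24, 0xffffffffUL) == 0x00ffffffUL);
	assert(logical_to_effective(AMODE_31, 0xffffffffUL) == 0x7fffffffUL);
	assert(logical_to_effective(AMODE_64, ~0UL) == ~0UL);
	return 0;
}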
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index e3183bd05910..d548d60caed2 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1287,7 +1287,7 @@ static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
/* already expired? */
if (cputm >> 63)
return 0;
- return min(sltime, tod_to_ns(cputm));
+ return min_t(u64, sltime, tod_to_ns(cputm));
}
} else if (cpu_timer_interrupts_enabled(vcpu)) {
sltime = kvm_s390_get_cpu_timer(vcpu);
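The min() to min_t() change is needed because the kernel's min() rejects operands of different types, and tod_to_ns() now yields unsigned long while sltime is u64. A reduced illustration of the casting idiom; unlike the kernel macro, this simple form evaluates its arguments twice:

#include <stdio.h>

/* Simplified: the kernel version uses temporaries so that each
 * argument is evaluated exactly once. */
#define min_t(type, x, y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

int main(void)
{
	unsigned long long sltime = 5;
	unsigned long cputm_ns = 9;

	printf("%llu\n", min_t(unsigned long long, sltime, cputm_ns));
	return 0;
}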
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2f09e9d7dc95..f9fb1e1d960d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -58,111 +58,132 @@
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
(KVM_MAX_VCPUS + LOCAL_IRQS))
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("userspace_handled", exit_userspace),
- VCPU_STAT("exit_null", exit_null),
- VCPU_STAT("pfault_sync", pfault_sync),
- VCPU_STAT("exit_validity", exit_validity),
- VCPU_STAT("exit_stop_request", exit_stop_request),
- VCPU_STAT("exit_external_request", exit_external_request),
- VCPU_STAT("exit_io_request", exit_io_request),
- VCPU_STAT("exit_external_interrupt", exit_external_interrupt),
- VCPU_STAT("exit_instruction", exit_instruction),
- VCPU_STAT("exit_pei", exit_pei),
- VCPU_STAT("exit_program_interruption", exit_program_interruption),
- VCPU_STAT("exit_instr_and_program_int", exit_instr_and_program),
- VCPU_STAT("exit_operation_exception", exit_operation_exception),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_no_poll_steal", halt_no_poll_steal),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- VCPU_STAT("instruction_lctlg", instruction_lctlg),
- VCPU_STAT("instruction_lctl", instruction_lctl),
- VCPU_STAT("instruction_stctl", instruction_stctl),
- VCPU_STAT("instruction_stctg", instruction_stctg),
- VCPU_STAT("deliver_ckc", deliver_ckc),
- VCPU_STAT("deliver_cputm", deliver_cputm),
- VCPU_STAT("deliver_emergency_signal", deliver_emergency_signal),
- VCPU_STAT("deliver_external_call", deliver_external_call),
- VCPU_STAT("deliver_service_signal", deliver_service_signal),
- VCPU_STAT("deliver_virtio", deliver_virtio),
- VCPU_STAT("deliver_stop_signal", deliver_stop_signal),
- VCPU_STAT("deliver_prefix_signal", deliver_prefix_signal),
- VCPU_STAT("deliver_restart_signal", deliver_restart_signal),
- VCPU_STAT("deliver_program", deliver_program),
- VCPU_STAT("deliver_io", deliver_io),
- VCPU_STAT("deliver_machine_check", deliver_machine_check),
- VCPU_STAT("exit_wait_state", exit_wait_state),
- VCPU_STAT("inject_ckc", inject_ckc),
- VCPU_STAT("inject_cputm", inject_cputm),
- VCPU_STAT("inject_external_call", inject_external_call),
- VM_STAT("inject_float_mchk", inject_float_mchk),
- VCPU_STAT("inject_emergency_signal", inject_emergency_signal),
- VM_STAT("inject_io", inject_io),
- VCPU_STAT("inject_mchk", inject_mchk),
- VM_STAT("inject_pfault_done", inject_pfault_done),
- VCPU_STAT("inject_program", inject_program),
- VCPU_STAT("inject_restart", inject_restart),
- VM_STAT("inject_service_signal", inject_service_signal),
- VCPU_STAT("inject_set_prefix", inject_set_prefix),
- VCPU_STAT("inject_stop_signal", inject_stop_signal),
- VCPU_STAT("inject_pfault_init", inject_pfault_init),
- VM_STAT("inject_virtio", inject_virtio),
- VCPU_STAT("instruction_epsw", instruction_epsw),
- VCPU_STAT("instruction_gs", instruction_gs),
- VCPU_STAT("instruction_io_other", instruction_io_other),
- VCPU_STAT("instruction_lpsw", instruction_lpsw),
- VCPU_STAT("instruction_lpswe", instruction_lpswe),
- VCPU_STAT("instruction_pfmf", instruction_pfmf),
- VCPU_STAT("instruction_ptff", instruction_ptff),
- VCPU_STAT("instruction_stidp", instruction_stidp),
- VCPU_STAT("instruction_sck", instruction_sck),
- VCPU_STAT("instruction_sckpf", instruction_sckpf),
- VCPU_STAT("instruction_spx", instruction_spx),
- VCPU_STAT("instruction_stpx", instruction_stpx),
- VCPU_STAT("instruction_stap", instruction_stap),
- VCPU_STAT("instruction_iske", instruction_iske),
- VCPU_STAT("instruction_ri", instruction_ri),
- VCPU_STAT("instruction_rrbe", instruction_rrbe),
- VCPU_STAT("instruction_sske", instruction_sske),
- VCPU_STAT("instruction_ipte_interlock", instruction_ipte_interlock),
- VCPU_STAT("instruction_essa", instruction_essa),
- VCPU_STAT("instruction_stsi", instruction_stsi),
- VCPU_STAT("instruction_stfl", instruction_stfl),
- VCPU_STAT("instruction_tb", instruction_tb),
- VCPU_STAT("instruction_tpi", instruction_tpi),
- VCPU_STAT("instruction_tprot", instruction_tprot),
- VCPU_STAT("instruction_tsch", instruction_tsch),
- VCPU_STAT("instruction_sthyi", instruction_sthyi),
- VCPU_STAT("instruction_sie", instruction_sie),
- VCPU_STAT("instruction_sigp_sense", instruction_sigp_sense),
- VCPU_STAT("instruction_sigp_sense_running", instruction_sigp_sense_running),
- VCPU_STAT("instruction_sigp_external_call", instruction_sigp_external_call),
- VCPU_STAT("instruction_sigp_emergency", instruction_sigp_emergency),
- VCPU_STAT("instruction_sigp_cond_emergency", instruction_sigp_cond_emergency),
- VCPU_STAT("instruction_sigp_start", instruction_sigp_start),
- VCPU_STAT("instruction_sigp_stop", instruction_sigp_stop),
- VCPU_STAT("instruction_sigp_stop_store_status", instruction_sigp_stop_store_status),
- VCPU_STAT("instruction_sigp_store_status", instruction_sigp_store_status),
- VCPU_STAT("instruction_sigp_store_adtl_status", instruction_sigp_store_adtl_status),
- VCPU_STAT("instruction_sigp_set_arch", instruction_sigp_arch),
- VCPU_STAT("instruction_sigp_set_prefix", instruction_sigp_prefix),
- VCPU_STAT("instruction_sigp_restart", instruction_sigp_restart),
- VCPU_STAT("instruction_sigp_cpu_reset", instruction_sigp_cpu_reset),
- VCPU_STAT("instruction_sigp_init_cpu_reset", instruction_sigp_init_cpu_reset),
- VCPU_STAT("instruction_sigp_unknown", instruction_sigp_unknown),
- VCPU_STAT("instruction_diag_10", diagnose_10),
- VCPU_STAT("instruction_diag_44", diagnose_44),
- VCPU_STAT("instruction_diag_9c", diagnose_9c),
- VCPU_STAT("diag_9c_ignored", diagnose_9c_ignored),
- VCPU_STAT("instruction_diag_258", diagnose_258),
- VCPU_STAT("instruction_diag_308", diagnose_308),
- VCPU_STAT("instruction_diag_500", diagnose_500),
- VCPU_STAT("instruction_diag_other", diagnose_other),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_COUNTER(VM, inject_io),
+ STATS_DESC_COUNTER(VM, inject_float_mchk),
+ STATS_DESC_COUNTER(VM, inject_pfault_done),
+ STATS_DESC_COUNTER(VM, inject_service_signal),
+ STATS_DESC_COUNTER(VM, inject_virtio)
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, exit_userspace),
+ STATS_DESC_COUNTER(VCPU, exit_null),
+ STATS_DESC_COUNTER(VCPU, exit_external_request),
+ STATS_DESC_COUNTER(VCPU, exit_io_request),
+ STATS_DESC_COUNTER(VCPU, exit_external_interrupt),
+ STATS_DESC_COUNTER(VCPU, exit_stop_request),
+ STATS_DESC_COUNTER(VCPU, exit_validity),
+ STATS_DESC_COUNTER(VCPU, exit_instruction),
+ STATS_DESC_COUNTER(VCPU, exit_pei),
+ STATS_DESC_COUNTER(VCPU, halt_no_poll_steal),
+ STATS_DESC_COUNTER(VCPU, instruction_lctl),
+ STATS_DESC_COUNTER(VCPU, instruction_lctlg),
+ STATS_DESC_COUNTER(VCPU, instruction_stctl),
+ STATS_DESC_COUNTER(VCPU, instruction_stctg),
+ STATS_DESC_COUNTER(VCPU, exit_program_interruption),
+ STATS_DESC_COUNTER(VCPU, exit_instr_and_program),
+ STATS_DESC_COUNTER(VCPU, exit_operation_exception),
+ STATS_DESC_COUNTER(VCPU, deliver_ckc),
+ STATS_DESC_COUNTER(VCPU, deliver_cputm),
+ STATS_DESC_COUNTER(VCPU, deliver_external_call),
+ STATS_DESC_COUNTER(VCPU, deliver_emergency_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_service_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_virtio),
+ STATS_DESC_COUNTER(VCPU, deliver_stop_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_prefix_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_restart_signal),
+ STATS_DESC_COUNTER(VCPU, deliver_program),
+ STATS_DESC_COUNTER(VCPU, deliver_io),
+ STATS_DESC_COUNTER(VCPU, deliver_machine_check),
+ STATS_DESC_COUNTER(VCPU, exit_wait_state),
+ STATS_DESC_COUNTER(VCPU, inject_ckc),
+ STATS_DESC_COUNTER(VCPU, inject_cputm),
+ STATS_DESC_COUNTER(VCPU, inject_external_call),
+ STATS_DESC_COUNTER(VCPU, inject_emergency_signal),
+ STATS_DESC_COUNTER(VCPU, inject_mchk),
+ STATS_DESC_COUNTER(VCPU, inject_pfault_init),
+ STATS_DESC_COUNTER(VCPU, inject_program),
+ STATS_DESC_COUNTER(VCPU, inject_restart),
+ STATS_DESC_COUNTER(VCPU, inject_set_prefix),
+ STATS_DESC_COUNTER(VCPU, inject_stop_signal),
+ STATS_DESC_COUNTER(VCPU, instruction_epsw),
+ STATS_DESC_COUNTER(VCPU, instruction_gs),
+ STATS_DESC_COUNTER(VCPU, instruction_io_other),
+ STATS_DESC_COUNTER(VCPU, instruction_lpsw),
+ STATS_DESC_COUNTER(VCPU, instruction_lpswe),
+ STATS_DESC_COUNTER(VCPU, instruction_pfmf),
+ STATS_DESC_COUNTER(VCPU, instruction_ptff),
+ STATS_DESC_COUNTER(VCPU, instruction_sck),
+ STATS_DESC_COUNTER(VCPU, instruction_sckpf),
+ STATS_DESC_COUNTER(VCPU, instruction_stidp),
+ STATS_DESC_COUNTER(VCPU, instruction_spx),
+ STATS_DESC_COUNTER(VCPU, instruction_stpx),
+ STATS_DESC_COUNTER(VCPU, instruction_stap),
+ STATS_DESC_COUNTER(VCPU, instruction_iske),
+ STATS_DESC_COUNTER(VCPU, instruction_ri),
+ STATS_DESC_COUNTER(VCPU, instruction_rrbe),
+ STATS_DESC_COUNTER(VCPU, instruction_sske),
+ STATS_DESC_COUNTER(VCPU, instruction_ipte_interlock),
+ STATS_DESC_COUNTER(VCPU, instruction_stsi),
+ STATS_DESC_COUNTER(VCPU, instruction_stfl),
+ STATS_DESC_COUNTER(VCPU, instruction_tb),
+ STATS_DESC_COUNTER(VCPU, instruction_tpi),
+ STATS_DESC_COUNTER(VCPU, instruction_tprot),
+ STATS_DESC_COUNTER(VCPU, instruction_tsch),
+ STATS_DESC_COUNTER(VCPU, instruction_sie),
+ STATS_DESC_COUNTER(VCPU, instruction_essa),
+ STATS_DESC_COUNTER(VCPU, instruction_sthyi),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_sense),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_sense_running),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_external_call),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_emergency),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_cond_emergency),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_start),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_stop),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_stop_store_status),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_store_status),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_store_adtl_status),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_arch),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_prefix),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_restart),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_init_cpu_reset),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_cpu_reset),
+ STATS_DESC_COUNTER(VCPU, instruction_sigp_unknown),
+ STATS_DESC_COUNTER(VCPU, diagnose_10),
+ STATS_DESC_COUNTER(VCPU, diagnose_44),
+ STATS_DESC_COUNTER(VCPU, diagnose_9c),
+ STATS_DESC_COUNTER(VCPU, diagnose_9c_ignored),
+ STATS_DESC_COUNTER(VCPU, diagnose_9c_forward),
+ STATS_DESC_COUNTER(VCPU, diagnose_258),
+ STATS_DESC_COUNTER(VCPU, diagnose_308),
+ STATS_DESC_COUNTER(VCPU, diagnose_500),
+ STATS_DESC_COUNTER(VCPU, diagnose_other),
+ STATS_DESC_COUNTER(VCPU, pfault_sync)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
/* allow nested virtualization in KVM (if enabled by user space) */
@@ -185,6 +206,11 @@ static bool use_gisa = true;
module_param(use_gisa, bool, 0644);
MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");
+/* maximum diag9c forwarding per second */
+unsigned int diag9c_forwarding_hz;
+module_param(diag9c_forwarding_hz, uint, 0644);
+MODULE_PARM_DESC(diag9c_forwarding_hz, "Maximum diag9c forwarding per second, 0 to turn off");
+
/*
* For now we handle at most 16 double words as this is what the s390 base
* kernel handles and stores in the prefix page. If we ever need to go beyond
@@ -323,31 +349,31 @@ static void allow_cpu_feat(unsigned long nr)
static inline int plo_test_bit(unsigned char nr)
{
- register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
+ unsigned long function = (unsigned long)nr | 0x100;
int cc;
asm volatile(
+ " lgr 0,%[function]\n"
/* Parameter registers are ignored for "test bit" */
" plo 0,0,0,0(0)\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc)
- : "d" (r0)
- : "cc");
+ : [function] "d" (function)
+ : "cc", "0");
return cc == 0;
}
static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
{
- register unsigned long r0 asm("0") = 0; /* query function */
- register unsigned long r1 asm("1") = (unsigned long) query;
-
asm volatile(
- /* Parameter regs are ignored */
+ " lghi 0,0\n"
+ " lgr 1,%[query]\n"
+ /* Parameter registers are ignored */
" .insn rrf,%[opc] << 16,2,4,6,0\n"
:
- : "d" (r0), "a" (r1), [opc] "i" (opcode)
- : "cc", "memory");
+ : [query] "d" ((unsigned long)query), [opc] "i" (opcode)
+ : "cc", "memory", "0", "1");
}
#define INSN_SORTL 0xb938
@@ -544,6 +570,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_DIAG318:
r = 1;
break;
+ case KVM_CAP_SET_GUEST_DEBUG2:
+ r = KVM_GUESTDBG_VALID_MASK;
+ break;
case KVM_CAP_S390_HPAGE_1M:
r = 0;
if (hpage && !kvm_is_ucontrol(kvm))
@@ -704,6 +733,10 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
set_kvm_facility(kvm->arch.model.fac_mask, 152);
set_kvm_facility(kvm->arch.model.fac_list, 152);
}
+ if (test_facility(192)) {
+ set_kvm_facility(kvm->arch.model.fac_mask, 192);
+ set_kvm_facility(kvm->arch.model.fac_list, 192);
+ }
r = 0;
} else
r = -EINVAL;
@@ -4307,16 +4340,16 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu)
kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
if (MACHINE_HAS_GS) {
+ preempt_disable();
__ctl_set_bit(2, 4);
if (vcpu->arch.gs_enabled)
save_gs_cb(current->thread.gs_cb);
- preempt_disable();
current->thread.gs_cb = vcpu->arch.host_gscb;
restore_gs_cb(vcpu->arch.host_gscb);
- preempt_enable();
if (!vcpu->arch.host_gscb)
__ctl_clear_bit(2, 4);
vcpu->arch.host_gscb = NULL;
+ preempt_enable();
}
/* SIE will save etoken directly into SDNX and therefore kvm_run */
}
@@ -4542,7 +4575,7 @@ int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
/*
* As we are starting a second VCPU, we have to disable
* the IBS facility on all VCPUs to remove potentially
- * oustanding ENABLE requests.
+ * outstanding ENABLE requests.
*/
__disable_ibs_on_all_vcpus(vcpu->kvm);
}
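plo_test_bit() and __insn32_query() above drop GCC register-asm variables, which are fragile in the presence of surrounding code, and instead load the hard registers inside the asm body while declaring them as clobbers. The resulting shape, restated as a compact s390-only sketch (it only assembles on s390):

/* 'plo' consumes general register 0, which is now loaded explicitly
 * and named in the clobber list rather than bound through a
 * register-asm variable. */
static inline int query_cc(unsigned long function)
{
	int cc;

	asm volatile(
		"	lgr	0,%[function]\n"
		"	plo	0,0,0,0(0)\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc)
		: [function] "d" (function)
		: "cc", "0");
	return cc == 0;
}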
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 79dcd647b378..9fad25109b0d 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -471,4 +471,12 @@ void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
* @kvm: the KVM guest
*/
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm);
+
+/**
+ * diag9c_forwarding_hz
+ *
+ * Maximum number of diag9c yield forwards per second, 0 turns forwarding off
+ */
+extern unsigned int diag9c_forwarding_hz;
+
#endif
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index 813b6e93dc83..c8841f476e91 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -140,7 +140,12 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
/* Allocate variable storage */
vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
vlen += uv_info.guest_virt_base_stor_len;
- kvm->arch.pv.stor_var = vzalloc(vlen);
+ /*
+ * The Create Secure Configuration Ultravisor Call does not support
+ * using large pages for the virtual memory area.
+ * This is a hardware limitation.
+ */
+ kvm->arch.pv.stor_var = vmalloc_no_huge(vlen);
if (!kvm->arch.pv.stor_var)
goto out_err;
return 0;
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index bd803e091918..4002a24bc43a 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -417,11 +417,6 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
memcpy((void *)((u64)scb_o + 0xc0),
(void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
break;
- case ICPT_PARTEXEC:
- /* MVPG only */
- memcpy((void *)((u64)scb_o + 0xc0),
- (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
- break;
}
if (scb_s->ihcpu != 0xffffU)
@@ -620,10 +615,10 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
/* with mso/msl, the prefix lies at offset *mso* */
prefix += scb_s->mso;
- rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
+ rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
if (!rc && (scb_s->ecb & ECB_TE))
rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
- prefix + PAGE_SIZE);
+ prefix + PAGE_SIZE, NULL);
/*
* We don't have to mprotect, we will be called for all unshadows.
* SIE will detect if protection applies and trigger a validity.
@@ -914,7 +909,7 @@ static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
current->thread.gmap_addr, 1);
rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
- current->thread.gmap_addr);
+ current->thread.gmap_addr, NULL);
if (rc > 0) {
rc = inject_fault(vcpu, rc,
current->thread.gmap_addr,
@@ -936,7 +931,7 @@ static void handle_last_fault(struct kvm_vcpu *vcpu,
{
if (vsie_page->fault_addr)
kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
- vsie_page->fault_addr);
+ vsie_page->fault_addr, NULL);
vsie_page->fault_addr = 0;
}
@@ -984,6 +979,98 @@ static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
}
/*
+ * Get a register for a nested guest.
+ * @vcpu the vcpu of the guest
+ * @vsie_page the vsie_page for the nested guest
+ * @reg the register number; the upper 4 bits are ignored.
+ * returns: the value of the register.
+ */
+static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
+{
+ /* no need to validate the parameter and/or perform error handling */
+ reg &= 0xf;
+ switch (reg) {
+ case 15:
+ return vsie_page->scb_s.gg15;
+ case 14:
+ return vsie_page->scb_s.gg14;
+ default:
+ return vcpu->run->s.regs.gprs[reg];
+ }
+}
+
+static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
+{
+ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+ unsigned long pei_dest, pei_src, src, dest, mask, prefix;
+ u64 *pei_block = &vsie_page->scb_o->mcic;
+ int edat, rc_dest, rc_src;
+ union ctlreg0 cr0;
+
+ cr0.val = vcpu->arch.sie_block->gcr[0];
+ edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
+ mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
+ prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
+
+ dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
+ dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
+ src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
+ src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
+
+ rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
+ rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
+ /*
+ * Either everything went well, or something non-critical went wrong
+ * e.g. because of a race. In either case, simply retry.
+ */
+ if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
+ retry_vsie_icpt(vsie_page);
+ return -EAGAIN;
+ }
+ /* Something more serious went wrong, propagate the error */
+ if (rc_dest < 0)
+ return rc_dest;
+ if (rc_src < 0)
+ return rc_src;
+
+ /* The only possible suppressing exception: just deliver it */
+ if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
+ clear_vsie_icpt(vsie_page);
+ rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
+ WARN_ON_ONCE(rc_dest);
+ return 1;
+ }
+
+ /*
+ * Forward the PEI intercept to the guest if it was a page fault, or
+ * also for segment and region table faults if EDAT applies.
+ */
+ if (edat) {
+ rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
+ rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
+ } else {
+ rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
+ rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
+ }
+ if (!rc_dest && !rc_src) {
+ pei_block[0] = pei_dest;
+ pei_block[1] = pei_src;
+ return 1;
+ }
+
+ retry_vsie_icpt(vsie_page);
+
+ /*
+ * The host has edat, and the guest does not, or it was an ASCE type
+ * exception. The host needs to inject the appropriate DAT interrupts
+ * into the guest.
+ */
+ if (rc_dest)
+ return inject_fault(vcpu, rc_dest, dest, 1);
+ return inject_fault(vcpu, rc_src, src, 0);
+}
+
+/*
* Run the vsie on a shadow scb and a shadow gmap, without any further
* sanity checks, handling SIE faults.
*
@@ -1071,6 +1158,10 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if ((scb_s->ipa & 0xf000) != 0xf000)
scb_s->ipa += 0x1000;
break;
+ case ICPT_PARTEXEC:
+ if (scb_s->ipa == 0xb254)
+ rc = vsie_handle_mvpg(vcpu, vsie_page);
+ break;
}
return rc;
}
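vsie_handle_mvpg() extracts the two MVPG register numbers from the instruction-parameter field of the shadow control block; vsie_get_register() then masks them to the low four bits. A tiny stand-alone check of that bit layout (the sample IPB value is fabricated):

#include <assert.h>
#include <stdint.h>

static unsigned int mvpg_r1(uint32_t ipb) { return (ipb >> 20) & 0xf; }
static unsigned int mvpg_r2(uint32_t ipb) { return (ipb >> 16) & 0xf; }

int main(void)
{
	/* fabricated IPB encoding r1=7, r2=3 */
	uint32_t ipb = (7u << 20) | (3u << 16);

	assert(mvpg_r1(ipb) == 7);
	assert(mvpg_r2(ipb) == 3);
	return 0;
}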
diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c
index dcd8946255be..2f32802f79ce 100644
--- a/arch/s390/lib/test_unwind.c
+++ b/arch/s390/lib/test_unwind.c
@@ -64,8 +64,8 @@ static noinline int test_unwind(struct task_struct *task, struct pt_regs *regs,
break;
if (state.reliable && !addr) {
pr_err("unwind state reliable but addr is 0\n");
- kfree(bt);
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
sprint_symbol(sym, addr);
if (bt_pos < BT_BUF_SIZE) {
@@ -296,19 +296,22 @@ static int test_unwind_flags(int flags)
static int test_unwind_init(void)
{
- int ret = 0;
+ int failed = 0;
+ int total = 0;
#define TEST(flags) \
do { \
pr_info("[ RUN ] " #flags "\n"); \
+ total++; \
if (!test_unwind_flags((flags))) { \
pr_info("[ OK ] " #flags "\n"); \
} else { \
pr_err("[ FAILED ] " #flags "\n"); \
- ret = -EINVAL; \
+ failed++; \
} \
} while (0)
+ pr_info("running stack unwinder tests");
TEST(UWM_DEFAULT);
TEST(UWM_SP);
TEST(UWM_REGS);
@@ -335,8 +338,14 @@ do { \
TEST(UWM_PGM | UWM_SP | UWM_REGS);
#endif
#undef TEST
+ if (failed) {
+ pr_err("%d of %d stack unwinder tests failed", failed, total);
+ WARN(1, "%d of %d stack unwinder tests failed", failed, total);
+ } else {
+ pr_info("all %d stack unwinder tests passed", total);
+ }
- return ret;
+ return failed ? -EINVAL : 0;
}
static void test_unwind_exit(void)
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index e30c7c781172..8ae3dc5783fd 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -702,7 +702,7 @@ static void pfault_interrupt(struct ext_code ext_code,
* interrupt since it must be a leftover of a PFAULT
* CANCEL operation which didn't remove all pending
* completion interrupts. */
- if (tsk->state == TASK_RUNNING)
+ if (task_is_running(tsk))
tsk->thread.pfault_wait = -1;
}
} else {
@@ -783,6 +783,7 @@ early_initcall(pfault_irq_init);
#endif /* CONFIG_PFAULT */
#if IS_ENABLED(CONFIG_PGSTE)
+
void do_secure_storage_access(struct pt_regs *regs)
{
unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
@@ -859,19 +860,4 @@ void do_secure_storage_violation(struct pt_regs *regs)
send_sig(SIGSEGV, current, 0);
}
-#else
-void do_secure_storage_access(struct pt_regs *regs)
-{
- default_trap_handler(regs);
-}
-
-void do_non_secure_storage_access(struct pt_regs *regs)
-{
- default_trap_handler(regs);
-}
-
-void do_secure_storage_violation(struct pt_regs *regs)
-{
- default_trap_handler(regs);
-}
-#endif
+#endif /* CONFIG_PGSTE */
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 3b5a4d25ca9b..da36d13ffc16 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -189,7 +189,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
return pte;
}
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
{
pgd_t *pgdp;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 0e76b2127dc6..8ac710de1ab1 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -209,8 +209,6 @@ void __init mem_init(void)
setup_zero_pages(); /* Setup zeroed pages. */
cmma_init_nodat();
-
- mem_init_print_info(NULL);
}
void free_initmem(void)
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index 567c69f3069e..7f0e154a470a 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -112,7 +112,7 @@ static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
next = pmd_addr_end(addr, end);
if (pmd_none(*pmd) || pmd_large(*pmd))
continue;
- page = virt_to_page(pmd_val(*pmd));
+ page = phys_to_page(pmd_val(*pmd));
set_bit(PG_arch_1, &page->flags);
} while (pmd++, addr = next, addr != end);
}
@@ -130,7 +130,7 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
if (pud_none(*pud) || pud_large(*pud))
continue;
if (!pud_folded(*pud)) {
- page = virt_to_page(pud_val(*pud));
+ page = phys_to_page(pud_val(*pud));
for (i = 0; i < 3; i++)
set_bit(PG_arch_1, &page[i].flags);
}
@@ -151,7 +151,7 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
if (p4d_none(*p4d))
continue;
if (!p4d_folded(*p4d)) {
- page = virt_to_page(p4d_val(*p4d));
+ page = phys_to_page(p4d_val(*p4d));
for (i = 0; i < 3; i++)
set_bit(PG_arch_1, &page[i].flags);
}
@@ -173,7 +173,7 @@ static void mark_kernel_pgd(void)
if (pgd_none(*pgd))
continue;
if (!pgd_folded(*pgd)) {
- page = virt_to_page(pgd_val(*pgd));
+ page = phys_to_page(pgd_val(*pgd));
for (i = 0; i < 3; i++)
set_bit(PG_arch_1, &page[i].flags);
}
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 18205f851c24..eec3a9d7176e 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -691,7 +691,7 @@ static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
if (!non_swap_entry(entry))
dec_mm_counter(mm, MM_SWAPENTS);
else if (is_migration_entry(entry)) {
- struct page *page = migration_entry_to_page(entry);
+ struct page *page = pfn_swap_entry_to_page(entry);
dec_mm_counter(mm, mm_counter(page));
}
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index f973e2ead197..63cae0476bb4 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1209,21 +1209,67 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
*/
case BPF_STX | BPF_ATOMIC | BPF_DW:
case BPF_STX | BPF_ATOMIC | BPF_W:
- if (insn->imm != BPF_ADD) {
+ {
+ bool is32 = BPF_SIZE(insn->code) == BPF_W;
+
+ switch (insn->imm) {
+/* {op32|op64} {%w0|%src},%src,off(%dst) */
+#define EMIT_ATOMIC(op32, op64) do { \
+ EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \
+ (insn->imm & BPF_FETCH) ? src_reg : REG_W0, \
+ src_reg, dst_reg, off); \
+ if (is32 && (insn->imm & BPF_FETCH)) \
+ EMIT_ZERO(src_reg); \
+} while (0)
+ case BPF_ADD:
+ case BPF_ADD | BPF_FETCH:
+ /* {laal|laalg} */
+ EMIT_ATOMIC(0x00fa, 0x00ea);
+ break;
+ case BPF_AND:
+ case BPF_AND | BPF_FETCH:
+ /* {lan|lang} */
+ EMIT_ATOMIC(0x00f4, 0x00e4);
+ break;
+ case BPF_OR:
+ case BPF_OR | BPF_FETCH:
+ /* {lao|laog} */
+ EMIT_ATOMIC(0x00f6, 0x00e6);
+ break;
+ case BPF_XOR:
+ case BPF_XOR | BPF_FETCH:
+ /* {lax|laxg} */
+ EMIT_ATOMIC(0x00f7, 0x00e7);
+ break;
+#undef EMIT_ATOMIC
+ case BPF_XCHG:
+ /* {ly|lg} %w0,off(%dst) */
+ EMIT6_DISP_LH(0xe3000000,
+ is32 ? 0x0058 : 0x0004, REG_W0, REG_0,
+ dst_reg, off);
+ /* 0: {csy|csg} %w0,%src,off(%dst) */
+ EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
+ REG_W0, src_reg, dst_reg, off);
+ /* brc 4,0b */
+ EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6);
+ /* {llgfr|lgr} %src,%w0 */
+ EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0);
+ if (is32 && insn_is_zext(&insn[1]))
+ insn_count = 2;
+ break;
+ case BPF_CMPXCHG:
+ /* 0: {csy|csg} %b0,%src,off(%dst) */
+ EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030,
+ BPF_REG_0, src_reg, dst_reg, off);
+ break;
+ default:
pr_err("Unknown atomic operation %02x\n", insn->imm);
return -1;
}
- /* *(u32/u64 *)(dst + off) += src
- *
- * BFW_W: laal %w0,%src,off(%dst)
- * BPF_DW: laalg %w0,%src,off(%dst)
- */
- EMIT6_DISP_LH(0xeb000000,
- BPF_SIZE(insn->code) == BPF_W ? 0x00fa : 0x00ea,
- REG_W0, src_reg, dst_reg, off);
jit->seen |= SEEN_MEM;
break;
+ }
/*
* BPF_LDX
*/
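The JIT hunk above extends s390 eBPF from plain atomic add to the full BPF_ATOMIC set: fetch-and-op via the interlocked-access instructions (laal, lan, lao, lax and their 64-bit forms), exchange via a compare-and-swap loop, and compare-and-exchange mapped straight onto csy/csg. On the compiler side these opcodes originate from the __sync_* builtins; a hedged BPF C fragment, assuming clang 12+ with -target bpf -mcpu=v3:

#include <stdint.h>

uint64_t counter;

uint64_t bump(void)
{
	/* lowered to BPF_ADD | BPF_FETCH because the result is used */
	uint64_t old = __sync_fetch_and_add(&counter, 1);

	/* lowered to BPF_CMPXCHG: store 100 only if the counter is 10 */
	__sync_val_compare_and_swap(&counter, 10, 100);
	return old;
}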
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 600881d894dd..b0993e05affe 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -538,6 +538,7 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev,
zdev->bars[i].res = res;
pci_add_resource(resources, res);
}
+ zdev->has_resources = 1;
return 0;
}
@@ -554,6 +555,7 @@ static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
release_resource(zdev->bars[i].res);
kfree(zdev->bars[i].res);
}
+ zdev->has_resources = 0;
}
int pcibios_add_device(struct pci_dev *pdev)
@@ -661,7 +663,6 @@ int zpci_enable_device(struct zpci_dev *zdev)
if (rc)
goto out_dma;
- zdev->state = ZPCI_FN_STATE_ONLINE;
return 0;
out_dma:
@@ -669,7 +670,6 @@ out_dma:
out:
return rc;
}
-EXPORT_SYMBOL_GPL(zpci_enable_device);
int zpci_disable_device(struct zpci_dev *zdev)
{
@@ -680,20 +680,6 @@ int zpci_disable_device(struct zpci_dev *zdev)
*/
return clp_disable_fh(zdev);
}
-EXPORT_SYMBOL_GPL(zpci_disable_device);
-
-void zpci_remove_device(struct zpci_dev *zdev)
-{
- struct zpci_bus *zbus = zdev->zbus;
- struct pci_dev *pdev;
-
- pdev = pci_get_slot(zbus->bus, zdev->devfn);
- if (pdev) {
- if (pdev->is_virtfn)
- return zpci_iov_remove_virtfn(pdev, zdev->vfn);
- pci_stop_and_remove_bus_device_locked(pdev);
- }
-}
/**
* zpci_create_device() - Create a new zpci_dev and add it to the zbus
@@ -704,9 +690,9 @@ void zpci_remove_device(struct zpci_dev *zdev)
* Creates a new zpci device and adds it to its, possibly newly created, zbus
* as well as zpci_list.
*
- * Returns: 0 on success, an error value otherwise
+ * Returns: the zdev on success or an error pointer otherwise
*/
-int zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
+struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
struct zpci_dev *zdev;
int rc;
@@ -714,7 +700,7 @@ int zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
if (!zdev)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
/* FID and Function Handle are the static/dynamic identifiers */
zdev->fid = fid;
@@ -733,44 +719,105 @@ int zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
if (rc)
goto error;
- if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
- rc = zpci_enable_device(zdev);
- if (rc)
- goto error_destroy_iommu;
- }
-
rc = zpci_bus_device_register(zdev, &pci_root_ops);
if (rc)
- goto error_disable;
+ goto error_destroy_iommu;
spin_lock(&zpci_list_lock);
list_add_tail(&zdev->entry, &zpci_list);
spin_unlock(&zpci_list_lock);
- return 0;
+ return zdev;
-error_disable:
- if (zdev->state == ZPCI_FN_STATE_ONLINE)
- zpci_disable_device(zdev);
error_destroy_iommu:
zpci_destroy_iommu(zdev);
error:
zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
kfree(zdev);
+ return ERR_PTR(rc);
+}
+
+/**
+ * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
+ * @zdev: The zpci_dev to be configured
+ * @fh: The general function handle supplied by the platform
+ *
+ * Given a device in the configuration state Configured, enables, scans and
+ * adds it to the common code PCI subsystem if possible. If the PCI device is
+ * parked because we cannot yet create a PCI bus since function 0 has not
+ * been seen, it is ignored but will be scanned once function 0 appears.
+ * If any failure occurs, the zpci_dev is left disabled.
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
+{
+ int rc;
+
+ zdev->fh = fh;
+ /* the PCI function will be scanned once function 0 appears */
+ if (!zdev->zbus->bus)
+ return 0;
+
+ /* For function 0 on a multi-function bus scan the whole bus, as we
+ * might have to pick up existing functions that were waiting for it
+ * to allow creating the PCI bus
+ */
+ if (zdev->devfn == 0 && zdev->zbus->multifunction)
+ rc = zpci_bus_scan_bus(zdev->zbus);
+ else
+ rc = zpci_bus_scan_device(zdev);
+
return rc;
}
+/**
+ * zpci_deconfigure_device() - Deconfigure a zpci_dev
+ * @zdev: The zpci_dev to configure
+ *
+ * Deconfigure a zPCI function that is currently configured and possibly known
+ * to the common code PCI subsystem.
+ * If any failure occurs the device is left as is.
+ *
+ * Return: 0 on success, or an error code otherwise
+ */
+int zpci_deconfigure_device(struct zpci_dev *zdev)
+{
+ int rc;
+
+ if (zdev->zbus->bus)
+ zpci_bus_remove_device(zdev, false);
+
+ if (zdev_enabled(zdev)) {
+ rc = zpci_disable_device(zdev);
+ if (rc)
+ return rc;
+ }
+
+ rc = sclp_pci_deconfigure(zdev->fid);
+ zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, rc);
+ if (rc)
+ return rc;
+ zdev->state = ZPCI_FN_STATE_STANDBY;
+
+ return 0;
+}
+
void zpci_release_device(struct kref *kref)
{
struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
+ int ret;
if (zdev->zbus->bus)
- zpci_remove_device(zdev);
+ zpci_bus_remove_device(zdev, false);
+
+ if (zdev_enabled(zdev))
+ zpci_disable_device(zdev);
switch (zdev->state) {
- case ZPCI_FN_STATE_ONLINE:
case ZPCI_FN_STATE_CONFIGURED:
- zpci_disable_device(zdev);
+ ret = sclp_pci_deconfigure(zdev->fid);
+ zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
fallthrough;
case ZPCI_FN_STATE_STANDBY:
if (zdev->has_hp_slot)
@@ -905,6 +952,7 @@ static int __init pci_base_init(void)
rc = clp_scan_pci_devices();
if (rc)
goto out_find;
+ zpci_bus_scan_busses();
s390_pci_initialized = 1;
return 0;
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
index 755b46f4c595..9629f9779c79 100644
--- a/arch/s390/pci/pci_bus.c
+++ b/arch/s390/pci/pci_bus.c
@@ -27,28 +27,184 @@
#include "pci_iov.h"
static LIST_HEAD(zbus_list);
-static DEFINE_SPINLOCK(zbus_list_lock);
+static DEFINE_MUTEX(zbus_list_lock);
static int zpci_nb_devices;
-/* zpci_bus_scan
+/* zpci_bus_prepare_device - Prepare a zPCI function for scanning
+ * @zdev: the zPCI function to be prepared
+ *
+ * The PCI resources for the function are set up and added to its zbus and the
+ * function is enabled. The function must be added to a zbus which must have
+ * a PCI bus created. If an error occurs the zPCI function is not enabled.
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+static int zpci_bus_prepare_device(struct zpci_dev *zdev)
+{
+ struct resource_entry *window, *n;
+ struct resource *res;
+ int rc;
+
+ if (!zdev_enabled(zdev)) {
+ rc = zpci_enable_device(zdev);
+ if (rc)
+ return rc;
+ }
+
+ if (!zdev->has_resources) {
+ zpci_setup_bus_resources(zdev, &zdev->zbus->resources);
+ resource_list_for_each_entry_safe(window, n, &zdev->zbus->resources) {
+ res = window->res;
+ pci_bus_add_resource(zdev->zbus->bus, res, 0);
+ }
+ }
+
+ return 0;
+}
+
+/* zpci_bus_scan_device - Scan a single device adding it to the PCI core
+ * @zdev: the zdev to be scanned
+ *
+ * Scans the PCI function making it available to the common PCI code.
+ *
+ * Return: 0 on success, an error value otherwise
+ */
+int zpci_bus_scan_device(struct zpci_dev *zdev)
+{
+ struct pci_dev *pdev;
+ int rc;
+
+ rc = zpci_bus_prepare_device(zdev);
+ if (rc)
+ return rc;
+
+ pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn);
+ if (!pdev)
+ return -ENODEV;
+
+ pci_bus_add_device(pdev);
+ pci_lock_rescan_remove();
+ pci_bus_add_devices(zdev->zbus->bus);
+ pci_unlock_rescan_remove();
+
+ return 0;
+}
+
+/* zpci_bus_remove_device - Removes the given zdev from the PCI core
+ * @zdev: the zdev to be removed from the PCI core
+ * @set_error: if true the device's error state is set to permanent failure
+ *
+ * Sets a zPCI device to a configured but offline state; the zPCI
+ * device is still accessible through its hotplug slot and the zPCI
+ * API but is removed from the common code PCI bus, making it
+ * no longer available to drivers.
+ */
+void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error)
+{
+ struct zpci_bus *zbus = zdev->zbus;
+ struct pci_dev *pdev;
+
+ if (!zdev->zbus->bus)
+ return;
+
+ pdev = pci_get_slot(zbus->bus, zdev->devfn);
+ if (pdev) {
+ if (set_error)
+ pdev->error_state = pci_channel_io_perm_failure;
+ if (pdev->is_virtfn) {
+ zpci_iov_remove_virtfn(pdev, zdev->vfn);
+ /* balance pci_get_slot */
+ pci_dev_put(pdev);
+ return;
+ }
+ pci_stop_and_remove_bus_device_locked(pdev);
+ /* balance pci_get_slot */
+ pci_dev_put(pdev);
+ }
+}
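/*
 * Editor's sketch (not part of the patch): zpci_bus_remove_device() and
 * zpci_bus_scan_device() pair up for a detach/re-attach cycle, e.g. in a
 * hypothetical recovery path:
 */
static int example_recover_function(struct zpci_dev *zdev)
{
	/* Detach from the common PCI core; the zPCI function stays known */
	zpci_bus_remove_device(zdev, false);
	/* ...a function-level reset would go here (omitted)... */
	/* Re-attach: prepares, enables and rescans the single function */
	return zpci_bus_scan_device(zdev);
}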
+
+/* zpci_bus_scan_bus - Scan all configured zPCI functions on the bus
+ * @zbus: the zbus to be scanned
+ *
+ * Enables and scans all PCI functions on the bus, making them available to
+ * the common PCI code. If there is no function 0 on the zbus, nothing is
+ * scanned. If a function does not have a slot yet because it was added to
+ * the zbus before function 0, the slot is created. If a PCI function fails
+ * to be initialized an error is returned, but all other functions on the
+ * bus are still attempted.
+ *
+ * Return: 0 on success, an error value otherwise
+ */
+int zpci_bus_scan_bus(struct zpci_bus *zbus)
+{
+ struct zpci_dev *zdev;
+ int devfn, rc, ret = 0;
+
+ if (!zbus->function[0])
+ return 0;
+
+ for (devfn = 0; devfn < ZPCI_FUNCTIONS_PER_BUS; devfn++) {
+ zdev = zbus->function[devfn];
+ if (zdev && zdev->state == ZPCI_FN_STATE_CONFIGURED) {
+ rc = zpci_bus_prepare_device(zdev);
+ if (rc)
+ ret = -EIO;
+ }
+ }
+
+ pci_lock_rescan_remove();
+ pci_scan_child_bus(zbus->bus);
+ pci_bus_add_devices(zbus->bus);
+ pci_unlock_rescan_remove();
+
+ return ret;
+}
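/*
 * Editor's note (not part of the patch): this mirrors the dispatch at the
 * top of this diff -- function 0 of a multifunction zbus triggers a scan
 * of the whole bus, while any other function is scanned on its own:
 */
static int example_scan_dispatch(struct zpci_dev *zdev)
{
	if (zdev->devfn == 0 && zdev->zbus->multifunction)
		return zpci_bus_scan_bus(zdev->zbus);	/* all functions */
	return zpci_bus_scan_device(zdev);		/* just this one */
}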
+
+/* zpci_bus_scan_busses - Scan all registered busses
+ *
+ * Scan all currently registered zbusses.
+ */
+void zpci_bus_scan_busses(void)
+{
+ struct zpci_bus *zbus = NULL;
+
+ mutex_lock(&zbus_list_lock);
+ list_for_each_entry(zbus, &zbus_list, bus_next) {
+ zpci_bus_scan_bus(zbus);
+ cond_resched();
+ }
+ mutex_unlock(&zbus_list_lock);
+}
+
+/* zpci_bus_create_pci_bus - Create the PCI bus associated with this zbus
* @zbus: the zbus holding the zdevices
+ * @f0: function 0 of the bus
* @ops: the pci operations
*
- * The domain number must be set before pci_scan_root_bus is called.
- * This function can be called once the domain is known, hence
- * when the function_0 is dicovered.
+ * Function zero is taken as a parameter because it is used to determine the
+ * domain, multifunction property and maximum bus speed of the entire bus.
+ *
+ * Return: 0 on success, an error code otherwise
*/
-static int zpci_bus_scan(struct zpci_bus *zbus, int domain, struct pci_ops *ops)
+static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *f0, struct pci_ops *ops)
{
struct pci_bus *bus;
- int rc;
+ int domain;
- rc = zpci_alloc_domain(domain);
- if (rc < 0)
- return rc;
- zbus->domain_nr = rc;
+ domain = zpci_alloc_domain((u16)f0->uid);
+ if (domain < 0)
+ return domain;
+
+ zbus->domain_nr = domain;
+ zbus->multifunction = f0->rid_available;
+ zbus->max_bus_speed = f0->max_bus_speed;
- bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, ops, zbus, &zbus->resources);
+ /*
+ * Note that pci_create_root_bus() takes over zbus->resources, which
+ * is empty after a successful call.
+ */
+ bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, ops, zbus, &zbus->resources);
if (!bus) {
zpci_free_domain(zbus->domain_nr);
return -EFAULT;
@@ -56,6 +212,7 @@ static int zpci_bus_scan(struct zpci_bus *zbus, int domain, struct pci_ops *ops)
zbus->bus = bus;
pci_bus_add_devices(bus);
+
return 0;
}
@@ -74,9 +231,9 @@ static void zpci_bus_release(struct kref *kref)
pci_unlock_rescan_remove();
}
- spin_lock(&zbus_list_lock);
+ mutex_lock(&zbus_list_lock);
list_del(&zbus->bus_next);
- spin_unlock(&zbus_list_lock);
+ mutex_unlock(&zbus_list_lock);
kfree(zbus);
}
@@ -89,7 +246,7 @@ static struct zpci_bus *zpci_bus_get(int pchid)
{
struct zpci_bus *zbus;
- spin_lock(&zbus_list_lock);
+ mutex_lock(&zbus_list_lock);
list_for_each_entry(zbus, &zbus_list, bus_next) {
if (pchid == zbus->pchid) {
kref_get(&zbus->kref);
@@ -98,7 +255,7 @@ static struct zpci_bus *zpci_bus_get(int pchid)
}
zbus = NULL;
out_unlock:
- spin_unlock(&zbus_list_lock);
+ mutex_unlock(&zbus_list_lock);
return zbus;
}
@@ -112,9 +269,9 @@ static struct zpci_bus *zpci_bus_alloc(int pchid)
zbus->pchid = pchid;
INIT_LIST_HEAD(&zbus->bus_next);
- spin_lock(&zbus_list_lock);
+ mutex_lock(&zbus_list_lock);
list_add_tail(&zbus->bus_next, &zbus_list);
- spin_unlock(&zbus_list_lock);
+ mutex_unlock(&zbus_list_lock);
kref_init(&zbus->kref);
INIT_LIST_HEAD(&zbus->resources);
@@ -141,53 +298,77 @@ void pcibios_bus_add_device(struct pci_dev *pdev)
}
}
-static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
+/* zpci_bus_create_hotplug_slots - Add hotplug slot(s) for device added to bus
+ * @zdev: the zPCI device that was newly added
+ *
+ * Add the hotplug slot(s) for the newly added PCI function. Normally this is
+ * simply the slot for the function itself. If, however, we are adding
+ * function 0 of a zbus, other functions may already have been registered
+ * on that zbus whose hotplug slots could not be created yet, so add those
+ * now too.
+ *
+ * Return: 0 on success, an error code otherwise
+ */
+static int zpci_bus_create_hotplug_slots(struct zpci_dev *zdev)
{
- struct pci_bus *bus;
- struct resource_entry *window, *n;
- struct resource *res;
- struct pci_dev *pdev;
- int rc;
-
- bus = zbus->bus;
- if (!bus)
- return -EINVAL;
-
- pdev = pci_get_slot(bus, zdev->devfn);
- if (pdev) {
- /* Device is already known. */
- pci_dev_put(pdev);
- return 0;
- }
+ struct zpci_bus *zbus = zdev->zbus;
+ int devfn, rc = 0;
rc = zpci_init_slot(zdev);
if (rc)
return rc;
zdev->has_hp_slot = 1;
- resource_list_for_each_entry_safe(window, n, &zbus->resources) {
- res = window->res;
- pci_bus_add_resource(bus, res, 0);
- }
+ if (zdev->devfn == 0 && zbus->multifunction) {
+ /* Now that function 0 is there we can finally create the
+ * hotplug slots for those functions with devfn != 0 that have
+ * been parked in zbus->function[] waiting for us to be able to
+ * create the PCI bus.
+ */
+ for (devfn = 1; devfn < ZPCI_FUNCTIONS_PER_BUS; devfn++) {
+ zdev = zbus->function[devfn];
+ if (zdev && !zdev->has_hp_slot) {
+ rc = zpci_init_slot(zdev);
+ if (rc)
+ return rc;
+ zdev->has_hp_slot = 1;
+ }
+ }
- pdev = pci_scan_single_device(bus, zdev->devfn);
- if (pdev)
- pci_bus_add_device(pdev);
+ }
- return 0;
+ return rc;
}
-static void zpci_bus_add_devices(struct zpci_bus *zbus)
+static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
{
- int i;
+ int rc = -EINVAL;
- for (i = 1; i < ZPCI_FUNCTIONS_PER_BUS; i++)
- if (zbus->function[i])
- zpci_bus_add_device(zbus, zbus->function[i]);
+ zdev->zbus = zbus;
+ if (zbus->function[zdev->devfn]) {
+ pr_err("devfn %04x is already assigned\n", zdev->devfn);
+ return rc;
+ }
+ zbus->function[zdev->devfn] = zdev;
+ zpci_nb_devices++;
- pci_lock_rescan_remove();
- pci_bus_add_devices(zbus->bus);
- pci_unlock_rescan_remove();
+ if (zbus->bus) {
+ if (zbus->multifunction && !zdev->rid_available) {
+ WARN_ONCE(1, "rid_available not set for multifunction\n");
+ goto error;
+ }
+
+ zpci_bus_create_hotplug_slots(zdev);
+ } else {
+ /* Hotplug slot will be created once function 0 appears */
+ zbus->multifunction = 1;
+ }
+
+ return 0;
+
+error:
+ zbus->function[zdev->devfn] = NULL;
+ zpci_nb_devices--;
+ return rc;
}
int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
@@ -200,7 +381,6 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
zdev->fid, ZPCI_NR_DEVICES);
return -ENOSPC;
}
- zpci_nb_devices++;
if (zdev->devfn >= ZPCI_FUNCTIONS_PER_BUS)
return -EINVAL;
@@ -214,51 +394,18 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
return -ENOMEM;
}
- zdev->zbus = zbus;
- if (zbus->function[zdev->devfn]) {
- pr_err("devfn %04x is already assigned\n", zdev->devfn);
- goto error; /* rc already set */
- }
- zbus->function[zdev->devfn] = zdev;
-
- zpci_setup_bus_resources(zdev, &zbus->resources);
-
- if (zbus->bus) {
- if (!zbus->multifunction) {
- WARN_ONCE(1, "zbus is not multifunction\n");
- goto error_bus;
- }
- if (!zdev->rid_available) {
- WARN_ONCE(1, "rid_available not set for multifunction\n");
- goto error_bus;
- }
- rc = zpci_bus_add_device(zbus, zdev);
- if (rc)
- goto error_bus;
- } else if (zdev->devfn == 0) {
- if (zbus->multifunction && !zdev->rid_available) {
- WARN_ONCE(1, "rid_available not set on function 0 for multifunction\n");
- goto error_bus;
- }
- rc = zpci_bus_scan(zbus, (u16)zdev->uid, ops);
- if (rc)
- goto error_bus;
- zpci_bus_add_devices(zbus);
- rc = zpci_init_slot(zdev);
+ if (zdev->devfn == 0) {
+ rc = zpci_bus_create_pci_bus(zbus, zdev, ops);
if (rc)
- goto error_bus;
- zdev->has_hp_slot = 1;
- zbus->multifunction = zdev->rid_available;
- zbus->max_bus_speed = zdev->max_bus_speed;
- } else {
- zbus->multifunction = 1;
+ goto error;
}
+ rc = zpci_bus_add_device(zbus, zdev);
+ if (rc)
+ goto error;
+
return 0;
-error_bus:
- zpci_nb_devices--;
- zbus->function[zdev->devfn] = NULL;
error:
pr_err("Adding PCI function %08x failed\n", zdev->fid);
zpci_bus_put(zbus);
diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
index f8dfac0b5b71..b877a97e6745 100644
--- a/arch/s390/pci/pci_bus.h
+++ b/arch/s390/pci/pci_bus.h
@@ -10,6 +10,12 @@
int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops);
void zpci_bus_device_unregister(struct zpci_dev *zdev);
+int zpci_bus_scan_bus(struct zpci_bus *zbus);
+void zpci_bus_scan_busses(void);
+
+int zpci_bus_scan_device(struct zpci_dev *zdev);
+void zpci_bus_remove_device(struct zpci_dev *zdev, bool set_error);
+
void zpci_release_device(struct kref *kref);
static inline void zpci_zdev_put(struct zpci_dev *zdev)
{
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index b4162da4e8a2..cd447b96b4b1 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <asm/pci_debug.h>
+#include <asm/pci_dma.h>
#include <asm/sclp.h>
#include "pci_bus.h"
@@ -73,15 +74,24 @@ void zpci_event_error(void *data)
__zpci_event_error(data);
}
+static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
+{
+ zdev->fh = fh;
+ /* Give the driver a hint that the function is
+ * already unusable.
+ */
+ zpci_bus_remove_device(zdev, true);
+ /* Even though the device is already gone we still
+ * need to free zPCI resources as part of the disable.
+ */
+ zpci_disable_device(zdev);
+ zdev->state = ZPCI_FN_STATE_STANDBY;
+}
+
static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
{
struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
- struct pci_dev *pdev = NULL;
enum zpci_state state;
- int ret;
-
- if (zdev && zdev->zbus->bus)
- pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
zpci_err("avail CCDF:\n");
zpci_err_hex(ccdf, sizeof(*ccdf));
@@ -89,70 +99,46 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
switch (ccdf->pec) {
case 0x0301: /* Reserved|Standby -> Configured */
if (!zdev) {
- zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED);
- break;
+ zdev = zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED);
+ if (IS_ERR(zdev))
+ break;
+ } else {
+ /* the configuration request may be stale */
+ if (zdev->state != ZPCI_FN_STATE_STANDBY)
+ break;
+ zdev->state = ZPCI_FN_STATE_CONFIGURED;
}
- /* the configuration request may be stale */
- if (zdev->state != ZPCI_FN_STATE_STANDBY)
- break;
- zdev->fh = ccdf->fh;
- zdev->state = ZPCI_FN_STATE_CONFIGURED;
- ret = zpci_enable_device(zdev);
- if (ret)
- break;
-
- /* the PCI function will be scanned once function 0 appears */
- if (!zdev->zbus->bus)
- break;
-
- pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn);
- if (!pdev)
- break;
-
- pci_bus_add_device(pdev);
- pci_lock_rescan_remove();
- pci_bus_add_devices(zdev->zbus->bus);
- pci_unlock_rescan_remove();
+ zpci_scan_configured_device(zdev, ccdf->fh);
break;
case 0x0302: /* Reserved -> Standby */
- if (!zdev) {
+ if (!zdev)
zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY);
- break;
- }
- zdev->fh = ccdf->fh;
+ else
+ zdev->fh = ccdf->fh;
break;
case 0x0303: /* Deconfiguration requested */
- if (!zdev)
- break;
- if (pdev)
- zpci_remove_device(zdev);
-
- ret = zpci_disable_device(zdev);
- if (ret)
- break;
-
- ret = sclp_pci_deconfigure(zdev->fid);
- zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
- if (!ret)
- zdev->state = ZPCI_FN_STATE_STANDBY;
-
+ if (zdev) {
+ /* The event may have been queued before we configured
+ * the device.
+ */
+ if (zdev->state != ZPCI_FN_STATE_CONFIGURED)
+ break;
+ zdev->fh = ccdf->fh;
+ zpci_deconfigure_device(zdev);
+ }
break;
case 0x0304: /* Configured -> Standby|Reserved */
- if (!zdev)
- break;
- if (pdev) {
- /* Give the driver a hint that the function is
- * already unusable. */
- pdev->error_state = pci_channel_io_perm_failure;
- zpci_remove_device(zdev);
- }
-
- zdev->fh = ccdf->fh;
- zpci_disable_device(zdev);
- zdev->state = ZPCI_FN_STATE_STANDBY;
- if (!clp_get_state(ccdf->fid, &state) &&
- state == ZPCI_FN_STATE_RESERVED) {
- zpci_zdev_put(zdev);
+ if (zdev) {
+ /* The event may have been queued before we configured
+ * the device.
+ */
+ if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
+ zpci_event_hard_deconfigured(zdev, ccdf->fh);
+ /* The 0x0304 event may immediately reserve the device */
+ if (!clp_get_state(zdev->fid, &state) &&
+ state == ZPCI_FN_STATE_RESERVED) {
+ zpci_zdev_put(zdev);
+ }
}
break;
case 0x0306: /* 0x308 or 0x302 for multiple devices */
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index 5c028bee91b9..6e2450c2b9c1 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -131,6 +131,45 @@ static ssize_t report_error_write(struct file *filp, struct kobject *kobj,
}
static BIN_ATTR(report_error, S_IWUSR, NULL, report_error_write, PAGE_SIZE);
+static ssize_t uid_is_unique_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", zpci_unique_uid ? 1 : 0);
+}
+static DEVICE_ATTR_RO(uid_is_unique);
+
+#ifndef CONFIG_DMI
+/* analogous to smbios index */
+static ssize_t index_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
+ u32 index = ~0;
+
+ if (zpci_unique_uid)
+ index = zdev->uid;
+
+ return sysfs_emit(buf, "%u\n", index);
+}
+static DEVICE_ATTR_RO(index);
+
+static umode_t zpci_index_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ return zpci_unique_uid ? attr->mode : 0;
+}
+
+static struct attribute *zpci_ident_attrs[] = {
+ &dev_attr_index.attr,
+ NULL,
+};
+
+static struct attribute_group zpci_ident_attr_group = {
+ .attrs = zpci_ident_attrs,
+ .is_visible = zpci_index_is_visible,
+};
+#endif
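/*
 * Editor's sketch (not part of the patch): user space can consume the new
 * attribute once zpci_unique_uid is set; the device address below is
 * hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int index;
	FILE *f = fopen("/sys/bus/pci/devices/0000:00:00.0/index", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%u", &index) != 1) {
		fclose(f);
		return 1;
	}
	printf("firmware index: %u\n", index);
	fclose(f);
	return 0;
}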
+
static struct bin_attribute *zpci_bin_attrs[] = {
&bin_attr_util_string,
&bin_attr_report_error,
@@ -148,8 +187,10 @@ static struct attribute *zpci_dev_attrs[] = {
&dev_attr_uid.attr,
&dev_attr_recover.attr,
&dev_attr_mio_enabled.attr,
+ &dev_attr_uid_is_unique.attr,
NULL,
};
+
static struct attribute_group zpci_attr_group = {
.attrs = zpci_dev_attrs,
.bin_attrs = zpci_bin_attrs,
@@ -170,5 +211,8 @@ static struct attribute_group pfip_attr_group = {
const struct attribute_group *zpci_attr_groups[] = {
&zpci_attr_group,
&pfip_attr_group,
+#ifndef CONFIG_DMI
+ &zpci_ident_attr_group,
+#endif
NULL,
};
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index 61ce5b59b828..606324e56e4e 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -115,6 +115,10 @@ static struct facility_def facility_defs[] = {
12, /* AP Query Configuration Information */
15, /* AP Facilities Test */
156, /* etoken facility */
+ 165, /* nnpa facility */
+ 193, /* bear enhancement facility */
+ 194, /* rdp enhancement facility */
+ 196, /* processor activity instrumentation facility */
-1 /* END */
}
},
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index e798e55915c2..45a0549421cd 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -2,6 +2,8 @@
config SUPERH
def_bool y
select ARCH_32BIT_OFF_T
+ select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM && MMU
+ select ARCH_ENABLE_MEMORY_HOTREMOVE if SPARSEMEM && MMU
select ARCH_HAVE_CUSTOM_GPIO_H
select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A)
select ARCH_HAS_BINFMT_FLAT if !MMU
@@ -12,7 +14,6 @@ config SUPERH
select ARCH_HIBERNATION_POSSIBLE if MMU
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_WANT_IPC_PARSE_VERSION
- select CLKDEV_LOOKUP
select CPU_NO_EFFICIENT_FFS
select DMA_DECLARE_COHERENT
select GENERIC_ATOMIC64
@@ -101,9 +102,6 @@ config SYS_SUPPORTS_APM_EMULATION
bool
select ARCH_SUSPEND_POSSIBLE
-config SYS_SUPPORTS_HUGETLBFS
- bool
-
config SYS_SUPPORTS_SMP
bool
@@ -175,12 +173,12 @@ config CPU_SH3
config CPU_SH4
bool
+ select ARCH_SUPPORTS_HUGETLBFS if MMU
select CPU_HAS_INTEVT
select CPU_HAS_SR_RB
select CPU_HAS_FPU if !CPU_SH4AL_DSP
select SH_INTC
select SYS_SUPPORTS_SH_TMU
- select SYS_SUPPORTS_HUGETLBFS if MMU
config CPU_SH4A
bool
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 3bcbf52fb30e..44bcb80e791a 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -9,7 +9,7 @@
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
-ifneq ($(SUBARCH),$(ARCH))
+ifdef cross_compiling
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := $(call cc-cross-prefix, sh-linux- sh-linux-gnu- sh-unknown-linux-gnu-)
endif
diff --git a/arch/sh/configs/edosk7705_defconfig b/arch/sh/configs/edosk7705_defconfig
index ef7cc31997b1..9ee35269bee2 100644
--- a/arch/sh/configs/edosk7705_defconfig
+++ b/arch/sh/configs/edosk7705_defconfig
@@ -23,7 +23,6 @@ CONFIG_SH_PCLK_FREQ=31250000
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
# CONFIG_UNIX98_PTYS is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_HW_RANDOM is not set
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig
index 315b04a8dd2f..601d062250d1 100644
--- a/arch/sh/configs/se7206_defconfig
+++ b/arch/sh/configs/se7206_defconfig
@@ -71,7 +71,6 @@ CONFIG_SMC91X=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_SH_SCI_NR_UARTS=4
CONFIG_SERIAL_SH_SCI_CONSOLE=y
diff --git a/arch/sh/configs/sh2007_defconfig b/arch/sh/configs/sh2007_defconfig
index 99975db461d8..79f02f1c0dc8 100644
--- a/arch/sh/configs/sh2007_defconfig
+++ b/arch/sh/configs/sh2007_defconfig
@@ -75,7 +75,6 @@ CONFIG_INPUT_FF_MEMLESS=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_VT_HW_CONSOLE_BINDING=y
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_SH_SCI_CONSOLE=y
# CONFIG_LEGACY_PTYS is not set
diff --git a/arch/sh/configs/sh7724_generic_defconfig b/arch/sh/configs/sh7724_generic_defconfig
index 2c46c0004780..cbc9389a89a8 100644
--- a/arch/sh/configs/sh7724_generic_defconfig
+++ b/arch/sh/configs/sh7724_generic_defconfig
@@ -18,7 +18,6 @@ CONFIG_CPU_IDLE=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
diff --git a/arch/sh/configs/sh7770_generic_defconfig b/arch/sh/configs/sh7770_generic_defconfig
index 88193153e51b..ee2357deba0f 100644
--- a/arch/sh/configs/sh7770_generic_defconfig
+++ b/arch/sh/configs/sh7770_generic_defconfig
@@ -20,7 +20,6 @@ CONFIG_CPU_IDLE=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
diff --git a/arch/sh/configs/sh7785lcr_32bit_defconfig b/arch/sh/configs/sh7785lcr_32bit_defconfig
index 9b885c14c400..5c725c75fcef 100644
--- a/arch/sh/configs/sh7785lcr_32bit_defconfig
+++ b/arch/sh/configs/sh7785lcr_32bit_defconfig
@@ -66,7 +66,6 @@ CONFIG_INPUT_FF_MEMLESS=m
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_EVBUG=m
CONFIG_VT_HW_CONSOLE_BINDING=y
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_SH_SCI_NR_UARTS=6
CONFIG_SERIAL_SH_SCI_CONSOLE=y
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index aace62d42288..059791fd394f 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -3,7 +3,7 @@
#define __ASM_SH_ATOMIC_GRB_H
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -23,7 +23,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -45,7 +45,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int res, tmp; \
\
diff --git a/arch/sh/include/asm/atomic-irq.h b/arch/sh/include/asm/atomic-irq.h
index ee523bd2120f..7665de9d00d0 100644
--- a/arch/sh/include/asm/atomic-irq.h
+++ b/arch/sh/include/asm/atomic-irq.h
@@ -11,7 +11,7 @@
*/
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -21,7 +21,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long temp, flags; \
\
@@ -35,7 +35,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long temp, flags; \
\
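/*
 * Editor's sketch (not part of the patch): after the rename, e.g.
 * ATOMIC_OP_RETURN(add, +=) in atomic-irq.h expands to roughly:
 */
static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	raw_local_irq_save(flags);	/* atomicity via IRQ-off */
	temp = v->counter;
	temp += i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}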
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index 1d06e4d288dc..b63dcfbfa14e 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -17,7 +17,7 @@
*/
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
\
@@ -32,7 +32,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long temp; \
\
@@ -50,7 +50,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long res, temp; \
\
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 7c2a8a703b9a..528bfeda78f5 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -19,8 +19,8 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#if defined(CONFIG_GUSA_RB)
#include <asm/atomic-grb.h>
@@ -30,8 +30,8 @@
#include <asm/atomic-irq.h>
#endif
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
#endif /* CONFIG_CPU_J2 */
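/*
 * Editor's note (an assumption: these renames are part of the generic
 * ARCH_ATOMIC conversion): kernel code keeps calling the unprefixed API,
 * and instrumented wrappers in <linux/atomic.h> forward to the arch_
 * versions, e.g.:
 */
static inline bool example_try_claim(atomic_t *flag)
{
	/* atomic_cmpxchg() is the instrumented wrapper that ends up in
	 * arch_atomic_cmpxchg() on sh */
	return atomic_cmpxchg(flag, 0, 1) == 0;
}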
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h
index 450b5854d7c6..3b6c7b5b7ec9 100644
--- a/arch/sh/include/asm/bitops.h
+++ b/arch/sh/include/asm/bitops.h
@@ -58,15 +58,16 @@ static inline unsigned long __ffs(unsigned long word)
return result;
}
-#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/find.h>
+
#endif /* __ASM_SH_BITOPS_H */
diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h
index e9501d85c278..0ed9b3f4a577 100644
--- a/arch/sh/include/asm/cmpxchg.h
+++ b/arch/sh/include/asm/cmpxchg.h
@@ -45,7 +45,7 @@ extern void __xchg_called_with_bad_pointer(void);
__xchg__res; \
})
-#define xchg(ptr,x) \
+#define arch_xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
/* This function doesn't exist, so you'll get a linker error
@@ -63,7 +63,7 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
return old;
}
-#define cmpxchg(ptr,o,n) \
+#define arch_cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 6d5c6463bc07..cf9a3ec32406 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -283,11 +283,6 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
*/
#define xlate_dev_mem_ptr(p) __va(p)
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(phys_addr_t addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h
index 6552a088dc97..7b8dead2723d 100644
--- a/arch/sh/include/asm/mmzone.h
+++ b/arch/sh/include/asm/mmzone.h
@@ -2,7 +2,7 @@
#ifndef __ASM_SH_MMZONE_H
#define __ASM_SH_MMZONE_H
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
#include <linux/numa.h>
extern struct pglist_data *node_data[];
@@ -31,7 +31,7 @@ static inline void
setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
}
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#endif /* CONFIG_NUMA */
/* Platform specific mem init */
void __init plat_mem_setup(void);
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 0e6b0be25e33..a9e98233c4d4 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -30,7 +30,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
{
set_pmd(pmd, __pmd((unsigned long)page_address(pte)));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#define __pte_free_tlb(tlb,pte,addr) \
do { \
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 27751e9470df..d7ddb1ec86a0 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -59,8 +59,6 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
/* Entries per level */
#define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE))
-#define FIRST_USER_ADDRESS 0UL
-
#define PHYS_ADDR_MASK29 0x1fffffff
#define PHYS_ADDR_MASK32 0xffffffff
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index 360f713d009b..aeb8915e9254 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -4,12 +4,11 @@
#ifndef __ASSEMBLY__
#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
#ifdef CONFIG_MMU
#include <linux/swap.h>
-#include <asm-generic/tlb.h>
-
#if defined(CONFIG_CPU_SH4)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
@@ -24,12 +23,7 @@ static inline void tlb_unwire_entry(void)
{
BUG();
}
-#endif
-
-#else /* CONFIG_MMU */
-
-#include <asm-generic/tlb.h>
-
+#endif /* CONFIG_CPU_SH4 */
#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_H */
diff --git a/arch/sh/include/asm/unaligned-sh4a.h b/arch/sh/include/asm/unaligned-sh4a.h
deleted file mode 100644
index d311f00ed530..000000000000
--- a/arch/sh/include/asm/unaligned-sh4a.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_UNALIGNED_SH4A_H
-#define __ASM_SH_UNALIGNED_SH4A_H
-
-/*
- * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
- * Support for 64-bit accesses are done through shifting and masking
- * relative to the endianness. Unaligned stores are not supported by the
- * instruction encoding, so these continue to use the packed
- * struct.
- *
- * The same note as with the movli.l/movco.l pair applies here, as long
- * as the load is guaranteed to be inlined, nothing else will hook in to
- * r0 and we get the return value for free.
- *
- * NOTE: Due to the fact we require r0 encoding, care should be taken to
- * avoid mixing these heavily with other r0 consumers, such as the atomic
- * ops. Failure to adhere to this can result in the compiler running out
- * of spill registers and blowing up when building at low optimization
- * levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777.
- */
-#include <linux/unaligned/packed_struct.h>
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-static inline u16 sh4a_get_unaligned_cpu16(const u8 *p)
-{
-#ifdef __LITTLE_ENDIAN
- return p[0] | p[1] << 8;
-#else
- return p[0] << 8 | p[1];
-#endif
-}
-
-static __always_inline u32 sh4a_get_unaligned_cpu32(const u8 *p)
-{
- unsigned long unaligned;
-
- __asm__ __volatile__ (
- "movua.l @%1, %0\n\t"
- : "=z" (unaligned)
- : "r" (p)
- );
-
- return unaligned;
-}
-
-/*
- * Even though movua.l supports auto-increment on the read side, it can
- * only store to r0 due to instruction encoding constraints, so just let
- * the compiler sort it out on its own.
- */
-static inline u64 sh4a_get_unaligned_cpu64(const u8 *p)
-{
-#ifdef __LITTLE_ENDIAN
- return (u64)sh4a_get_unaligned_cpu32(p + 4) << 32 |
- sh4a_get_unaligned_cpu32(p);
-#else
- return (u64)sh4a_get_unaligned_cpu32(p) << 32 |
- sh4a_get_unaligned_cpu32(p + 4);
-#endif
-}
-
-static inline u16 get_unaligned_le16(const void *p)
-{
- return le16_to_cpu(sh4a_get_unaligned_cpu16(p));
-}
-
-static inline u32 get_unaligned_le32(const void *p)
-{
- return le32_to_cpu(sh4a_get_unaligned_cpu32(p));
-}
-
-static inline u64 get_unaligned_le64(const void *p)
-{
- return le64_to_cpu(sh4a_get_unaligned_cpu64(p));
-}
-
-static inline u16 get_unaligned_be16(const void *p)
-{
- return be16_to_cpu(sh4a_get_unaligned_cpu16(p));
-}
-
-static inline u32 get_unaligned_be32(const void *p)
-{
- return be32_to_cpu(sh4a_get_unaligned_cpu32(p));
-}
-
-static inline u64 get_unaligned_be64(const void *p)
-{
- return be64_to_cpu(sh4a_get_unaligned_cpu64(p));
-}
-
-static inline void nonnative_put_le16(u16 val, u8 *p)
-{
- *p++ = val;
- *p++ = val >> 8;
-}
-
-static inline void nonnative_put_le32(u32 val, u8 *p)
-{
- nonnative_put_le16(val, p);
- nonnative_put_le16(val >> 16, p + 2);
-}
-
-static inline void nonnative_put_le64(u64 val, u8 *p)
-{
- nonnative_put_le32(val, p);
- nonnative_put_le32(val >> 32, p + 4);
-}
-
-static inline void nonnative_put_be16(u16 val, u8 *p)
-{
- *p++ = val >> 8;
- *p++ = val;
-}
-
-static inline void nonnative_put_be32(u32 val, u8 *p)
-{
- nonnative_put_be16(val >> 16, p);
- nonnative_put_be16(val, p + 2);
-}
-
-static inline void nonnative_put_be64(u64 val, u8 *p)
-{
- nonnative_put_be32(val >> 32, p);
- nonnative_put_be32(val, p + 4);
-}
-
-static inline void put_unaligned_le16(u16 val, void *p)
-{
-#ifdef __LITTLE_ENDIAN
- __put_unaligned_cpu16(val, p);
-#else
- nonnative_put_le16(val, p);
-#endif
-}
-
-static inline void put_unaligned_le32(u32 val, void *p)
-{
-#ifdef __LITTLE_ENDIAN
- __put_unaligned_cpu32(val, p);
-#else
- nonnative_put_le32(val, p);
-#endif
-}
-
-static inline void put_unaligned_le64(u64 val, void *p)
-{
-#ifdef __LITTLE_ENDIAN
- __put_unaligned_cpu64(val, p);
-#else
- nonnative_put_le64(val, p);
-#endif
-}
-
-static inline void put_unaligned_be16(u16 val, void *p)
-{
-#ifdef __BIG_ENDIAN
- __put_unaligned_cpu16(val, p);
-#else
- nonnative_put_be16(val, p);
-#endif
-}
-
-static inline void put_unaligned_be32(u32 val, void *p)
-{
-#ifdef __BIG_ENDIAN
- __put_unaligned_cpu32(val, p);
-#else
- nonnative_put_be32(val, p);
-#endif
-}
-
-static inline void put_unaligned_be64(u64 val, void *p)
-{
-#ifdef __BIG_ENDIAN
- __put_unaligned_cpu64(val, p);
-#else
- nonnative_put_be64(val, p);
-#endif
-}
-
-/*
- * While it's a bit non-obvious, even though the generic le/be wrappers
- * use the __get/put_xxx prefixing, they actually wrap in to the
- * non-prefixed get/put_xxx variants as provided above.
- */
-#include <linux/unaligned/generic.h>
-
-#ifdef __LITTLE_ENDIAN
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#else
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#endif
-
-#endif /* __ASM_SH_UNALIGNED_SH4A_H */
diff --git a/arch/sh/include/asm/unaligned.h b/arch/sh/include/asm/unaligned.h
deleted file mode 100644
index 0c92e2c73af4..000000000000
--- a/arch/sh/include/asm/unaligned.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_SH_UNALIGNED_H
-#define _ASM_SH_UNALIGNED_H
-
-#ifdef CONFIG_CPU_SH4A
-/* SH-4A can handle unaligned loads in a relatively neutered fashion. */
-#include <asm/unaligned-sh4a.h>
-#else
-/* Otherwise, SH can't handle unaligned accesses. */
-#include <asm-generic/unaligned.h>
-#endif
-
-#endif /* _ASM_SH_UNALIGNED_H */
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 0646c5961846..295c43315bbe 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -67,7 +67,7 @@ static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
* Modifying code must take extra care. On an SMP machine, if
* the code being modified is also being executed on another CPU
* that CPU will have undefined results and possibly take a GPF.
- * We use kstop_machine to stop other CPUS from exectuing code.
+ * We use kstop_machine to stop other CPUs from executing code.
* But this does not stop NMIs from happening. We still need
* to protect against that. We separate out the modification of
* the code to take care of this.
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 756100b01e84..1c7f358ef0be 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -383,23 +383,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 445e3ece4c23..1d2507f22437 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -57,24 +57,6 @@ static inline int sh_pmu_initialized(void)
return !!sh_pmu;
}
-const char *perf_pmu_name(void)
-{
- if (!sh_pmu)
- return NULL;
-
- return sh_pmu->name;
-}
-EXPORT_SYMBOL_GPL(perf_pmu_name);
-
-int perf_num_counters(void)
-{
- if (!sh_pmu)
- return 0;
-
- return sh_pmu->num_events;
-}
-EXPORT_SYMBOL_GPL(perf_num_counters);
-
/*
* Release the PMU if this is the last perf_event.
*/
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 1aa508eb0823..717de05c81f4 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -186,7 +186,7 @@ unsigned long get_wchan(struct task_struct *p)
{
unsigned long pc;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
/*
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 372acdc9033e..65924d9ec245 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -186,8 +186,6 @@ asmlinkage void start_secondary(void)
per_cpu_trap_init();
- preempt_disable();
-
notify_cpu_starting(cpu);
local_irq_enable();
diff --git a/arch/sh/kernel/syscalls/Makefile b/arch/sh/kernel/syscalls/Makefile
index 285aaba832d9..6713c65a25e1 100644
--- a/arch/sh/kernel/syscalls/Makefile
+++ b/arch/sh/kernel/syscalls/Makefile
@@ -6,20 +6,14 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
syscall := $(src)/syscall.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abis_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '$(syshdr_offset_$(basetarget))'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
- '$(systbl_abis_$(basetarget))' \
- '$(systbl_abi_$(basetarget))' \
- '$(systbl_offset_$(basetarget))'
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@
$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index d08eebad6b7f..e0a70be77d84 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -445,3 +445,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/sh/kernel/syscalls/syscallhdr.sh b/arch/sh/kernel/syscalls/syscallhdr.sh
deleted file mode 100644
index 4c0519861e97..000000000000
--- a/arch/sh/kernel/syscalls/syscallhdr.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_UAPI_ASM_SH_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- printf "#ifndef %s\n" "${fileguard}"
- printf "#define %s\n" "${fileguard}"
- printf "\n"
-
- nxt=0
- while read nr abi name entry ; do
- if [ -z "$offset" ]; then
- printf "#define __NR_%s%s\t%s\n" \
- "${prefix}" "${name}" "${nr}"
- else
- printf "#define __NR_%s%s\t(%s + %s)\n" \
- "${prefix}" "${name}" "${offset}" "${nr}"
- fi
- nxt=$((nr+1))
- done
-
- printf "\n"
- printf "#ifdef __KERNEL__\n"
- printf "#define __NR_syscalls\t%s\n" "${nxt}"
- printf "#endif\n"
- printf "\n"
- printf "#endif /* %s */\n" "${fileguard}"
-) > "$out"
diff --git a/arch/sh/kernel/syscalls/syscalltbl.sh b/arch/sh/kernel/syscalls/syscalltbl.sh
deleted file mode 100644
index 904b8e6e625d..000000000000
--- a/arch/sh/kernel/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-my_abi="$4"
-offset="$5"
-
-emit() {
- t_nxt="$1"
- t_nr="$2"
- t_entry="$3"
-
- while [ $t_nxt -lt $t_nr ]; do
- printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}"
- t_nxt=$((t_nxt+1))
- done
- printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}"
-}
-
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- nxt=0
- if [ -z "$offset" ]; then
- offset=0
- fi
-
- while read nr abi name entry ; do
- emit $((nxt+offset)) $((nr+offset)) $entry
- nxt=$((nr+1))
- done
-) > "$out"
diff --git a/arch/sh/kernel/topology.c b/arch/sh/kernel/topology.c
index 7a989eed3b18..76af6db9daa2 100644
--- a/arch/sh/kernel/topology.c
+++ b/arch/sh/kernel/topology.c
@@ -46,7 +46,7 @@ static int __init topology_init(void)
{
int i, ret;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
for_each_online_node(i)
register_one_node(i);
#endif
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index f5beecdac693..e76b22157099 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -180,7 +180,6 @@ static inline void arch_ftrace_nmi_exit(void) { }
BUILD_TRAP_HANDLER(nmi)
{
- unsigned int cpu = smp_processor_id();
TRAP_HANDLER_DECL;
arch_ftrace_nmi_enter();
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 77aa2f802d8d..ba569cfb4368 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -120,7 +120,7 @@ config NODES_SHIFT
int
default "3" if CPU_SUBTYPE_SHX3
default "1"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
config ARCH_FLATMEM_ENABLE
def_bool y
@@ -136,14 +136,6 @@ config ARCH_SPARSEMEM_DEFAULT
config ARCH_SELECT_MEMORY_MODEL
def_bool y
-config ARCH_ENABLE_MEMORY_HOTPLUG
- def_bool y
- depends on SPARSEMEM && MMU
-
-config ARCH_ENABLE_MEMORY_HOTREMOVE
- def_bool y
- depends on SPARSEMEM && MMU
-
config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index ddfa9685f1ef..72c2e1b46c08 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -16,6 +16,7 @@
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
+#include <linux/pagemap.h>
#include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h>
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 4c67b3d88775..9b63a53a5e46 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -13,6 +13,7 @@
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 220d7bc43d2b..999ab5916e69 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -21,7 +21,7 @@
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 0db6919af8d3..ce26c7f8950a 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -211,7 +211,7 @@ void __init allocate_pgdat(unsigned int nid)
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
NODE_DATA(nid) = memblock_alloc_try_nid(
sizeof(struct pglist_data),
SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
@@ -359,7 +359,6 @@ void __init mem_init(void)
vsyscall_init();
- mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n"
" fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
" vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 164a5254c91c..c5fa7932b550 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -59,6 +59,7 @@ config SPARC32
select CLZ_TAB
select HAVE_UID16
select OLD_SIGACTION
+ select ZONE_DMA
config SPARC64
def_bool 64BIT
@@ -141,10 +142,6 @@ config HIGHMEM
default y if SPARC32
select KMAP_LOCAL
-config ZONE_DMA
- bool
- default y if SPARC32
-
config GENERIC_ISA_DMA
bool
default y if SPARC32
@@ -265,7 +262,7 @@ config NODES_SHIFT
int "Maximum NUMA Nodes (as a power of 2)"
range 4 5 if SPARC64
default "5"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 148f44b33890..18099099583e 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -93,7 +93,7 @@ CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_MII=m
CONFIG_SUNLANCE=m
-CONFIG_HAPPYMEAL=m
+CONFIG_HAPPYMEAL=y
CONFIG_SUNGEM=m
CONFIG_SUNVNET=m
CONFIG_LDMVSW=m
@@ -122,7 +122,6 @@ CONFIG_INPUT_SPARCSPKR=y
# CONFIG_SERIO_SERPORT is not set
CONFIG_SERIO_PCIPS2=m
CONFIG_SERIO_RAW=m
-# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_SUNSU=y
CONFIG_SERIAL_SUNSU_CONSOLE=y
CONFIG_SERIAL_SUNSAB=y
@@ -234,9 +233,7 @@ CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRC16=m
CONFIG_LIBCRC32C=m
CONFIG_VCC=m
-CONFIG_ATA=y
CONFIG_PATA_CMD64X=y
-CONFIG_HAPPYMEAL=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_DEVTMPFS=y
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index aec20406145e..0b9d98ced34a 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
-generated-y += syscall_table_c32.h
generic-y += export.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index efad5532f169..d775daa83d12 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -18,30 +18,30 @@
#include <asm/barrier.h>
#include <asm-generic/atomic64.h>
-int atomic_add_return(int, atomic_t *);
-int atomic_fetch_add(int, atomic_t *);
-int atomic_fetch_and(int, atomic_t *);
-int atomic_fetch_or(int, atomic_t *);
-int atomic_fetch_xor(int, atomic_t *);
-int atomic_cmpxchg(atomic_t *, int, int);
-int atomic_xchg(atomic_t *, int);
-int atomic_fetch_add_unless(atomic_t *, int, int);
-void atomic_set(atomic_t *, int);
+int arch_atomic_add_return(int, atomic_t *);
+int arch_atomic_fetch_add(int, atomic_t *);
+int arch_atomic_fetch_and(int, atomic_t *);
+int arch_atomic_fetch_or(int, atomic_t *);
+int arch_atomic_fetch_xor(int, atomic_t *);
+int arch_atomic_cmpxchg(atomic_t *, int, int);
+int arch_atomic_xchg(atomic_t *, int);
+int arch_atomic_fetch_add_unless(atomic_t *, int, int);
+void arch_atomic_set(atomic_t *, int);
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
-#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
+#define arch_atomic_add(i, v) ((void)arch_atomic_add_return( (int)(i), (v)))
+#define arch_atomic_sub(i, v) ((void)arch_atomic_add_return(-(int)(i), (v)))
-#define atomic_and(i, v) ((void)atomic_fetch_and((i), (v)))
-#define atomic_or(i, v) ((void)atomic_fetch_or((i), (v)))
-#define atomic_xor(i, v) ((void)atomic_fetch_xor((i), (v)))
+#define arch_atomic_and(i, v) ((void)arch_atomic_fetch_and((i), (v)))
+#define arch_atomic_or(i, v) ((void)arch_atomic_fetch_or((i), (v)))
+#define arch_atomic_xor(i, v) ((void)arch_atomic_fetch_xor((i), (v)))
-#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
-#define atomic_fetch_sub(i, v) (atomic_fetch_add (-(int)(i), (v)))
+#define arch_atomic_sub_return(i, v) (arch_atomic_add_return(-(int)(i), (v)))
+#define arch_atomic_fetch_sub(i, v) (arch_atomic_fetch_add (-(int)(i), (v)))
#endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 6b235d3d1d9d..077891686715 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -14,23 +14,23 @@
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op) \
-void atomic_##op(int, atomic_t *); \
-void atomic64_##op(s64, atomic64_t *);
+void arch_atomic_##op(int, atomic_t *); \
+void arch_atomic64_##op(s64, atomic64_t *);
#define ATOMIC_OP_RETURN(op) \
-int atomic_##op##_return(int, atomic_t *); \
-s64 atomic64_##op##_return(s64, atomic64_t *);
+int arch_atomic_##op##_return(int, atomic_t *); \
+s64 arch_atomic64_##op##_return(s64, atomic64_t *);
#define ATOMIC_FETCH_OP(op) \
-int atomic_fetch_##op(int, atomic_t *); \
-s64 atomic64_fetch_##op(s64, atomic64_t *);
+int arch_atomic_fetch_##op(int, atomic_t *); \
+s64 arch_atomic64_fetch_##op(s64, atomic64_t *);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
@@ -49,18 +49,18 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
-static inline int atomic_xchg(atomic_t *v, int new)
+static inline int arch_atomic_xchg(atomic_t *v, int new)
{
- return xchg(&v->counter, new);
+ return arch_xchg(&v->counter, new);
}
-#define atomic64_cmpxchg(v, o, n) \
- ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, o, n) \
+ ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-s64 atomic64_dec_if_positive(atomic64_t *v);
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+s64 arch_atomic64_dec_if_positive(atomic64_t *v);
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index a53d744d4212..27a57a3a7597 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -25,7 +25,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
return x;
}
-#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
/* Emulate cmpxchg() the same way we emulate atomics,
* by hashing the object address and indexing into an array
@@ -55,7 +55,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -64,7 +64,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
})
u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
-#define cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)
+#define arch_cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)
#include <asm-generic/cmpxchg-local.h>
@@ -72,9 +72,9 @@ u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+#define arch_cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif /* __ARCH_SPARC_CMPXCHG__ */
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index 316faa0130ba..8c39a9981187 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -52,7 +52,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
return val;
}
-#define xchg(ptr,x) \
+#define arch_xchg(ptr,x) \
({ __typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr))) \
__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
@@ -168,7 +168,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
return old;
}
-#define cmpxchg(ptr,o,n) \
+#define arch_cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -189,20 +189,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
case 4:
case 8: return __cmpxchg(ptr, old, new, size);
default:
- return __cmpxchg_local_generic(ptr, old, new, size);
+ return __generic_cmpxchg_local(ptr, old, new, size);
}
return old;
}
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#endif /* __ARCH_SPARC64_CMPXCHG__ */
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index 7e078bc73ef5..8fb09eec8c3e 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -8,7 +8,6 @@
#include <asm/ptrace.h>
#include <asm/processor.h>
-#include <asm/extable_64.h>
#include <asm/spitfire.h>
#include <asm/adi.h>
diff --git a/arch/sparc/include/asm/extable_64.h b/arch/sparc/include/asm/extable.h
index 5a0171907b7e..554a9dc376fc 100644
--- a/arch/sparc/include/asm/extable_64.h
+++ b/arch/sparc/include/asm/extable.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_EXTABLE64_H
-#define __ASM_EXTABLE64_H
+#ifndef __ASM_EXTABLE_H
+#define __ASM_EXTABLE_H
/*
* The exception table consists of pairs of addresses: the first is the
* address of an instruction that is allowed to fault, and the second is
diff --git a/arch/sparc/include/asm/ftrace.h b/arch/sparc/include/asm/ftrace.h
index d3aa1a524431..e284394cb3aa 100644
--- a/arch/sparc/include/asm/ftrace.h
+++ b/arch/sparc/include/asm/ftrace.h
@@ -17,7 +17,7 @@ void _mcount(void);
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
-/* reloction of mcount call site is the same as the address */
+/* relocation of mcount call site is the same as the address */
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
index 9bb27e5c22f1..5ffa820dcd4d 100644
--- a/arch/sparc/include/asm/io_64.h
+++ b/arch/sparc/include/asm/io_64.h
@@ -409,6 +409,10 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
#define ioremap_uc(X,Y) ioremap((X),(Y))
#define ioremap_wc(X,Y) ioremap((X),(Y))
#define ioremap_wt(X,Y) ioremap((X),(Y))
+static inline void __iomem *ioremap_np(unsigned long offset, unsigned long size)
+{
+ return NULL;
+}
static inline void iounmap(volatile void __iomem *addr)
{
@@ -450,11 +454,6 @@ void sbus_set_sbus64(struct device *, int);
*/
#define xlate_dev_mem_ptr(p) __va(p)
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
#endif
#endif /* !(__SPARC64_IO_H) */
diff --git a/arch/sparc/include/asm/mmzone.h b/arch/sparc/include/asm/mmzone.h
index 6543fb97a849..a236d8aa893a 100644
--- a/arch/sparc/include/asm/mmzone.h
+++ b/arch/sparc/include/asm/mmzone.h
@@ -2,7 +2,7 @@
#ifndef _SPARC64_MMZONE_H
#define _SPARC64_MMZONE_H
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
#include <linux/cpumask.h>
@@ -13,6 +13,6 @@ extern struct pglist_data *node_data[];
extern int numa_cpu_lookup_table[];
extern cpumask_t numa_cpumask_lookup_table[];
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#endif /* CONFIG_NUMA */
#endif /* _SPARC64_MMZONE_H */
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index 9d353e6dc5a9..4f73e87b22a3 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -51,7 +51,6 @@ static inline void free_pmd_fast(pmd_t * pmd)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
#define pmd_populate(mm, pmd, pte) pmd_set(pmd, pte)
-#define pmd_pgtable(pmd) (pgtable_t)__pmd_page(pmd)
void pmd_set(pmd_t *pmdp, pte_t *ptep);
#define pmd_populate_kernel pmd_populate
diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
index a8dafc550985..7b5561d17ab1 100644
--- a/arch/sparc/include/asm/pgalloc_64.h
+++ b/arch/sparc/include/asm/pgalloc_64.h
@@ -67,7 +67,6 @@ void pte_free(struct mm_struct *mm, pgtable_t ptepage);
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
#define pmd_populate(MM, PMD, PTE) pmd_set(MM, PMD, PTE)
-#define pmd_pgtable(PMD) ((pte_t *)pmd_page_vaddr(PMD))
void pgtable_free(void *table, bool is_page);
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 632cdb959542..ebaf374b55ab 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -48,7 +48,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail);
#define PTRS_PER_PMD 64
#define PTRS_PER_PGD 256
#define USER_PTRS_PER_PGD PAGE_OFFSET / PGDIR_SIZE
-#define FIRST_USER_ADDRESS 0UL
#define PTE_SIZE (PTRS_PER_PTE*4)
#define PAGE_NONE SRMMU_PAGE_NONE
@@ -321,6 +320,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
pgprot_val(newprot));
}
+/* only used by the huge vmap code, should never be called */
+#define pud_page(pud) NULL
+
struct seq_file;
void mmu_info(struct seq_file *m);
@@ -430,4 +432,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA
+#define pmd_pgtable(pmd) ((pgtable_t)__pmd_page(pmd))
+
#endif /* !(_SPARC_PGTABLE_H) */
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 550d3904de65..e0ee48ec3903 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -95,9 +95,6 @@ bool kern_addr_valid(unsigned long addr);
#define PTRS_PER_PUD (1UL << PUD_BITS)
#define PTRS_PER_PGD (1UL << PGDIR_BITS)
-/* Kernel has a separate 44bit address space. */
-#define FIRST_USER_ADDRESS 0UL
-
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %p(%016lx) seen at (%pS)\n", \
__FILE__, __LINE__, &(e), pmd_val(e), __builtin_return_address(0))
@@ -377,8 +374,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
#define pgprot_noncached pgprot_noncached
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
-extern pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writable);
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
#define arch_make_huge_pte arch_make_huge_pte
static inline unsigned long __pte_default_huge_mask(void)
{
@@ -1121,6 +1117,8 @@ extern unsigned long cmdline_memory_size;
asmlinkage void do_sparc64_fault(struct pt_regs *regs);
+#define pmd_pgtable(PMD) ((pte_t *)pmd_page_vaddr(PMD))
+
#ifdef CONFIG_HUGETLB_PAGE
#define pud_leaf_size pud_leaf_size
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h
index 3c4bc2189092..b6242f7771e9 100644
--- a/arch/sparc/include/asm/processor_32.h
+++ b/arch/sparc/include/asm/processor_32.h
@@ -50,16 +50,12 @@ struct thread_struct {
unsigned long fsr;
unsigned long fpqdepth;
struct fpq fpqueue[16];
- unsigned long flags;
mm_segment_t current_ds;
};
-#define SPARC_FLAG_KTHREAD 0x1 /* task is a kernel thread */
-#define SPARC_FLAG_UNALIGNED 0x2 /* is allowed to do unaligned accesses */
-
#define INIT_THREAD { \
- .flags = SPARC_FLAG_KTHREAD, \
.current_ds = KERNEL_DS, \
+ .kregs = (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \
}
/* Do necessary setup to start up a newly executed thread. */
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 42cd4cd3892e..8047a9caab2f 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -118,6 +118,7 @@ struct thread_info {
.task = &tsk, \
.current_ds = ASI_P, \
.preempt_count = INIT_PREEMPT_COUNT, \
+ .kregs = (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \
}
/* how to get the thread information struct from C */
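Both thread_info flavors now point the initial task's kregs at a real pt_regs slot carved out of the top of init_stack, replacing the fake_swapper_regs dummies removed from setup_32.c/setup_64.c below; the head_32.S/head_64.S hunks reserve the same TRACEREG_SZ on the boot stack so the two layouts agree. The arithmetic, spelled out:

    /* One past the stack's end, minus one pt_regs: */
    struct pt_regs *kregs = (struct pt_regs *)(init_stack + THREAD_SIZE) - 1;
    /* so the boot %sp sits STACKFRAME_SZ + TRACEREG_SZ below the top. */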
diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
index dd85bc2c2cad..390094200fc4 100644
--- a/arch/sparc/include/asm/uaccess.h
+++ b/arch/sparc/include/asm/uaccess.h
@@ -1,6 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ___ASM_SPARC_UACCESS_H
#define ___ASM_SPARC_UACCESS_H
+
+#include <asm/extable.h>
+
#if defined(__sparc__) && defined(__arch64__)
#include <asm/uaccess_64.h>
#else
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 0a2d3ebc4bb8..4a12346bb69c 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -13,9 +13,6 @@
#include <asm/processor.h>
-#define ARCH_HAS_SORT_EXTABLE
-#define ARCH_HAS_SEARCH_EXTABLE
-
/* Sparc is not segmented, however we need to be able to fool access_ok()
* when doing system calls from kernel mode legitimately.
*
@@ -40,36 +37,6 @@
#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
#define access_ok(addr, size) __access_ok((unsigned long)(addr), size)
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- *
- * There is a special way how to put a range of potentially faulting
- * insns (like twenty ldd/std's with now intervening other instructions)
- * You specify address of first in insn and 0 in fixup and in the next
- * exception_table_entry you specify last potentially faulting insn + 1
- * and in fixup the routine which should handle the fault.
- * That fixup code will get
- * (faulting_insn_address - first_insn_in_the_range_address)/4
- * in %g2 (ie. index of the faulting instruction in the range).
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise. */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
-
/* Uh, these should become the main single-value transfer routines..
* They automatically use the right size if we just have the right
* pointer type..
@@ -252,12 +219,7 @@ static inline unsigned long __clear_user(void __user *addr, unsigned long size)
unsigned long ret;
__asm__ __volatile__ (
- ".section __ex_table,#alloc\n\t"
- ".align 4\n\t"
- ".word 1f,3\n\t"
- ".previous\n\t"
"mov %2, %%o1\n"
- "1:\n\t"
"call __bzero\n\t"
" mov %1, %%o0\n\t"
"mov %%o0, %0\n"
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index 698cf69f74e9..30eb4c6414d1 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -10,7 +10,6 @@
#include <linux/string.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
-#include <asm/extable_64.h>
#include <asm/processor.h>
diff --git a/arch/sparc/include/asm/unaligned.h b/arch/sparc/include/asm/unaligned.h
deleted file mode 100644
index 7971d89d2f54..000000000000
--- a/arch/sparc/include/asm/unaligned.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_SPARC_UNALIGNED_H
-#define _ASM_SPARC_UNALIGNED_H
-
-#include <linux/unaligned/be_struct.h>
-#include <linux/unaligned/le_byteshift.h>
-#include <linux/unaligned/generic.h>
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-
-#endif /* _ASM_SPARC_UNALIGNED_H */
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index 848a22fbac20..92675dc380fa 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -120,6 +120,8 @@
#define SO_PREFER_BUSY_POLL 0x0048
#define SO_BUSY_POLL_BUDGET 0x0049
+#define SO_NETNS_COOKIE 0x0050
+
#if !defined(__KERNEL__)
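SO_NETNS_COOKIE lets userspace read the cookie of the network namespace a socket belongs to (the option is get-only). A hedged userspace sketch, using the sparc value added above:

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/socket.h>

    #ifndef SO_NETNS_COOKIE
    #define SO_NETNS_COOKIE 0x0050          /* sparc value from this hunk */
    #endif

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            uint64_t cookie = 0;
            socklen_t len = sizeof(cookie);

            if (fd >= 0 &&
                getsockopt(fd, SOL_SOCKET, SO_NETNS_COOKIE, &cookie, &len) == 0)
                    printf("netns cookie: %llu\n", (unsigned long long)cookie);
            return 0;
    }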
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index be30c8d4cc73..6044b82b9767 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -515,7 +515,7 @@ continue_boot:
/* I want a kernel stack NOW! */
set init_thread_union, %g1
- set (THREAD_SIZE - STACKFRAME_SZ), %g2
+ set (THREAD_SIZE - STACKFRAME_SZ - TRACEREG_SZ), %g2
add %g1, %g2, %sp
mov 0, %fp /* And for good luck */
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index c5ff2472b3d9..72a5bdc833ea 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -706,7 +706,7 @@ tlb_fixup_done:
wr %g0, ASI_P, %asi
mov 1, %g1
sllx %g1, THREAD_SHIFT, %g1
- sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
+ sub %g1, (STACKFRAME_SZ + STACK_BIAS + TRACEREG_SZ), %g1
add %g6, %g1, %sp
/* Set per-cpu pointer initially to zero, this makes
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index 217c21a6986a..4c05a4ee6a0e 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -346,23 +346,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index b91e88058e0c..93983d6d431d 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -216,16 +216,6 @@ void flush_thread(void)
clear_thread_flag(TIF_USEDFPU);
#endif
}
-
- /* This task is no longer a kernel thread. */
- if (current->thread.flags & SPARC_FLAG_KTHREAD) {
- current->thread.flags &= ~SPARC_FLAG_KTHREAD;
-
- /* We must fixup kregs as well. */
- /* XXX This was not fixed for ti for a while, worked. Unused? */
- current->thread.kregs = (struct pt_regs *)
- (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ));
- }
}
static inline struct sparc_stackf __user *
@@ -313,7 +303,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
extern int nwindows;
unsigned long psr;
memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
- p->thread.flags |= SPARC_FLAG_KTHREAD;
p->thread.current_ds = KERNEL_DS;
ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8);
childregs->u_regs[UREG_G1] = sp; /* function */
@@ -325,7 +314,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
}
memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ);
childregs->u_regs[UREG_FP] = sp;
- p->thread.flags &= ~SPARC_FLAG_KTHREAD;
p->thread.current_ds = USER_DS;
ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
@@ -388,8 +376,7 @@ unsigned long get_wchan(struct task_struct *task)
struct reg_window32 *rw;
int count = 0;
- if (!task || task == current ||
- task->state == TASK_RUNNING)
+ if (!task || task == current || task_is_running(task))
goto out;
fp = task_thread_info(task)->ksp + bias;
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 7afd0a859a78..d33c58a58d4f 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -674,8 +674,7 @@ unsigned long get_wchan(struct task_struct *task)
unsigned long ret = 0;
int count = 0;
- if (!task || task == current ||
- task->state == TASK_RUNNING)
+ if (!task || task == current || task_is_running(task))
goto out;
tp = task_thread_info(task);
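Both get_wchan() implementations stop reading task->state directly and use the task_is_running() helper instead, part of the same series that turned the field into __state with READ_ONCE() access. The helper, roughly as defined in <linux/sched.h>:

    #define task_is_running(task) \
            (READ_ONCE((task)->__state) == TASK_RUNNING)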
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index eea43a1aef1b..c8e0dd99f370 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -266,7 +266,6 @@ static __init void leon_patch(void)
}
struct tt_entry *sparc_ttable;
-static struct pt_regs fake_swapper_regs;
/* Called from head_32.S - before we have setup anything
* in the kernel. Be very careful with what you do here.
@@ -363,8 +362,6 @@ void __init setup_arch(char **cmdline_p)
(*(linux_dbvec->teach_debugger))();
}
- init_task.thread.kregs = &fake_swapper_regs;
-
/* Run-time patch instructions to match the cpu model */
per_cpu_patch();
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index d87244197d5c..48abee4eee29 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -165,8 +165,6 @@ extern int root_mountflags;
char reboot_command[COMMAND_LINE_SIZE];
-static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
-
static void __init per_cpu_patch(void)
{
struct cpuid_patch_entry *p;
@@ -661,8 +659,6 @@ void __init setup_arch(char **cmdline_p)
rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
#endif
- task_thread_info(&init_task)->kregs = &fake_swapper_regs;
-
#ifdef CONFIG_IP_PNP
if (!ic_set_manually) {
phandle chosen = prom_finddevice("/chosen");
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index 50c127ab46d5..22b148e5a5f8 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -348,7 +348,6 @@ static void sparc_start_secondary(void *arg)
*/
arch_cpu_pre_starting(arg);
- preempt_disable();
cpu = smp_processor_id();
notify_cpu_starting(cpu);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index e38d8bf454e8..0224d8f19ed6 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -138,9 +138,6 @@ void smp_callin(void)
set_cpu_online(cpuid, true);
- /* idle thread is expected to have preempt disabled */
- preempt_disable();
-
local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
@@ -1546,7 +1543,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
size_t align)
{
const unsigned long goal = __pa(MAX_DMA_ADDRESS);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int node = cpu_to_node(cpu);
void *ptr;
diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c
index ac8677c3841e..3bcc4ddc6911 100644
--- a/arch/sparc/kernel/sstate.c
+++ b/arch/sparc/kernel/sstate.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
diff --git a/arch/sparc/kernel/syscalls/Makefile b/arch/sparc/kernel/syscalls/Makefile
index 283f64407b07..0f2ea5bcb0d7 100644
--- a/arch/sparc/kernel/syscalls/Makefile
+++ b/arch/sparc/kernel/syscalls/Makefile
@@ -6,46 +6,34 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
syscall := $(src)/syscall.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abis_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '$(syshdr_offset_$(basetarget))'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis $(abis) $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
- '$(systbl_abis_$(basetarget))' \
- '$(systbl_abi_$(basetarget))' \
- '$(systbl_offset_$(basetarget))'
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@
-syshdr_abis_unistd_32 := common,32
+$(uapi)/unistd_32.h: abis := common,32
$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abis_unistd_64 := common,64
+$(uapi)/unistd_64.h: abis := common,64
$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-systbl_abis_syscall_table_32 := common,32
+$(kapi)/syscall_table_32.h: abis := common,32
$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
-systbl_abis_syscall_table_64 := common,64
+$(kapi)/syscall_table_64.h: abis := common,64
$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
-systbl_abis_syscall_table_c32 := common,32
-systbl_abi_syscall_table_c32 := c32
-$(kapi)/syscall_table_c32.h: $(syscall) $(systbl) FORCE
- $(call if_changed,systbl)
-
uapisyshdr-y += unistd_32.h unistd_64.h
kapisyshdr-y += syscall_table_32.h \
- syscall_table_64.h \
- syscall_table_c32.h
+ syscall_table_64.h
uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
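Switching to the shared scripts/syscallhdr.sh and scripts/syscalltbl.sh retires the sparc-local generators (deleted below) along with the separate c32 table: the common script emits __SYSCALL_WITH_COMPAT() whenever a compat column is present, so one generated header can serve both tables. A sketch of what the generated syscall_table_32.h lines look like, using entries from syscall.tbl:

    __SYSCALL(0, sys_restart_syscall)
    ...
    __SYSCALL_WITH_COMPAT(441, sys_epoll_pwait2, compat_sys_epoll_pwait2)
    __SYSCALL(442, sys_mount_setattr)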
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index 84403a99039c..603f5a821502 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -488,3 +488,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
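The four new entries keep sparc's numbering in step with the common syscall tables (443 quotactl_fd, 444-446 Landlock). A hedged userspace probe of the Landlock ABI by raw syscall number, for the case where the libc lacks a wrapper:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/landlock.h>

    #ifndef __NR_landlock_create_ruleset
    #define __NR_landlock_create_ruleset 444        /* from this table */
    #endif

    int main(void)
    {
            long abi = syscall(__NR_landlock_create_ruleset, NULL, 0,
                               LANDLOCK_CREATE_RULESET_VERSION);
            printf("Landlock ABI: %ld\n", abi);     /* -1 if unsupported */
            return 0;
    }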
diff --git a/arch/sparc/kernel/syscalls/syscallhdr.sh b/arch/sparc/kernel/syscalls/syscallhdr.sh
deleted file mode 100644
index cf50a75cc0bb..000000000000
--- a/arch/sparc/kernel/syscalls/syscallhdr.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_UAPI_ASM_SPARC_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- printf "#ifndef %s\n" "${fileguard}"
- printf "#define %s\n" "${fileguard}"
- printf "\n"
-
- nxt=0
- while read nr abi name entry compat ; do
- if [ -z "$offset" ]; then
- printf "#define __NR_%s%s\t%s\n" \
- "${prefix}" "${name}" "${nr}"
- else
- printf "#define __NR_%s%s\t(%s + %s)\n" \
- "${prefix}" "${name}" "${offset}" "${nr}"
- fi
- nxt=$((nr+1))
- done
-
- printf "\n"
- printf "#ifdef __KERNEL__\n"
- printf "#define __NR_syscalls\t%s\n" "${nxt}"
- printf "#endif\n"
- printf "\n"
- printf "#endif /* %s */\n" "${fileguard}"
-) > "$out"
diff --git a/arch/sparc/kernel/syscalls/syscalltbl.sh b/arch/sparc/kernel/syscalls/syscalltbl.sh
deleted file mode 100644
index 77cf0143ba19..000000000000
--- a/arch/sparc/kernel/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-my_abi="$4"
-offset="$5"
-
-emit() {
- t_nxt="$1"
- t_nr="$2"
- t_entry="$3"
-
- while [ $t_nxt -lt $t_nr ]; do
- printf "__SYSCALL(%s, sys_nis_syscall, )\n" "${t_nxt}"
- t_nxt=$((t_nxt+1))
- done
- printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
-}
-
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- nxt=0
- if [ -z "$offset" ]; then
- offset=0
- fi
-
- while read nr abi name entry compat ; do
- if [ "$my_abi" = "c32" ] && [ ! -z "$compat" ]; then
- emit $((nxt+offset)) $((nr+offset)) $compat
- else
- emit $((nxt+offset)) $((nr+offset)) $entry
- fi
- nxt=$((nr+1))
- done
-) > "$out"
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index ab9e4d57685a..3aaffa017706 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -9,10 +9,10 @@
* Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
*/
-#define __SYSCALL(nr, entry, nargs) .long entry
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+#define __SYSCALL(nr, entry) .long entry
.data
.align 4
.globl sys_call_table
sys_call_table:
#include <asm/syscall_table_32.h> /* 32-bit native syscalls */
-#undef __SYSCALL
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index a27394bf7d7f..398fe449dd34 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -10,18 +10,20 @@
* Copyright (C) 1995 Adrian M. Rodriguez (adrian@remus.rutgers.edu)
*/
-#define __SYSCALL(nr, entry, nargs) .word entry
+#define __SYSCALL(nr, entry) .word entry
.text
.align 4
#ifdef CONFIG_COMPAT
.globl sys_call_table32
sys_call_table32:
-#include <asm/syscall_table_c32.h> /* Compat syscalls */
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
+#include <asm/syscall_table_32.h> /* Compat syscalls */
+#undef __SYSCALL_WITH_COMPAT
#endif /* CONFIG_COMPAT */
.align 4
.globl sys_call_table64, sys_call_table
sys_call_table64:
sys_call_table:
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
#include <asm/syscall_table_64.h> /* 64-bit native syscalls */
-#undef __SYSCALL
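With the c32 header gone, the same generated syscall_table_32.h serves both the sparc32 native table (systbls_32.S, where __SYSCALL_WITH_COMPAT picks the native entry) and the sparc64 compat table here, where it picks the compat one. For example, the generated line

    __SYSCALL_WITH_COMPAT(441, sys_epoll_pwait2, compat_sys_epoll_pwait2)

expands to ".word compat_sys_epoll_pwait2" in sys_call_table32 and to ".word sys_epoll_pwait2" in the native table (".long" on sparc32).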
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index d92e5eaa4c1d..a850dccd78ea 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -275,14 +275,13 @@ bool is_no_fault_exception(struct pt_regs *regs)
asi = (regs->tstate >> 24); /* saved %asi */
else
asi = (insn >> 5); /* immediate asi */
- if ((asi & 0xf2) == ASI_PNF) {
- if (insn & 0x1000000) { /* op3[5:4]=3 */
- handle_ldf_stq(insn, regs);
- return true;
- } else if (insn & 0x200000) { /* op3[2], stores */
+ if ((asi & 0xf6) == ASI_PNF) {
+ if (insn & 0x200000) /* op3[2], stores */
return false;
- }
- handle_ld_nf(insn, regs);
+ if (insn & 0x1000000) /* op3[5:4]=3 (fp) */
+ handle_ldf_stq(insn, regs);
+ else
+ handle_ld_nf(insn, regs);
return true;
}
}
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index 83db94c0b431..ef5c5207c9ff 100644
--- a/arch/sparc/kernel/unaligned_32.c
+++ b/arch/sparc/kernel/unaligned_32.c
@@ -16,6 +16,7 @@
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
+#include <linux/extable.h>
#include <asm/setup.h>
@@ -213,10 +214,10 @@ static inline int ok_for_kernel(unsigned int insn)
static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
{
- unsigned long g2 = regs->u_regs [UREG_G2];
- unsigned long fixup = search_extables_range(regs->pc, &g2);
+ const struct exception_table_entry *entry;
- if (!fixup) {
+ entry = search_exception_tables(regs->pc);
+ if (!entry) {
unsigned long address = compute_effective_address(regs, insn);
if(address < PAGE_SIZE) {
printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
@@ -232,9 +233,8 @@ static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
die_if_kernel("Oops", regs);
/* Not reached */
}
- regs->pc = fixup;
+ regs->pc = entry->fixup;
regs->npc = regs->pc + 4;
- regs->u_regs [UREG_G2] = g2;
}
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
@@ -274,103 +274,9 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
}
}
-static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
- enum direction dir)
-{
- unsigned int reg;
- int size = ((insn >> 19) & 3) == 3 ? 8 : 4;
-
- if ((regs->pc | regs->npc) & 3)
- return 0;
-
- /* Must access_ok() in all the necessary places. */
-#define WINREG_ADDR(regnum) \
- ((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))
-
- reg = (insn >> 25) & 0x1f;
- if (reg >= 16) {
- if (!access_ok(WINREG_ADDR(reg - 16), size))
- return -EFAULT;
- }
- reg = (insn >> 14) & 0x1f;
- if (reg >= 16) {
- if (!access_ok(WINREG_ADDR(reg - 16), size))
- return -EFAULT;
- }
- if (!(insn & 0x2000)) {
- reg = (insn & 0x1f);
- if (reg >= 16) {
- if (!access_ok(WINREG_ADDR(reg - 16), size))
- return -EFAULT;
- }
- }
-#undef WINREG_ADDR
- return 0;
-}
-
-static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
send_sig_fault(SIGBUS, BUS_ADRALN,
(void __user *)safe_compute_effective_address(regs, insn),
0, current);
}
-
-asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
-{
- enum direction dir;
-
- if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
- (((insn >> 30) & 3) != 3))
- goto kill_user;
- dir = decode_direction(insn);
- if(!ok_for_user(regs, insn, dir)) {
- goto kill_user;
- } else {
- int err, size = decode_access_size(insn);
- unsigned long addr;
-
- if(floating_point_load_or_store_p(insn)) {
- printk("User FPU load/store unaligned unsupported.\n");
- goto kill_user;
- }
-
- addr = compute_effective_address(regs, insn);
- perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
- switch(dir) {
- case load:
- err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
- regs),
- size, (unsigned long *) addr,
- decode_signedness(insn));
- break;
-
- case store:
- err = do_int_store(((insn>>25)&0x1f), size,
- (unsigned long *) addr, regs);
- break;
-
- case both:
- /*
- * This was supported in 2.4. However, we question
- * the value of SWAP instruction across word boundaries.
- */
- printk("Unaligned SWAP unsupported.\n");
- err = -EFAULT;
- break;
-
- default:
- unaligned_panic("Impossible user unaligned trap.");
- goto out;
- }
- if (err)
- goto kill_user;
- else
- advance(regs);
- goto out;
- }
-
-kill_user:
- user_mna_trap_fault(regs, insn);
-out:
- ;
-}
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 281fa634bb1a..8b81d0f00c97 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -29,7 +29,7 @@ static DEFINE_SPINLOCK(dummy);
#endif /* SMP */
#define ATOMIC_FETCH_OP(op, c_op) \
-int atomic_fetch_##op(int i, atomic_t *v) \
+int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int ret; \
unsigned long flags; \
@@ -41,10 +41,10 @@ int atomic_fetch_##op(int i, atomic_t *v) \
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \
} \
-EXPORT_SYMBOL(atomic_fetch_##op);
+EXPORT_SYMBOL(arch_atomic_fetch_##op);
#define ATOMIC_OP_RETURN(op, c_op) \
-int atomic_##op##_return(int i, atomic_t *v) \
+int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int ret; \
unsigned long flags; \
@@ -55,7 +55,7 @@ int atomic_##op##_return(int i, atomic_t *v) \
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \
} \
-EXPORT_SYMBOL(atomic_##op##_return);
+EXPORT_SYMBOL(arch_atomic_##op##_return);
ATOMIC_OP_RETURN(add, +=)
@@ -67,7 +67,7 @@ ATOMIC_FETCH_OP(xor, ^=)
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
-int atomic_xchg(atomic_t *v, int new)
+int arch_atomic_xchg(atomic_t *v, int new)
{
int ret;
unsigned long flags;
@@ -78,9 +78,9 @@ int atomic_xchg(atomic_t *v, int new)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_xchg);
+EXPORT_SYMBOL(arch_atomic_xchg);
-int atomic_cmpxchg(atomic_t *v, int old, int new)
+int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
unsigned long flags;
@@ -93,9 +93,9 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_cmpxchg);
+EXPORT_SYMBOL(arch_atomic_cmpxchg);
-int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int ret;
unsigned long flags;
@@ -107,10 +107,10 @@ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret;
}
-EXPORT_SYMBOL(atomic_fetch_add_unless);
+EXPORT_SYMBOL(arch_atomic_fetch_add_unless);
/* Atomic operations are already serializing */
-void atomic_set(atomic_t *v, int i)
+void arch_atomic_set(atomic_t *v, int i)
{
unsigned long flags;
@@ -118,7 +118,7 @@ void atomic_set(atomic_t *v, int i)
v->counter = i;
spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
}
-EXPORT_SYMBOL(atomic_set);
+EXPORT_SYMBOL(arch_atomic_set);
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
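The spinlock-hashed sparc32 implementation is unchanged; only the names gain the arch_ prefix so the generic instrumentation layer can wrap them (the atomic_64.S rename below does the same for sparc64). A sketch of the wrapper the rename enables, modeled on include/linux/atomic-instrumented.h:

    static __always_inline int
    atomic_fetch_add(int i, atomic_t *v)
    {
            instrument_atomic_read_write(v, sizeof(*v));
            return arch_atomic_fetch_add(i, v);
    }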
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 456b65a30ecf..8245d4a97301 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -19,7 +19,7 @@
*/
#define ATOMIC_OP(op) \
-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -30,11 +30,11 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op); \
-EXPORT_SYMBOL(atomic_##op);
+ENDPROC(arch_atomic_##op); \
+EXPORT_SYMBOL(arch_atomic_##op);
#define ATOMIC_OP_RETURN(op) \
-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */\
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -45,11 +45,11 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op##_return); \
-EXPORT_SYMBOL(atomic_##op##_return);
+ENDPROC(arch_atomic_##op##_return); \
+EXPORT_SYMBOL(arch_atomic_##op##_return);
#define ATOMIC_FETCH_OP(op) \
-ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -60,8 +60,8 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_fetch_##op); \
-EXPORT_SYMBOL(atomic_fetch_##op);
+ENDPROC(arch_atomic_fetch_##op); \
+EXPORT_SYMBOL(arch_atomic_fetch_##op);
ATOMIC_OP(add)
ATOMIC_OP_RETURN(add)
@@ -85,7 +85,7 @@ ATOMIC_FETCH_OP(xor)
#undef ATOMIC_OP
#define ATOMIC64_OP(op) \
-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -96,11 +96,11 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op); \
-EXPORT_SYMBOL(atomic64_##op);
+ENDPROC(arch_atomic64_##op); \
+EXPORT_SYMBOL(arch_atomic64_##op);
#define ATOMIC64_OP_RETURN(op) \
-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -111,11 +111,11 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
op %g1, %o0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op##_return); \
-EXPORT_SYMBOL(atomic64_##op##_return);
+ENDPROC(arch_atomic64_##op##_return); \
+EXPORT_SYMBOL(arch_atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op) \
-ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \
@@ -126,8 +126,8 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \
mov %g1, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_fetch_##op); \
-EXPORT_SYMBOL(atomic64_fetch_##op);
+ENDPROC(arch_atomic64_fetch_##op); \
+EXPORT_SYMBOL(arch_atomic64_fetch_##op);
ATOMIC64_OP(add)
ATOMIC64_OP_RETURN(add)
@@ -150,7 +150,7 @@ ATOMIC64_FETCH_OP(xor)
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
-ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
+ENTRY(arch_atomic64_dec_if_positive) /* %o0 = atomic_ptr */
BACKOFF_SETUP(%o2)
1: ldx [%o0], %g1
brlez,pn %g1, 3f
@@ -162,5 +162,5 @@ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
3: retl
sub %g1, 1, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_dec_if_positive)
-EXPORT_SYMBOL(atomic64_dec_if_positive)
+ENDPROC(arch_atomic64_dec_if_positive)
+EXPORT_SYMBOL(arch_atomic64_dec_if_positive)
diff --git a/arch/sparc/lib/checksum_32.S b/arch/sparc/lib/checksum_32.S
index 7488d130faf7..781e39b3c009 100644
--- a/arch/sparc/lib/checksum_32.S
+++ b/arch/sparc/lib/checksum_32.S
@@ -155,13 +155,6 @@ cpout: retl ! get outta here
.text; \
.align 4
-#define EXT(start,end) \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word start, 0, end, cc_fault; \
- .text; \
- .align 4
-
/* This aligned version executes typically in 8.5 superscalar cycles, this
* is the best I can do. I say 8.5 because the final add will pair with
* the next ldd in the main unrolled loop. Thus the pipe is always full.
@@ -169,20 +162,20 @@ cpout: retl ! get outta here
* please check the fixup code below as well.
*/
#define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [src + off + 0x00], t0; \
- ldd [src + off + 0x08], t2; \
+ EX(ldd [src + off + 0x00], t0); \
+ EX(ldd [src + off + 0x08], t2); \
addxcc t0, sum, sum; \
- ldd [src + off + 0x10], t4; \
+ EX(ldd [src + off + 0x10], t4); \
addxcc t1, sum, sum; \
- ldd [src + off + 0x18], t6; \
+ EX(ldd [src + off + 0x18], t6); \
addxcc t2, sum, sum; \
- std t0, [dst + off + 0x00]; \
+ EX(std t0, [dst + off + 0x00]); \
addxcc t3, sum, sum; \
- std t2, [dst + off + 0x08]; \
+ EX(std t2, [dst + off + 0x08]); \
addxcc t4, sum, sum; \
- std t4, [dst + off + 0x10]; \
+ EX(std t4, [dst + off + 0x10]); \
addxcc t5, sum, sum; \
- std t6, [dst + off + 0x18]; \
+ EX(std t6, [dst + off + 0x18]); \
addxcc t6, sum, sum; \
addxcc t7, sum, sum;
@@ -191,39 +184,39 @@ cpout: retl ! get outta here
* Viking MXCC into streaming mode. Ho hum...
*/
#define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [src + off + 0x00], t0; \
- ldd [src + off + 0x08], t2; \
- ldd [src + off + 0x10], t4; \
- ldd [src + off + 0x18], t6; \
- st t0, [dst + off + 0x00]; \
+ EX(ldd [src + off + 0x00], t0); \
+ EX(ldd [src + off + 0x08], t2); \
+ EX(ldd [src + off + 0x10], t4); \
+ EX(ldd [src + off + 0x18], t6); \
+ EX(st t0, [dst + off + 0x00]); \
addxcc t0, sum, sum; \
- st t1, [dst + off + 0x04]; \
+ EX(st t1, [dst + off + 0x04]); \
addxcc t1, sum, sum; \
- st t2, [dst + off + 0x08]; \
+ EX(st t2, [dst + off + 0x08]); \
addxcc t2, sum, sum; \
- st t3, [dst + off + 0x0c]; \
+ EX(st t3, [dst + off + 0x0c]); \
addxcc t3, sum, sum; \
- st t4, [dst + off + 0x10]; \
+ EX(st t4, [dst + off + 0x10]); \
addxcc t4, sum, sum; \
- st t5, [dst + off + 0x14]; \
+ EX(st t5, [dst + off + 0x14]); \
addxcc t5, sum, sum; \
- st t6, [dst + off + 0x18]; \
+ EX(st t6, [dst + off + 0x18]); \
addxcc t6, sum, sum; \
- st t7, [dst + off + 0x1c]; \
+ EX(st t7, [dst + off + 0x1c]); \
addxcc t7, sum, sum;
/* Yuck, 6 superscalar cycles... */
#define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \
- ldd [src - off - 0x08], t0; \
- ldd [src - off - 0x00], t2; \
+ EX(ldd [src - off - 0x08], t0); \
+ EX(ldd [src - off - 0x00], t2); \
addxcc t0, sum, sum; \
- st t0, [dst - off - 0x08]; \
+ EX(st t0, [dst - off - 0x08]); \
addxcc t1, sum, sum; \
- st t1, [dst - off - 0x04]; \
+ EX(st t1, [dst - off - 0x04]); \
addxcc t2, sum, sum; \
- st t2, [dst - off - 0x00]; \
+ EX(st t2, [dst - off - 0x00]); \
addxcc t3, sum, sum; \
- st t3, [dst - off + 0x04];
+ EX(st t3, [dst - off + 0x04]);
/* Handle the end cruft code out of band for better cache patterns. */
cc_end_cruft:
@@ -331,7 +324,6 @@ __csum_partial_copy_sparc_generic:
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-10: EXT(5b, 10b) ! note for exception handling
sub %g1, 128, %g1 ! detract from length
addx %g0, %g7, %g7 ! add in last carry bit
andcc %g1, 0xffffff80, %g0 ! more to csum?
@@ -356,8 +348,7 @@ cctbl: CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
-12: EXT(cctbl, 12b) ! note for exception table handling
- addx %g0, %g7, %g7
+12: addx %g0, %g7, %g7
andcc %o3, 0xf, %g0 ! check for low bits set
ccte: bne cc_end_cruft ! something left, handle it out of band
andcc %o3, 8, %g0 ! begin checks for that code
@@ -367,7 +358,6 @@ ccdbl: CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-11: EXT(ccdbl, 11b) ! note for exception table handling
sub %g1, 128, %g1 ! detract from length
addx %g0, %g7, %g7 ! add in last carry bit
andcc %g1, 0xffffff80, %g0 ! more to csum?
diff --git a/arch/sparc/lib/copy_user.S b/arch/sparc/lib/copy_user.S
index dc72f2b970b7..954572c78539 100644
--- a/arch/sparc/lib/copy_user.S
+++ b/arch/sparc/lib/copy_user.S
@@ -21,98 +21,134 @@
/* Work around cpp -rob */
#define ALLOC #alloc
#define EXECINSTR #execinstr
+
+#define EX_ENTRY(l1, l2) \
+ .section __ex_table,ALLOC; \
+ .align 4; \
+ .word l1, l2; \
+ .text;
+
#define EX(x,y,a,b) \
98: x,y; \
.section .fixup,ALLOC,EXECINSTR; \
.align 4; \
-99: ba fixupretl; \
- a, b, %g3; \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4
+99: retl; \
+ a, b, %o0; \
+ EX_ENTRY(98b, 99b)
#define EX2(x,y,c,d,e,a,b) \
98: x,y; \
.section .fixup,ALLOC,EXECINSTR; \
.align 4; \
99: c, d, e; \
- ba fixupretl; \
- a, b, %g3; \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word 98b, 99b; \
- .text; \
- .align 4
+ retl; \
+ a, b, %o0; \
+ EX_ENTRY(98b, 99b)
#define EXO2(x,y) \
98: x, y; \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word 98b, 97f; \
- .text; \
- .align 4
+ EX_ENTRY(98b, 97f)
-#define EXT(start,end,handler) \
- .section __ex_table,ALLOC; \
- .align 4; \
- .word start, 0, end, handler; \
- .text; \
- .align 4
+#define LD(insn, src, offset, reg, label) \
+98: insn [%src + (offset)], %reg; \
+ .section .fixup,ALLOC,EXECINSTR; \
+99: ba label; \
+ mov offset, %g5; \
+ EX_ENTRY(98b, 99b)
-/* Please do not change following macros unless you change logic used
- * in .fixup at the end of this file as well
- */
+#define ST(insn, dst, offset, reg, label) \
+98: insn %reg, [%dst + (offset)]; \
+ .section .fixup,ALLOC,EXECINSTR; \
+99: ba label; \
+ mov offset, %g5; \
+ EX_ENTRY(98b, 99b)
/* Both these macros have to start with exactly the same insn */
+/* left: g7 + (g1 % 128) - offset */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- ldd [%src + (offset) + 0x10], %t4; \
- ldd [%src + (offset) + 0x18], %t6; \
- st %t0, [%dst + (offset) + 0x00]; \
- st %t1, [%dst + (offset) + 0x04]; \
- st %t2, [%dst + (offset) + 0x08]; \
- st %t3, [%dst + (offset) + 0x0c]; \
- st %t4, [%dst + (offset) + 0x10]; \
- st %t5, [%dst + (offset) + 0x14]; \
- st %t6, [%dst + (offset) + 0x18]; \
- st %t7, [%dst + (offset) + 0x1c];
-
+ LD(ldd, src, offset + 0x00, t0, bigchunk_fault) \
+ LD(ldd, src, offset + 0x08, t2, bigchunk_fault) \
+ LD(ldd, src, offset + 0x10, t4, bigchunk_fault) \
+ LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \
+ ST(st, dst, offset + 0x00, t0, bigchunk_fault) \
+ ST(st, dst, offset + 0x04, t1, bigchunk_fault) \
+ ST(st, dst, offset + 0x08, t2, bigchunk_fault) \
+ ST(st, dst, offset + 0x0c, t3, bigchunk_fault) \
+ ST(st, dst, offset + 0x10, t4, bigchunk_fault) \
+ ST(st, dst, offset + 0x14, t5, bigchunk_fault) \
+ ST(st, dst, offset + 0x18, t6, bigchunk_fault) \
+ ST(st, dst, offset + 0x1c, t7, bigchunk_fault)
+
+/* left: g7 + (g1 % 128) - offset */
#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
- ldd [%src + (offset) + 0x00], %t0; \
- ldd [%src + (offset) + 0x08], %t2; \
- ldd [%src + (offset) + 0x10], %t4; \
- ldd [%src + (offset) + 0x18], %t6; \
- std %t0, [%dst + (offset) + 0x00]; \
- std %t2, [%dst + (offset) + 0x08]; \
- std %t4, [%dst + (offset) + 0x10]; \
- std %t6, [%dst + (offset) + 0x18];
+ LD(ldd, src, offset + 0x00, t0, bigchunk_fault) \
+ LD(ldd, src, offset + 0x08, t2, bigchunk_fault) \
+ LD(ldd, src, offset + 0x10, t4, bigchunk_fault) \
+ LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \
+ ST(std, dst, offset + 0x00, t0, bigchunk_fault) \
+ ST(std, dst, offset + 0x08, t2, bigchunk_fault) \
+ ST(std, dst, offset + 0x10, t4, bigchunk_fault) \
+ ST(std, dst, offset + 0x18, t6, bigchunk_fault)
+ .section .fixup,#alloc,#execinstr
+bigchunk_fault:
+ sub %g7, %g5, %o0
+ and %g1, 127, %g1
+ retl
+ add %o0, %g1, %o0
+
+/* left: offset + 16 + (g1 % 16) */
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
- ldd [%src - (offset) - 0x10], %t0; \
- ldd [%src - (offset) - 0x08], %t2; \
- st %t0, [%dst - (offset) - 0x10]; \
- st %t1, [%dst - (offset) - 0x0c]; \
- st %t2, [%dst - (offset) - 0x08]; \
- st %t3, [%dst - (offset) - 0x04];
+ LD(ldd, src, -(offset + 0x10), t0, lastchunk_fault) \
+ LD(ldd, src, -(offset + 0x08), t2, lastchunk_fault) \
+ ST(st, dst, -(offset + 0x10), t0, lastchunk_fault) \
+ ST(st, dst, -(offset + 0x0c), t1, lastchunk_fault) \
+ ST(st, dst, -(offset + 0x08), t2, lastchunk_fault) \
+ ST(st, dst, -(offset + 0x04), t3, lastchunk_fault)
-#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
- lduh [%src + (offset) + 0x00], %t0; \
- lduh [%src + (offset) + 0x02], %t1; \
- lduh [%src + (offset) + 0x04], %t2; \
- lduh [%src + (offset) + 0x06], %t3; \
- sth %t0, [%dst + (offset) + 0x00]; \
- sth %t1, [%dst + (offset) + 0x02]; \
- sth %t2, [%dst + (offset) + 0x04]; \
- sth %t3, [%dst + (offset) + 0x06];
+ .section .fixup,#alloc,#execinstr
+lastchunk_fault:
+ and %g1, 15, %g1
+ retl
+ sub %g1, %g5, %o0
+/* left: o3 + (o2 % 16) - offset */
+#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
+ LD(lduh, src, offset + 0x00, t0, halfchunk_fault) \
+ LD(lduh, src, offset + 0x02, t1, halfchunk_fault) \
+ LD(lduh, src, offset + 0x04, t2, halfchunk_fault) \
+ LD(lduh, src, offset + 0x06, t3, halfchunk_fault) \
+ ST(sth, dst, offset + 0x00, t0, halfchunk_fault) \
+ ST(sth, dst, offset + 0x02, t1, halfchunk_fault) \
+ ST(sth, dst, offset + 0x04, t2, halfchunk_fault) \
+ ST(sth, dst, offset + 0x06, t3, halfchunk_fault)
+
+/* left: o3 + (o2 % 16) + offset + 2 */
#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
- ldub [%src - (offset) - 0x02], %t0; \
- ldub [%src - (offset) - 0x01], %t1; \
- stb %t0, [%dst - (offset) - 0x02]; \
- stb %t1, [%dst - (offset) - 0x01];
+ LD(ldub, src, -(offset + 0x02), t0, halfchunk_fault) \
+ LD(ldub, src, -(offset + 0x01), t1, halfchunk_fault) \
+ ST(stb, dst, -(offset + 0x02), t0, halfchunk_fault) \
+ ST(stb, dst, -(offset + 0x01), t1, halfchunk_fault)
+
+ .section .fixup,#alloc,#execinstr
+halfchunk_fault:
+ and %o2, 15, %o2
+ sub %o3, %g5, %o3
+ retl
+ add %o2, %o3, %o0
+
+/* left: offset + 2 + (o2 % 2) */
+#define MOVE_LAST_SHORTCHUNK(src, dst, offset, t0, t1) \
+ LD(ldub, src, -(offset + 0x02), t0, last_shortchunk_fault) \
+ LD(ldub, src, -(offset + 0x01), t1, last_shortchunk_fault) \
+ ST(stb, dst, -(offset + 0x02), t0, last_shortchunk_fault) \
+ ST(stb, dst, -(offset + 0x01), t1, last_shortchunk_fault)
+
+ .section .fixup,#alloc,#execinstr
+last_shortchunk_fault:
+ and %o2, 1, %o2
+ retl
+ sub %o2, %g5, %o0
.text
.align 4
@@ -182,8 +218,6 @@ __copy_user: /* %o0=dst %o1=src %o2=len */
MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-80:
- EXT(5b, 80b, 50f)
subcc %g7, 128, %g7
add %o1, 128, %o1
bne 5b
@@ -201,7 +235,6 @@ __copy_user: /* %o0=dst %o1=src %o2=len */
jmpl %o5 + %lo(copy_user_table_end), %g0
add %o0, %g7, %o0
-copy_user_table:
MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
@@ -210,7 +243,6 @@ copy_user_table:
MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
copy_user_table_end:
- EXT(copy_user_table, copy_user_table_end, 51f)
be copy_user_last7
andcc %g1, 4, %g0
@@ -250,8 +282,6 @@ ldd_std:
MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-81:
- EXT(ldd_std, 81b, 52f)
subcc %g7, 128, %g7
add %o1, 128, %o1
bne ldd_std
@@ -290,8 +320,6 @@ cannot_optimize:
10:
MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
-82:
- EXT(10b, 82b, 53f)
subcc %o3, 0x10, %o3
add %o1, 0x10, %o1
bne 10b
@@ -308,8 +336,6 @@ byte_chunk:
MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
-83:
- EXT(byte_chunk, 83b, 54f)
subcc %o3, 0x10, %o3
add %o1, 0x10, %o1
bne byte_chunk
@@ -325,16 +351,14 @@ short_end:
add %o1, %o3, %o1
jmpl %o5 + %lo(short_table_end), %g0
andcc %o2, 1, %g0
-84:
- MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
- MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+ MOVE_LAST_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+ MOVE_LAST_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+ MOVE_LAST_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+ MOVE_LAST_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+ MOVE_LAST_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+ MOVE_LAST_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+ MOVE_LAST_SHORTCHUNK(o1, o0, 0x00, g2, g3)
short_table_end:
- EXT(84b, short_table_end, 55f)
be 1f
nop
EX(ldub [%o1], %g2, add %g0, 1)
@@ -363,123 +387,8 @@ short_aligned_end:
.section .fixup,#alloc,#execinstr
.align 4
97:
- mov %o2, %g3
-fixupretl:
retl
- mov %g3, %o0
-
-/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
-50:
-/* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK
- * happens. This is derived from the amount ldd reads, st stores, etc.
- * x = g2 % 12;
- * g3 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? 0 : (x - 4) * 4);
- * o0 += (g2 / 12) * 32;
- */
- cmp %g2, 12
- add %o0, %g7, %o0
- bcs 1f
- cmp %g2, 24
- bcs 2f
- cmp %g2, 36
- bcs 3f
- nop
- sub %g2, 12, %g2
- sub %g7, 32, %g7
-3: sub %g2, 12, %g2
- sub %g7, 32, %g7
-2: sub %g2, 12, %g2
- sub %g7, 32, %g7
-1: cmp %g2, 4
- bcs,a 60f
- clr %g2
- sub %g2, 4, %g2
- sll %g2, 2, %g2
-60: and %g1, 0x7f, %g3
- sub %o0, %g7, %o0
- add %g3, %g7, %g3
- ba fixupretl
- sub %g3, %g2, %g3
-51:
-/* i = 41 - g2; j = i % 6;
- * g3 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? (j + 1) * 4 : 16;
- * o0 -= (i / 6) * 16 + 16;
- */
- neg %g2
- and %g1, 0xf, %g1
- add %g2, 41, %g2
- add %o0, %g1, %o0
-1: cmp %g2, 6
- bcs,a 2f
- cmp %g2, 4
- add %g1, 16, %g1
- b 1b
- sub %g2, 6, %g2
-2: bcc,a 2f
- mov 16, %g2
- inc %g2
- sll %g2, 2, %g2
-2: add %g1, %g2, %g3
- ba fixupretl
- sub %o0, %g3, %o0
-52:
-/* g3 = g1 + g7 - (g2 / 8) * 32 + (g2 & 4) ? (g2 & 3) * 8 : 0;
- o0 += (g2 / 8) * 32 */
- andn %g2, 7, %g4
- add %o0, %g7, %o0
- andcc %g2, 4, %g0
- and %g2, 3, %g2
- sll %g4, 2, %g4
- sll %g2, 3, %g2
- bne 60b
- sub %g7, %g4, %g7
- ba 60b
- clr %g2
-53:
-/* g3 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 4) ? (g2 & 3) * 2 : 0;
- o0 += (g2 & 8) */
- and %g2, 3, %g4
- andcc %g2, 4, %g0
- and %g2, 8, %g2
- sll %g4, 1, %g4
- be 1f
- add %o0, %g2, %o0
- add %g2, %g4, %g2
-1: and %o2, 0xf, %g3
- add %g3, %o3, %g3
- ba fixupretl
- sub %g3, %g2, %g3
-54:
-/* g3 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 2) ? (g2 & 1) : 0;
- o0 += (g2 / 4) * 2 */
- srl %g2, 2, %o4
- and %g2, 1, %o5
- srl %g2, 1, %g2
- add %o4, %o4, %o4
- and %o5, %g2, %o5
- and %o2, 0xf, %o2
- add %o0, %o4, %o0
- sub %o3, %o5, %o3
- sub %o2, %o4, %o2
- ba fixupretl
- add %o2, %o3, %g3
-55:
-/* i = 27 - g2;
- g3 = (o2 & 1) + i / 4 * 2 + !(i & 3);
- o0 -= i / 4 * 2 + 1 */
- neg %g2
- and %o2, 1, %o2
- add %g2, 27, %g2
- srl %g2, 2, %o5
- andcc %g2, 3, %g0
- mov 1, %g2
- add %o5, %o5, %o5
- be,a 1f
- clr %g2
-1: add %g2, %o5, %g3
- sub %o0, %g3, %o0
- ba fixupretl
- add %g3, %o2, %g3
+ mov %o2, %o0
.globl __copy_user_end
__copy_user_end:
diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
index f427f34b8b79..eaff68213fdf 100644
--- a/arch/sparc/lib/memset.S
+++ b/arch/sparc/lib/memset.S
@@ -19,7 +19,7 @@
98: x,y; \
.section .fixup,ALLOC,EXECINSTR; \
.align 4; \
-99: ba 30f; \
+99: retl; \
a, b, %o0; \
.section __ex_table,ALLOC; \
.align 4; \
@@ -27,35 +27,44 @@
.text; \
.align 4
-#define EXT(start,end,handler) \
+#define STORE(source, base, offset, n) \
+98: std source, [base + offset + n]; \
+ .section .fixup,ALLOC,EXECINSTR; \
+ .align 4; \
+99: ba 30f; \
+ sub %o3, n - offset, %o3; \
.section __ex_table,ALLOC; \
.align 4; \
- .word start, 0, end, handler; \
+ .word 98b, 99b; \
.text; \
- .align 4
+ .align 4;
+
+#define STORE_LAST(source, base, offset, n) \
+ EX(std source, [base - offset - n], \
+ add %o1, offset + n);
/* Please don't change these macros, unless you change the logic
* in the .fixup section below as well.
* Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
-#define ZERO_BIG_BLOCK(base, offset, source) \
- std source, [base + offset + 0x00]; \
- std source, [base + offset + 0x08]; \
- std source, [base + offset + 0x10]; \
- std source, [base + offset + 0x18]; \
- std source, [base + offset + 0x20]; \
- std source, [base + offset + 0x28]; \
- std source, [base + offset + 0x30]; \
- std source, [base + offset + 0x38];
+#define ZERO_BIG_BLOCK(base, offset, source) \
+ STORE(source, base, offset, 0x00); \
+ STORE(source, base, offset, 0x08); \
+ STORE(source, base, offset, 0x10); \
+ STORE(source, base, offset, 0x18); \
+ STORE(source, base, offset, 0x20); \
+ STORE(source, base, offset, 0x28); \
+ STORE(source, base, offset, 0x30); \
+ STORE(source, base, offset, 0x38);
#define ZERO_LAST_BLOCKS(base, offset, source) \
- std source, [base - offset - 0x38]; \
- std source, [base - offset - 0x30]; \
- std source, [base - offset - 0x28]; \
- std source, [base - offset - 0x20]; \
- std source, [base - offset - 0x18]; \
- std source, [base - offset - 0x10]; \
- std source, [base - offset - 0x08]; \
- std source, [base - offset - 0x00];
+ STORE_LAST(source, base, offset, 0x38); \
+ STORE_LAST(source, base, offset, 0x30); \
+ STORE_LAST(source, base, offset, 0x28); \
+ STORE_LAST(source, base, offset, 0x20); \
+ STORE_LAST(source, base, offset, 0x18); \
+ STORE_LAST(source, base, offset, 0x10); \
+ STORE_LAST(source, base, offset, 0x08); \
+ STORE_LAST(source, base, offset, 0x00);
.text
.align 4
@@ -68,8 +77,6 @@ __bzero_begin:
.globl memset
EXPORT_SYMBOL(__bzero)
EXPORT_SYMBOL(memset)
- .globl __memset_start, __memset_end
-__memset_start:
memset:
mov %o0, %g1
mov 1, %g4
@@ -122,8 +129,6 @@ __bzero:
ZERO_BIG_BLOCK(%o0, 0x00, %g2)
subcc %o3, 128, %o3
ZERO_BIG_BLOCK(%o0, 0x40, %g2)
-11:
- EXT(10b, 11b, 20f)
bne 10b
add %o0, 128, %o0
@@ -138,11 +143,9 @@ __bzero:
jmp %o4
add %o0, %o2, %o0
-12:
ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
13:
- EXT(12b, 13b, 21f)
be 8f
andcc %o1, 4, %g0
@@ -182,37 +185,13 @@ __bzero:
5:
retl
clr %o0
-__memset_end:
.section .fixup,#alloc,#execinstr
.align 4
-20:
- cmp %g2, 8
- bleu 1f
- and %o1, 0x7f, %o1
- sub %g2, 9, %g2
- add %o3, 64, %o3
-1:
- sll %g2, 3, %g2
- add %o3, %o1, %o0
- b 30f
- sub %o0, %g2, %o0
-21:
- mov 8, %o0
- and %o1, 7, %o1
- sub %o0, %g2, %o0
- sll %o0, 3, %o0
- b 30f
- add %o0, %o1, %o0
30:
-/* %o4 is faulting address, %o5 is %pc where fault occurred */
- save %sp, -104, %sp
- mov %i5, %o0
- mov %i7, %o1
- call lookup_fault
- mov %i4, %o2
- ret
- restore
+ and %o1, 0x7f, %o1
+ retl
+ add %o3, %o1, %o0
.globl __bzero_end
__bzero_end:
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 68db1f859b02..871354aa3c00 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -8,7 +8,7 @@ ccflags-y := -Werror
obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
obj-y += fault_$(BITS).o
obj-y += init_$(BITS).o
-obj-$(CONFIG_SPARC32) += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32) += srmmu.o iommu.o io-unit.o
obj-$(CONFIG_SPARC32) += srmmu_access.o
obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
obj-$(CONFIG_SPARC32) += leon_mm.o
diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c
deleted file mode 100644
index 241b40641873..000000000000
--- a/arch/sparc/mm/extable.c
+++ /dev/null
@@ -1,107 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/sparc/mm/extable.c
- */
-
-#include <linux/module.h>
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-
-void sort_extable(struct exception_table_entry *start,
- struct exception_table_entry *finish)
-{
-}
-
-/* Caller knows they are in a range if ret->fixup == 0 */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *base,
- const size_t num,
- unsigned long value)
-{
- int i;
-
- /* Single insn entries are encoded as:
- * word 1: insn address
- * word 2: fixup code address
- *
- * Range entries are encoded as:
- * word 1: first insn address
- * word 2: 0
- * word 3: last insn address + 4 bytes
- * word 4: fixup code address
- *
- * Deleted entries are encoded as:
- * word 1: unused
- * word 2: -1
- *
- * See asm/uaccess.h for more details.
- */
-
- /* 1. Try to find an exact match. */
- for (i = 0; i < num; i++) {
- if (base[i].fixup == 0) {
- /* A range entry, skip both parts. */
- i++;
- continue;
- }
-
- /* A deleted entry; see trim_init_extable */
- if (base[i].fixup == -1)
- continue;
-
- if (base[i].insn == value)
- return &base[i];
- }
-
- /* 2. Try to find a range match. */
- for (i = 0; i < (num - 1); i++) {
- if (base[i].fixup)
- continue;
-
- if (base[i].insn <= value && base[i + 1].insn > value)
- return &base[i];
-
- i++;
- }
-
- return NULL;
-}
-
-#ifdef CONFIG_MODULES
-/* We could memmove them around; easier to mark the trimmed ones. */
-void trim_init_extable(struct module *m)
-{
- unsigned int i;
- bool range;
-
- for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
- range = m->extable[i].fixup == 0;
-
- if (within_module_init(m->extable[i].insn, m)) {
- m->extable[i].fixup = -1;
- if (range)
- m->extable[i+1].fixup = -1;
- }
- if (range)
- i++;
- }
-}
-#endif /* CONFIG_MODULES */
-
-/* Special extable search, which handles ranges. Returns fixup */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
-{
- const struct exception_table_entry *entry;
-
- entry = search_exception_tables(addr);
- if (!entry)
- return 0;
-
- /* Inside range? Fix g2 and return correct fixup */
- if (!entry->fixup) {
- *g2 = (addr - entry->insn) / 4;
- return (entry + 1)->fixup;
- }
-
- return entry->fixup;
-}
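Dropping the custom sort/search code means sparc32 entries are now plain insn/fixup pairs handled by the generic, sorted exception table, and the in-range bookkeeping via %g2 disappears with them. The lookup pattern that replaces search_extables_range(), as the fault_32.c hunk below now reads:

    const struct exception_table_entry *entry;

    entry = search_exception_tables(regs->pc);
    if (entry) {
            regs->pc  = entry->fixup;
            regs->npc = regs->pc + 4;
            return;
    }
    unhandled_fault(address, tsk, regs);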
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 40ce087dfecf..de2031c2b2d7 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
+#include <linux/extable.h>
#include <asm/page.h>
#include <asm/openprom.h>
@@ -54,54 +55,6 @@ static void __noreturn unhandled_fault(unsigned long address,
die_if_kernel("Oops", regs);
}
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
- unsigned long address)
-{
- struct pt_regs regs;
- unsigned long g2;
- unsigned int insn;
- int i;
-
- i = search_extables_range(ret_pc, &g2);
- switch (i) {
- case 3:
- /* load & store will be handled by fixup */
- return 3;
-
- case 1:
- /* store will be handled by fixup, load will bump out */
- /* for _to_ macros */
- insn = *((unsigned int *) pc);
- if ((insn >> 21) & 1)
- return 1;
- break;
-
- case 2:
- /* load will be handled by fixup, store will bump out */
- /* for _from_ macros */
- insn = *((unsigned int *) pc);
- if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
- return 2;
- break;
-
- default:
- break;
- }
-
- memset(&regs, 0, sizeof(regs));
- regs.pc = pc;
- regs.npc = pc + 4;
- __asm__ __volatile__(
- "rd %%psr, %0\n\t"
- "nop\n\t"
- "nop\n\t"
- "nop\n" : "=r" (regs.psr));
- unhandled_fault(address, current, &regs);
-
- /* Not reached */
- return 0;
-}
-
static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
unsigned long address, struct task_struct *tsk)
@@ -162,8 +115,6 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
struct vm_area_struct *vma;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- unsigned int fixup;
- unsigned long g2;
int from_user = !(regs->psr & PSR_PS);
int code;
vm_fault_t fault;
@@ -281,30 +232,19 @@ bad_area_nosemaphore:
/* Is this in ex_table? */
no_context:
- g2 = regs->u_regs[UREG_G2];
if (!from_user) {
- fixup = search_extables_range(regs->pc, &g2);
- /* Values below 10 are reserved for other things */
- if (fixup > 10) {
- extern const unsigned int __memset_start[];
- extern const unsigned int __memset_end[];
+ const struct exception_table_entry *entry;
+ entry = search_exception_tables(regs->pc);
#ifdef DEBUG_EXCEPTIONS
- printk("Exception: PC<%08lx> faddr<%08lx>\n",
- regs->pc, address);
- printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
- regs->pc, fixup, g2);
+ printk("Exception: PC<%08lx> faddr<%08lx>\n",
+ regs->pc, address);
+ printk("EX_TABLE: insn<%08lx> fixup<%08x>\n",
+ regs->pc, entry->fixup);
#endif
- if ((regs->pc >= (unsigned long)__memset_start &&
- regs->pc < (unsigned long)__memset_end)) {
- regs->u_regs[UREG_I4] = address;
- regs->u_regs[UREG_I5] = regs->pc;
- }
- regs->u_regs[UREG_G2] = g2;
- regs->pc = fixup;
- regs->npc = regs->pc + 4;
- return;
- }
+ regs->pc = entry->fixup;
+ regs->npc = regs->pc + 4;
+ return;
}
unhandled_fault(address, tsk, regs);
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index ad4b42f04988..0f49fada2093 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -177,10 +177,8 @@ static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
return sun4u_hugepage_shift_to_tte(entry, shift);
}
-pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
- struct page *page, int writeable)
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
- unsigned int shift = huge_page_shift(hstate_vma(vma));
pte_t pte;
pte = hugepage_shift_to_tte(entry, shift);
@@ -188,7 +186,7 @@ pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
#ifdef CONFIG_SPARC64
/* If this vma has ADI enabled on it, turn on TTE.mcd
*/
- if (vma->vm_flags & VM_SPARC_ADI)
+ if (flags & VM_SPARC_ADI)
return pte_mkmcd(pte);
else
return pte_mknotmcd(pte);
@@ -279,7 +277,7 @@ unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&p
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long sz)
{
pgd_t *pgd;
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index 6139c5700ccc..1e9f577f084d 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -292,8 +292,6 @@ void __init mem_init(void)
map_high_region(start_pfn, end_pfn);
}
-
- mem_init_print_info(NULL);
}
void sparc_flush_page_to_ram(struct page *page)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 182bb7bdaa0a..1b23639e2fcd 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -27,6 +27,7 @@
#include <linux/percpu.h>
#include <linux/mmzone.h>
#include <linux/gfp.h>
+#include <linux/bootmem_info.h>
#include <asm/head.h>
#include <asm/page.h>
@@ -903,7 +904,7 @@ struct node_mem_mask {
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
struct mdesc_mlgroup {
u64 node;
@@ -1059,7 +1060,7 @@ static void __init allocate_node_data(int nid)
{
struct pglist_data *p;
unsigned long start_pfn, end_pfn;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
NODE_DATA(nid) = memblock_alloc_node(sizeof(struct pglist_data),
SMP_CACHE_BYTES, nid);
@@ -1080,7 +1081,7 @@ static void __init allocate_node_data(int nid)
static void init_node_masks_nonnuma(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int i;
#endif
@@ -1090,7 +1091,7 @@ static void init_node_masks_nonnuma(void)
node_masks[0].match = 0;
num_node_masks = 1;
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
for (i = 0; i < NR_CPUS; i++)
numa_cpu_lookup_table[i] = 0;
@@ -1098,7 +1099,7 @@ static void init_node_masks_nonnuma(void)
#endif
}
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table);
@@ -2487,7 +2488,7 @@ int page_in_phys_avail(unsigned long paddr)
static void __init register_page_bootmem_info(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int i;
for_each_online_node(i)
@@ -2520,7 +2521,6 @@ void __init mem_init(void)
}
mark_page_reserved(mem_map_zero);
- mem_init_print_info(NULL);
if (tlb_type == cheetah || tlb_type == cheetah_plus)
cheetah_ecache_flush_init();
diff --git a/arch/sparc/mm/mm_32.h b/arch/sparc/mm/mm_32.h
index ce750a99eea9..ee55f1080634 100644
--- a/arch/sparc/mm/mm_32.h
+++ b/arch/sparc/mm/mm_32.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* fault_32.c - visible as they are called from assembler */
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
- unsigned long address);
asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
unsigned long address);
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 20ee14739333..9a725547578e 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>
+#include <linux/pagemap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index c3030db3325f..57cfd9a1c082 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -5,6 +5,7 @@ menu "UML-specific options"
config UML
bool
default y
+ select ARCH_EPHEMERAL_INODES
select ARCH_HAS_KCOV
select ARCH_NO_PREEMPT
select HAVE_ARCH_AUDITSYSCALL
diff --git a/arch/um/Kconfig.debug b/arch/um/Kconfig.debug
index 315d368e63ad..1dfb2959c73b 100644
--- a/arch/um/Kconfig.debug
+++ b/arch/um/Kconfig.debug
@@ -17,6 +17,7 @@ config GCOV
bool "Enable gcov support"
depends on DEBUG_INFO
depends on !KCOV
+ depends on !MODULES
help
This option allows developers to retrieve coverage data from a UML
session.
diff --git a/arch/um/configs/kunit_defconfig b/arch/um/configs/kunit_defconfig
deleted file mode 100644
index 9235b7d42d38..000000000000
--- a/arch/um/configs/kunit_defconfig
+++ /dev/null
@@ -1,3 +0,0 @@
-CONFIG_KUNIT=y
-CONFIG_KUNIT_TEST=y
-CONFIG_KUNIT_EXAMPLE_TEST=y
diff --git a/arch/um/drivers/cow.h b/arch/um/drivers/cow.h
index 103adac691ed..9a67c017000f 100644
--- a/arch/um/drivers/cow.h
+++ b/arch/um/drivers/cow.h
@@ -24,10 +24,3 @@ extern void cow_sizes(int version, __u64 size, int sectorsize, int align,
int *data_offset_out);
#endif
-
-/*
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/drivers/hostaudio_kern.c b/arch/um/drivers/hostaudio_kern.c
index d35d3f305a31..5b064d360cb7 100644
--- a/arch/um/drivers/hostaudio_kern.c
+++ b/arch/um/drivers/hostaudio_kern.c
@@ -122,13 +122,11 @@ static ssize_t hostaudio_write(struct file *file, const char __user *buffer,
static __poll_t hostaudio_poll(struct file *file,
struct poll_table_struct *wait)
{
- __poll_t mask = 0;
-
#ifdef DEBUG
printk(KERN_DEBUG "hostaudio: poll called (unimplemented)\n");
#endif
- return mask;
+ return 0;
}
static long hostaudio_ioctl(struct file *file,
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index c42b10024e26..6ead1e240457 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/reboot.h>
#include <linux/sched/debug.h>
#include <linux/proc_fs.h>
diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
index 47a02e60898d..d27a2a9faf3e 100644
--- a/arch/um/drivers/vector_kern.c
+++ b/arch/um/drivers/vector_kern.c
@@ -8,7 +8,6 @@
* Copyright (C) 2001 by various other people who didn't put their name here.
*/
-#include <linux/version.h>
#include <linux/memblock.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h
index 2bbf28cf3aa9..8ec7cd46dd96 100644
--- a/arch/um/include/asm/pgalloc.h
+++ b/arch/um/include/asm/pgalloc.h
@@ -19,7 +19,6 @@
set_pmd(pmd, __pmd(_PAGE_TABLE + \
((unsigned long long)page_to_pfn(pte) << \
(unsigned long long) PAGE_SHIFT)))
-#define pmd_pgtable(pmd) pmd_page(pmd)
/*
* Allocate and free page tables.
diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h
index 32106d31e4ab..8256ecc5b919 100644
--- a/arch/um/include/asm/pgtable-2level.h
+++ b/arch/um/include/asm/pgtable-2level.h
@@ -23,7 +23,6 @@
#define PTRS_PER_PTE 1024
#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
#define PTRS_PER_PGD 1024
-#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
index 7e6a4180db9d..9289a86643a9 100644
--- a/arch/um/include/asm/pgtable-3level.h
+++ b/arch/um/include/asm/pgtable-3level.h
@@ -41,7 +41,6 @@
#endif
#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index def376194dce..b9e20bbe2f75 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -302,7 +302,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
-#define update_mmu_cache(vma,address,ptep) do ; while (0)
+#define update_mmu_cache(vma,address,ptep) do {} while (0)
/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 5) & 0x1f)
diff --git a/arch/um/include/uapi/asm/Kbuild b/arch/um/include/uapi/asm/Kbuild
new file mode 100644
index 000000000000..f66554cd5c45
--- /dev/null
+++ b/arch/um/include/uapi/asm/Kbuild
@@ -0,0 +1 @@
+# SPDX-License-Identifier: GPL-2.0
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 5aa882011e04..e698e0c7dbdc 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -21,7 +21,6 @@ obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
obj-$(CONFIG_GPROF) += gprof_syms.o
-obj-$(CONFIG_GCOV) += gmon_syms.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S
index dacbfabf66d8..2f2a8ce92f1e 100644
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -6,6 +6,12 @@ OUTPUT_ARCH(ELF_ARCH)
ENTRY(_start)
jiffies = jiffies_64;
+VERSION {
+ {
+ local: *;
+ };
+}
+
SECTIONS
{
PROVIDE (__executable_start = START);
diff --git a/arch/um/kernel/gmon_syms.c b/arch/um/kernel/gmon_syms.c
deleted file mode 100644
index 9361a8eb9bf1..000000000000
--- a/arch/um/kernel/gmon_syms.c
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
- */
-
-#include <linux/module.h>
-
-extern void __bb_init_func(void *) __attribute__((weak));
-EXPORT_SYMBOL(__bb_init_func);
-
-extern void __gcov_init(void *) __attribute__((weak));
-EXPORT_SYMBOL(__gcov_init);
-extern void __gcov_merge_add(void *, unsigned int) __attribute__((weak));
-EXPORT_SYMBOL(__gcov_merge_add);
-extern void __gcov_exit(void) __attribute__((weak));
-EXPORT_SYMBOL(__gcov_exit);
diff --git a/arch/um/kernel/kmsg_dump.c b/arch/um/kernel/kmsg_dump.c
index 6516ef1f8274..0224fcb36e22 100644
--- a/arch/um/kernel/kmsg_dump.c
+++ b/arch/um/kernel/kmsg_dump.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kmsg_dump.h>
+#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/string.h>
#include <shared/init.h>
@@ -9,8 +10,11 @@
static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason)
{
+ static struct kmsg_dump_iter iter;
+ static DEFINE_SPINLOCK(lock);
static char line[1024];
struct console *con;
+ unsigned long flags;
size_t len = 0;
/* only dump kmsg when no console is available */
@@ -29,11 +33,18 @@ static void kmsg_dumper_stdout(struct kmsg_dumper *dumper,
if (con)
return;
+ if (!spin_trylock_irqsave(&lock, flags))
+ return;
+
+ kmsg_dump_rewind(&iter);
+
printf("kmsg_dump:\n");
- while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len)) {
+ while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len)) {
line[len] = '\0';
printf("%s", line);
}
+
+ spin_unlock_irqrestore(&lock, flags);
}
static struct kmsg_dumper kmsg_dumper = {
diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c
index 9242dc91d751..8e636ce02949 100644
--- a/arch/um/kernel/mem.c
+++ b/arch/um/kernel/mem.c
@@ -54,7 +54,6 @@ void __init mem_init(void)
memblock_free_all();
max_low_pfn = totalram_pages();
max_pfn = max_low_pfn;
- mem_init_print_info(NULL);
kmalloc_ok = 1;
}
@@ -73,8 +72,7 @@ static void __init one_page_table_init(pmd_t *pmd)
set_pmd(pmd, __pmd(_KERNPG_TABLE +
(unsigned long) __pa(pte)));
- if (pte != pte_offset_kernel(pmd, 0))
- BUG();
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
}
}
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index c5011064b5dd..457a38db368b 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -369,7 +369,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_page, sp, ip;
bool seen_sched = 0;
- if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
+ if ((p == NULL) || (p == current) || task_is_running(p))
return 0;
stack_page = (unsigned long) task_stack_page(p);
diff --git a/arch/um/kernel/skas/syscall.c b/arch/um/kernel/skas/syscall.c
index 3d91f89fd852..9ee19e566da3 100644
--- a/arch/um/kernel/skas/syscall.c
+++ b/arch/um/kernel/skas/syscall.c
@@ -41,7 +41,7 @@ void handle_syscall(struct uml_pt_regs *r)
goto out;
syscall = UPT_SYSCALL_NR(r);
- if (syscall >= 0 && syscall <= __NR_syscall_max)
+ if (syscall >= 0 && syscall < __NR_syscalls)
PT_REGS_SET_SYSCALL_RETURN(regs,
EXECUTE_SYSCALL(syscall, regs));
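
The bounds change above swaps an inclusive comparison against __NR_syscall_max for the equivalent exclusive comparison against the count __NR_syscalls. In miniature, with a hypothetical three-entry table:

/* Equivalent bounds: max (inclusive) vs. count (exclusive). */
#define NR_SYSCALLS	3		/* table entries 0, 1, 2 */
#define NR_SYSCALL_MAX	(NR_SYSCALLS - 1)

static int syscall_in_range(int nr)
{
	return nr >= 0 && nr < NR_SYSCALLS;	/* same as nr <= NR_SYSCALL_MAX */
}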
diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
index 74e07e748a9b..9512253947d5 100644
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/panic_notifier.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/utsname.h>
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S
index 45d957d7004c..7a8e2b123e29 100644
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -7,6 +7,12 @@ OUTPUT_ARCH(ELF_ARCH)
ENTRY(_start)
jiffies = jiffies_64;
+VERSION {
+ {
+ local: *;
+ };
+}
+
SECTIONS
{
/* This must contain the right address - not quite the default ELF one.*/
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 2792879d398e..49270655e827 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -33,6 +33,7 @@ config X86_64
select NEED_DMA_MAP_STATE
select SWIOTLB
select ARCH_HAS_ELFCORE_COMPAT
+ select ZONE_DMA32
config FORCE_DYNAMIC_FTRACE
def_bool y
@@ -60,7 +61,13 @@ config X86
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_32BIT_OFF_T if X86_32
select ARCH_CLOCKSOURCE_INIT
+ select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION
+ select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64 || (X86_32 && HIGHMEM)
+ select ARCH_ENABLE_MEMORY_HOTREMOVE if MEMORY_HOTPLUG
+ select ARCH_ENABLE_SPLIT_PMD_PTLOCK if (PGTABLE_LEVELS > 2) && (X86_64 || X86_PAE)
+ select ARCH_ENABLE_THP_MIGRATION if X86_64 && TRANSPARENT_HUGEPAGE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+ select ARCH_HAS_CACHE_LINE_SIZE
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE
select ARCH_HAS_DEVMEM_IS_ALLOWED
@@ -87,6 +94,7 @@ config X86
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAS_DEBUG_WX
+ select ARCH_HAS_ZONE_DMA_SET if EXPERT
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
select ARCH_MIGHT_HAVE_PC_PARPORT
@@ -97,15 +105,17 @@ config X86
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
- select ARCH_SUPPORTS_LTO_CLANG if X86_64
- select ARCH_SUPPORTS_LTO_CLANG_THIN if X86_64
+ select ARCH_SUPPORTS_LTO_CLANG
+ select ARCH_SUPPORTS_LTO_CLANG_THIN
select ARCH_USE_BUILTIN_BSWAP
+ select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_USE_SYM_ANNOTATIONS
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANT_DEFAULT_BPF_JIT if X86_64
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
+ select ARCH_WANTS_NO_INSTR
select ARCH_WANT_HUGE_PMD_SHARE
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_THP_SWAP if X86_64
@@ -164,7 +174,9 @@ config X86
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
select HAVE_ARCH_USERFAULTFD_WP if X86_64 && USERFAULTFD
+ select HAVE_ARCH_USERFAULTFD_MINOR if X86_64 && USERFAULTFD
select HAVE_ARCH_VMAP_STACK if X86_64
+ select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_ASM_MODVERSIONS
select HAVE_CMPXCHG_DOUBLE
@@ -313,9 +325,6 @@ config GENERIC_CALIBRATE_DELAY
config ARCH_HAS_CPU_RELAX
def_bool y
-config ARCH_HAS_CACHE_LINE_SIZE
- def_bool y
-
config ARCH_HAS_FILTER_PGPROT
def_bool y
@@ -337,9 +346,6 @@ config ARCH_SUSPEND_POSSIBLE
config ARCH_WANT_GENERAL_HUGETLB
def_bool y
-config ZONE_DMA32
- def_bool y if X86_64
-
config AUDIT_ARCH
def_bool y if X86_64
@@ -360,10 +366,6 @@ config X86_64_SMP
def_bool y
depends on X86_64 && SMP
-config X86_32_LAZY_GS
- def_bool y
- depends on X86_32 && !STACKPROTECTOR
-
config ARCH_SUPPORTS_UPROBES
def_bool y
@@ -386,20 +388,11 @@ config CC_HAS_SANE_STACKPROTECTOR
default $(success,$(srctree)/scripts/gcc-x86_32-has-stack-protector.sh $(CC))
help
We have to make sure stack protector is unconditionally disabled if
- the compiler produces broken code.
+ the compiler produces broken code or if it does not let us control
+ the segment on 32-bit kernels.
menu "Processor type and features"
-config ZONE_DMA
- bool "DMA memory allocation support" if EXPERT
- default y
- help
- DMA memory allocation support allows devices with less than 32-bit
- addressing to allocate within the first 16MB of address space.
- Disable if no such devices will be used.
-
- If unsure, say Y.
-
config SMP
bool "Symmetric multi-processing support"
help
@@ -571,6 +564,7 @@ config X86_UV
depends on X86_EXTENDED_PLATFORM
depends on NUMA
depends on EFI
+ depends on KEXEC_CORE
depends on X86_X2APIC
depends on PCI
help
@@ -777,6 +771,7 @@ if HYPERVISOR_GUEST
config PARAVIRT
bool "Enable paravirtualization code"
+ depends on HAVE_STATIC_CALL
help
This changes the kernel so it can modify itself when it is run
under a hypervisor, potentially improving performance significantly
@@ -1406,7 +1401,7 @@ config HIGHMEM4G
config HIGHMEM64G
bool "64GB"
- depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
+ depends on !M486SX && !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6
select X86_PAE
help
Select this if you have a 32-bit processor and more than 4
@@ -1518,6 +1513,7 @@ config AMD_MEM_ENCRYPT
select ARCH_USE_MEMREMAP_PROT
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
select INSTRUCTION_DECODER
+ select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS
help
Say yes to enable support for the encryption of system memory.
This requires an AMD processor that supports Secure Memory
@@ -1591,7 +1587,7 @@ config NODES_SHIFT
default "10" if MAXSMP
default "6" if X86_64
default "3"
- depends on NEED_MULTIPLE_NODES
+ depends on NUMA
help
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
@@ -1687,35 +1683,6 @@ config X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
Set whether the default state of memory_corruption_check is
on or off.
-config X86_RESERVE_LOW
- int "Amount of low memory, in kilobytes, to reserve for the BIOS"
- default 64
- range 4 640
- help
- Specify the amount of low memory to reserve for the BIOS.
-
- The first page contains BIOS data structures that the kernel
- must not use, so that page must always be reserved.
-
- By default we reserve the first 64K of physical RAM, as a
- number of BIOSes are known to corrupt that memory range
- during events such as suspend/resume or monitor cable
- insertion, so it must not be used by the kernel.
-
- You can set this to 4 if you are absolutely sure that you
- trust the BIOS to get all its memory reservations and usages
- right. If you know your BIOS have problems beyond the
- default 64K area, you can set this to 640 to avoid using the
- entire low memory range.
-
- If you have doubts about the BIOS (e.g. suspend/resume does
- not work or there's kernel crashes after certain hardware
- hotplug events) then you might want to enable
- X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check
- typical corruption patterns.
-
- Leave this to the default value of 64 if you are unsure.
-
config MATH_EMULATION
bool
depends on MODIFY_LDT_SYSCALL
@@ -1931,6 +1898,7 @@ config X86_SGX
depends on CRYPTO_SHA256=y
select SRCU
select MMU_NOTIFIER
+ select NUMA_KEEP_MEMINFO if NUMA
help
Intel(R) Software Guard eXtensions (SGX) is a set of CPU instructions
that can be used by applications to set aside private regions of code
@@ -2425,30 +2393,13 @@ config ARCH_HAS_ADD_PAGES
def_bool y
depends on X86_64 && ARCH_ENABLE_MEMORY_HOTPLUG
-config ARCH_ENABLE_MEMORY_HOTPLUG
- def_bool y
- depends on X86_64 || (X86_32 && HIGHMEM)
-
-config ARCH_ENABLE_MEMORY_HOTREMOVE
+config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
def_bool y
- depends on MEMORY_HOTPLUG
config USE_PERCPU_NUMA_NODE_ID
def_bool y
depends on NUMA
-config ARCH_ENABLE_SPLIT_PMD_PTLOCK
- def_bool y
- depends on X86_64 || X86_PAE
-
-config ARCH_ENABLE_HUGEPAGE_MIGRATION
- def_bool y
- depends on X86_64 && HUGETLB_PAGE && MIGRATION
-
-config ARCH_ENABLE_THP_MIGRATION
- def_bool y
- depends on X86_64 && TRANSPARENT_HUGEPAGE
-
menu "Power management and ACPI options"
config ARCH_HIBERNATION_HEADER
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 2d6d5a28c3bf..53eceaf71ab7 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -27,12 +27,13 @@ endif
REALMODE_CFLAGS := -m16 -g -Os -DDISABLE_BRANCH_PROFILING \
-Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
- -mno-mmx -mno-sse
+ -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
REALMODE_CFLAGS += -ffreestanding
REALMODE_CFLAGS += -fno-stack-protector
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
+REALMODE_CFLAGS += $(CLANG_FLAGS)
export REALMODE_CFLAGS
# BITS is used as extension for files which are available in a 32 bit
@@ -79,6 +80,14 @@ ifeq ($(CONFIG_X86_32),y)
# temporary until string.h is fixed
KBUILD_CFLAGS += -ffreestanding
+
+ ifeq ($(CONFIG_STACKPROTECTOR),y)
+ ifeq ($(CONFIG_SMP),y)
+ KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
+ else
+ KBUILD_CFLAGS += -mstack-protector-guard=global
+ endif
+ endif
else
BITS := 64
UTS_MACHINE := x86_64
@@ -129,8 +138,8 @@ ifdef CONFIG_X86_X32
x32_ld_ok := $(call try-run,\
/bin/echo -e '1: .quad 1b' | \
$(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" - && \
- $(OBJCOPY) -O elf32-x86-64 "$$TMP" "$$TMPO" && \
- $(LD) -m elf32_x86_64 "$$TMPO" -o "$$TMP",y,n)
+ $(OBJCOPY) -O elf32-x86-64 "$$TMP" "$$TMP.o" && \
+ $(LD) -m elf32_x86_64 "$$TMP.o" -o "$$TMP",y,n)
ifeq ($(x32_ld_ok),y)
CONFIG_X86_X32_ABI := y
KBUILD_AFLAGS += -DCONFIG_X86_X32_ABI
@@ -169,11 +178,6 @@ ifeq ($(ACCUMULATE_OUTGOING_ARGS), 1)
KBUILD_CFLAGS += $(call cc-option,-maccumulate-outgoing-args,)
endif
-ifdef CONFIG_LTO_CLANG
-KBUILD_LDFLAGS += -plugin-opt=-code-model=kernel \
- -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
-endif
-
# Workaround for a gcc prelease that unfortunately was shipped in a suse release
KBUILD_CFLAGS += -Wno-sign-compare
#
@@ -193,7 +197,13 @@ ifdef CONFIG_RETPOLINE
endif
endif
-KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
+KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
+
+ifdef CONFIG_LTO_CLANG
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
+KBUILD_LDFLAGS += -plugin-opt=-stack-alignment=$(if $(CONFIG_X86_32),4,8)
+endif
+endif
ifdef CONFIG_X86_NEED_RELOCS
LDFLAGS_vmlinux := --emit-relocs --discard-none
@@ -247,7 +257,7 @@ drivers-$(CONFIG_FB) += arch/x86/video/
boot := arch/x86/boot
-BOOT_TARGETS = bzdisk fdimage fdimage144 fdimage288 isoimage
+BOOT_TARGETS = bzdisk fdimage fdimage144 fdimage288 hdimage isoimage
PHONY += bzImage $(BOOT_TARGETS)
@@ -305,8 +315,9 @@ define archhelp
echo ' fdimage - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
echo ' fdimage144 - Create 1.4MB boot floppy image (arch/x86/boot/fdimage)'
echo ' fdimage288 - Create 2.8MB boot floppy image (arch/x86/boot/fdimage)'
+ echo ' hdimage - Create a BIOS/EFI hard disk image (arch/x86/boot/hdimage)'
echo ' isoimage - Create a boot CD-ROM image (arch/x86/boot/image.iso)'
- echo ' bzdisk/fdimage*/isoimage also accept:'
+ echo ' bzdisk/fdimage*/hdimage/isoimage also accept:'
echo ' FDARGS="..." arguments for the booted kernel'
echo ' FDINITRD=file initrd for the booted kernel'
echo ''
diff --git a/arch/x86/boot/.gitignore b/arch/x86/boot/.gitignore
index 9cc7f1357b9b..1189be057ebd 100644
--- a/arch/x86/boot/.gitignore
+++ b/arch/x86/boot/.gitignore
@@ -11,3 +11,4 @@ setup.elf
fdimage
mtools.conf
image.iso
+hdimage
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index fe605205b4ce..dfbc26a8e924 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -29,7 +29,7 @@ KCOV_INSTRUMENT := n
SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
targets := vmlinux.bin setup.bin setup.elf bzImage
-targets += fdimage fdimage144 fdimage288 image.iso mtools.conf
+targets += fdimage fdimage144 fdimage288 image.iso hdimage
subdir- := compressed
setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpuflags.o cpucheck.o
@@ -115,47 +115,49 @@ $(obj)/compressed/vmlinux: FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
# Set this if you want to pass append arguments to the
-# bzdisk/fdimage/isoimage kernel
+# bzdisk/fdimage/hdimage/isoimage kernel
FDARGS =
-# Set this if you want an initrd included with the
-# bzdisk/fdimage/isoimage kernel
+# Set this if you want one or more initrds included in the image
FDINITRD =
-image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,)
+imgdeps = $(obj)/bzImage $(obj)/mtools.conf $(src)/genimage.sh
$(obj)/mtools.conf: $(src)/mtools.conf.in
sed -e 's|@OBJ@|$(obj)|g' < $< > $@
+targets += mtools.conf
+
+# genimage.sh requires bash, but it also has a bunch of other
+# external dependencies.
quiet_cmd_genimage = GENIMAGE $3
-cmd_genimage = sh $(srctree)/$(src)/genimage.sh $2 $3 $(obj)/bzImage \
- $(obj)/mtools.conf '$(image_cmdline)' $(FDINITRD)
+cmd_genimage = $(BASH) $(srctree)/$(src)/genimage.sh $2 $3 $(obj)/bzImage \
+ $(obj)/mtools.conf '$(FDARGS)' $(FDINITRD)
-PHONY += bzdisk fdimage fdimage144 fdimage288 isoimage bzlilo install
+PHONY += bzdisk fdimage fdimage144 fdimage288 hdimage isoimage install
# This requires write access to /dev/fd0
-bzdisk: $(obj)/bzImage $(obj)/mtools.conf
+# All images require syslinux to be installed; hdimage also requires
+# EDK2/OVMF if the kernel is compiled with the EFI stub.
+bzdisk: $(imgdeps)
$(call cmd,genimage,bzdisk,/dev/fd0)
-# These require being root or having syslinux 2.02 or higher installed
-fdimage fdimage144: $(obj)/bzImage $(obj)/mtools.conf
+fdimage fdimage144: $(imgdeps)
$(call cmd,genimage,fdimage144,$(obj)/fdimage)
@$(kecho) 'Kernel: $(obj)/fdimage is ready'
-fdimage288: $(obj)/bzImage $(obj)/mtools.conf
+fdimage288: $(imgdeps)
$(call cmd,genimage,fdimage288,$(obj)/fdimage)
@$(kecho) 'Kernel: $(obj)/fdimage is ready'
-isoimage: $(obj)/bzImage
+hdimage: $(imgdeps)
+ $(call cmd,genimage,hdimage,$(obj)/hdimage)
+ @$(kecho) 'Kernel: $(obj)/hdimage is ready'
+
+isoimage: $(imgdeps)
$(call cmd,genimage,isoimage,$(obj)/image.iso)
@$(kecho) 'Kernel: $(obj)/image.iso is ready'
-bzlilo:
- if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi
- if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi
- cat $(obj)/bzImage > $(INSTALL_PATH)/vmlinuz
- cp System.map $(INSTALL_PATH)/
- if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
-
install:
- sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh \
+ $(KERNELRELEASE) $(obj)/bzImage \
System.map "$(INSTALL_PATH)"
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index e0bc3988c3fa..431bf7f846c3 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -30,6 +30,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
KBUILD_CFLAGS := -m$(BITS) -O2
KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
+KBUILD_CFLAGS += -Wundef
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small -mno-red-zone
@@ -46,11 +47,12 @@ KBUILD_CFLAGS += -D__DISABLE_EXPORTS
# Disable relocation relaxation in case the link is not PIE.
KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mrelax-relocations=no)
KBUILD_CFLAGS += -include $(srctree)/include/linux/hidden.h
+KBUILD_CFLAGS += $(CLANG_FLAGS)
-# sev-es.c indirectly inludes inat-table.h which is generated during
+# sev.c indirectly includes inat-table.h which is generated during
# compilation and stored in $(objtree). Add the directory to the includes so
# that the compiler finds it even with out-of-tree builds (make O=/some/path).
-CFLAGS_sev-es.o += -I$(objtree)/arch/x86/lib/
+CFLAGS_sev.o += -I$(objtree)/arch/x86/lib/
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
@@ -92,7 +94,7 @@ ifdef CONFIG_X86_64
vmlinux-objs-y += $(obj)/idt_64.o $(obj)/idt_handlers_64.o
vmlinux-objs-y += $(obj)/mem_encrypt.o
vmlinux-objs-y += $(obj)/pgtable_64.o
- vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev-es.o
+ vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
endif
vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
index c4bb0f9363f5..95a223b3e56a 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -5,7 +5,7 @@
* Early support for invoking 32-bit EFI services from a 64-bit kernel.
*
* Because this thunking occurs before ExitBootServices() we have to
- * restore the firmware's 32-bit GDT before we make EFI serivce calls,
+ * restore the firmware's 32-bit GDT before we make EFI service calls,
* since the firmware's 32-bit IDT is still currently installed and it
* needs to be able to service interrupts.
*
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index e94874f4bbc1..a2347ded77ea 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -34,6 +34,7 @@
#include <asm/asm-offsets.h>
#include <asm/bootparam.h>
#include <asm/desc_defs.h>
+#include <asm/trapnr.h>
#include "pgtable.h"
/*
@@ -107,9 +108,19 @@ SYM_FUNC_START(startup_32)
movl %eax, %gs
movl %eax, %ss
-/* setup a stack and make sure cpu supports long mode. */
+ /* Setup a stack and load CS from current GDT */
leal rva(boot_stack_end)(%ebp), %esp
+ pushl $__KERNEL32_CS
+ leal rva(1f)(%ebp), %eax
+ pushl %eax
+ lretl
+1:
+
+ /* Setup Exception handling for SEV-ES */
+ call startup32_load_idt
+
+ /* Make sure cpu supports long mode. */
call verify_cpu
testl %eax, %eax
jnz .Lno_longmode
@@ -172,11 +183,21 @@ SYM_FUNC_START(startup_32)
*/
call get_sev_encryption_bit
xorl %edx, %edx
+#ifdef CONFIG_AMD_MEM_ENCRYPT
testl %eax, %eax
jz 1f
subl $32, %eax /* Encryption bit is always above bit 31 */
bts %eax, %edx /* Set encryption mask for page tables */
+ /*
+ * Mark SEV as active in sev_status so that startup32_check_sev_cbit()
+ * will do a check. The sev_status memory will be fully initialized
+ * with the contents of MSR_AMD_SEV_STATUS later in
+ * set_sev_encryption_mask(). For now it is sufficient to know that SEV
+ * is active.
+ */
+ movl $1, rva(sev_status)(%ebp)
1:
+#endif
/* Initialize Page tables to 0 */
leal rva(pgtable)(%ebx), %edi
@@ -231,7 +252,7 @@ SYM_FUNC_START(startup_32)
/*
* Setup for the jump to 64bit mode
*
- * When the jump is performend we will be in long mode but
+ * When the jump is performed we will be in long mode but
* in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
* (and in turn EFER.LMA = 1). To jump into 64bit mode we use
* the new gdt/idt that has __KERNEL_CS with CS.L = 1.
@@ -261,6 +282,9 @@ SYM_FUNC_START(startup_32)
movl %esi, %edx
1:
#endif
+ /* Check if the C-bit position is correct when SEV is active */
+ call startup32_check_sev_cbit
+
pushl $__KERNEL_CS
pushl %eax
@@ -694,6 +718,19 @@ SYM_DATA_START(boot_idt)
.endr
SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+SYM_DATA_START(boot32_idt_desc)
+ .word boot32_idt_end - boot32_idt - 1
+ .long 0
+SYM_DATA_END(boot32_idt_desc)
+ .balign 8
+SYM_DATA_START(boot32_idt)
+ .rept 32
+ .quad 0
+ .endr
+SYM_DATA_END_LABEL(boot32_idt, SYM_L_GLOBAL, boot32_idt_end)
+#endif
+
#ifdef CONFIG_EFI_STUB
SYM_DATA(image_offset, .long 0)
#endif
@@ -786,6 +823,137 @@ SYM_DATA_START_LOCAL(loaded_image_proto)
SYM_DATA_END(loaded_image_proto)
#endif
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ __HEAD
+ .code32
+/*
+ * Write an IDT entry into boot32_idt
+ *
+ * Parameters:
+ *
+ * %eax: Handler address
+ * %edx: Vector number
+ *
+ * Physical offset is expected in %ebp
+ */
+SYM_FUNC_START(startup32_set_idt_entry)
+ push %ebx
+ push %ecx
+
+ /* IDT entry address to %ebx */
+ leal rva(boot32_idt)(%ebp), %ebx
+ shl $3, %edx
+ addl %edx, %ebx
+
+ /* Build IDT entry, lower 4 bytes */
+ movl %eax, %edx
+ andl $0x0000ffff, %edx # Target code segment offset [15:0]
+ movl $__KERNEL32_CS, %ecx # Target code segment selector
+ shl $16, %ecx
+ orl %ecx, %edx
+
+ /* Store lower 4 bytes to IDT */
+ movl %edx, (%ebx)
+
+ /* Build IDT entry, upper 4 bytes */
+ movl %eax, %edx
+ andl $0xffff0000, %edx # Target code segment offset [31:16]
+ orl $0x00008e00, %edx # Present, Type 32-bit Interrupt Gate
+
+ /* Store upper 4 bytes to IDT */
+ movl %edx, 4(%ebx)
+
+ pop %ecx
+ pop %ebx
+ ret
+SYM_FUNC_END(startup32_set_idt_entry)
+#endif
+
+SYM_FUNC_START(startup32_load_idt)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ /* #VC handler */
+ leal rva(startup32_vc_handler)(%ebp), %eax
+ movl $X86_TRAP_VC, %edx
+ call startup32_set_idt_entry
+
+ /* Load IDT */
+ leal rva(boot32_idt)(%ebp), %eax
+ movl %eax, rva(boot32_idt_desc+2)(%ebp)
+ lidt rva(boot32_idt_desc)(%ebp)
+#endif
+ ret
+SYM_FUNC_END(startup32_load_idt)
+
+/*
+ * Check for the correct C-bit position when the startup_32 boot-path is used.
+ *
+ * The check makes use of the fact that all memory is encrypted when paging is
+ * disabled. The function creates 64 bits of random data using the RDRAND
+ * instruction. RDRAND is mandatory for SEV guests, so always available. If the
+ * hypervisor violates that, the kernel will crash right here.
+ *
+ * The 64 bits of random data are stored to a memory location and at the same
+ * time kept in the %eax and %ebx registers. Since encryption is always active
+ * when paging is off the random data will be stored encrypted in main memory.
+ *
+ * Then paging is enabled. When the C-bit position is correct all memory is
+ * still mapped encrypted and comparing the register values with memory will
+ * succeed. An incorrect C-bit position will map all memory unencrypted, so that
+ * the compare will use the encrypted random data and fail.
+ */
+SYM_FUNC_START(startup32_check_sev_cbit)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ pushl %eax
+ pushl %ebx
+ pushl %ecx
+ pushl %edx
+
+ /* Check for non-zero sev_status */
+ movl rva(sev_status)(%ebp), %eax
+ testl %eax, %eax
+ jz 4f
+
+ /*
+	 * Get two 32-bit random values - retry rather than bail out if RDRAND
+	 * fails, since it is better to block forward progress than to continue
+	 * without a random value.
+ */
+1: rdrand %eax
+ jnc 1b
+2: rdrand %ebx
+ jnc 2b
+
+ /* Store to memory and keep it in the registers */
+ movl %eax, rva(sev_check_data)(%ebp)
+ movl %ebx, rva(sev_check_data+4)(%ebp)
+
+ /* Enable paging to see if encryption is active */
+ movl %cr0, %edx /* Backup %cr0 in %edx */
+ movl $(X86_CR0_PG | X86_CR0_PE), %ecx /* Enable Paging and Protected mode */
+ movl %ecx, %cr0
+
+ cmpl %eax, rva(sev_check_data)(%ebp)
+ jne 3f
+ cmpl %ebx, rva(sev_check_data+4)(%ebp)
+ jne 3f
+
+ movl %edx, %cr0 /* Restore previous %cr0 */
+
+ jmp 4f
+
+3: /* Check failed - hlt the machine */
+ hlt
+ jmp 3b
+
+4:
+ popl %edx
+ popl %ecx
+ popl %ebx
+ popl %eax
+#endif
+ ret
+SYM_FUNC_END(startup32_check_sev_cbit)
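
The comment block above already explains the algorithm; the following C rendering is purely illustrative (the real check has to run in 32-bit assembly with paging initially off), and every helper name in it is hypothetical:

/* Illustrative sketch of startup32_check_sev_cbit - not kernel API. */
static u64 sev_check_data;		/* backing store, as in the asm */

static void check_sev_cbit_sketch(void)
{
	u64 rnd = rdrand64_retry();	/* spin until RDRAND succeeds   */

	sev_check_data = rnd;		/* paging off: store encrypts   */
	enable_paging();		/* mappings now honor the C-bit */

	if (sev_check_data != rnd)	/* wrong C-bit: read decrypts   */
		for (;;)		/* garbage, so halt forever     */
			hlt();
}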
+
/*
* Stack and heap for uncompression
*/
diff --git a/arch/x86/boot/compressed/idt_64.c b/arch/x86/boot/compressed/idt_64.c
index 804a502ee0d2..9b93567d663a 100644
--- a/arch/x86/boot/compressed/idt_64.c
+++ b/arch/x86/boot/compressed/idt_64.c
@@ -52,3 +52,17 @@ void load_stage2_idt(void)
load_boot_idt(&boot_idt_desc);
}
+
+void cleanup_exception_handling(void)
+{
+ /*
+ * Flush GHCB from cache and map it encrypted again when running as
+ * SEV-ES guest.
+ */
+ sev_es_shutdown_ghcb();
+
+ /* Set a null-idt, disabling #PF and #VC handling */
+ boot_idt_desc.size = 0;
+ boot_idt_desc.address = 0;
+ load_boot_idt(&boot_idt_desc);
+}
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index b92fffbe761f..e36690778497 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -639,9 +639,9 @@ static bool process_mem_region(struct mem_vector *region,
if (slot_area_index == MAX_SLOT_AREA) {
debug_putstr("Aborted e820/efi memmap scan (slot_areas full)!\n");
- return 1;
+ return true;
}
- return 0;
+ return false;
}
#if defined(CONFIG_MEMORY_HOTREMOVE) && defined(CONFIG_ACPI)
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index aa561795efd1..c1e81a848b2a 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -23,12 +23,6 @@ SYM_FUNC_START(get_sev_encryption_bit)
push %ecx
push %edx
- /* Check if running under a hypervisor */
- movl $1, %eax
- cpuid
- bt $31, %ecx /* Check the hypervisor bit */
- jnc .Lno_sev
-
movl $0x80000000, %eax /* CPUID to check the highest leaf */
cpuid
cmpl $0x8000001f, %eax /* See if 0x8000001f is available */
@@ -67,10 +61,132 @@ SYM_FUNC_START(get_sev_encryption_bit)
ret
SYM_FUNC_END(get_sev_encryption_bit)
+/**
+ * sev_es_req_cpuid - Request a CPUID value from the Hypervisor using
+ * the GHCB MSR protocol
+ *
+ * @%eax: Register to request (0=EAX, 1=EBX, 2=ECX, 3=EDX)
+ * @%edx: CPUID Function
+ *
+ * Returns 0 in %eax on success, non-zero on failure
+ * %edx returns CPUID value on success
+ */
+SYM_CODE_START_LOCAL(sev_es_req_cpuid)
+ shll $30, %eax
+ orl $0x00000004, %eax
+ movl $MSR_AMD64_SEV_ES_GHCB, %ecx
+ wrmsr
+ rep; vmmcall # VMGEXIT
+ rdmsr
+
+ /* Check response */
+ movl %eax, %ecx
+ andl $0x3ffff000, %ecx # Bits [12-29] MBZ
+ jnz 2f
+
+ /* Check return code */
+ andl $0xfff, %eax
+ cmpl $5, %eax
+ jne 2f
+
+ /* All good - return success */
+ xorl %eax, %eax
+1:
+ ret
+2:
+ movl $-1, %eax
+ jmp 1b
+SYM_CODE_END(sev_es_req_cpuid)
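
The MSR encoding the helper above relies on can be restated in C. The constants are taken directly from the assembly (request code 0x004 with the register index in bits 30-31 and the CPUID function in the upper half; response code 0x005 with bits 12-29 required to be zero); treat this as a sketch of what the code does, not a definitive statement of the GHCB MSR protocol:

/* Sketch of the CPUID request/response encoding used above. */
static u64 ghcb_cpuid_req(u32 reg, u32 fn)
{
	return ((u64)fn << 32) | ((u64)reg << 30) | 0x004;
}

static int ghcb_cpuid_resp(u64 msr, u32 *value)
{
	if (msr & 0x3ffff000)		/* bits [12-29] MBZ         */
		return -1;
	if ((msr & 0xfff) != 0x005)	/* CPUID response code      */
		return -1;
	*value = msr >> 32;		/* requested register value */
	return 0;
}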
+
+SYM_CODE_START(startup32_vc_handler)
+ pushl %eax
+ pushl %ebx
+ pushl %ecx
+ pushl %edx
+
+ /* Keep CPUID function in %ebx */
+ movl %eax, %ebx
+
+ /* Check if error-code == SVM_EXIT_CPUID */
+ cmpl $0x72, 16(%esp)
+ jne .Lfail
+
+ movl $0, %eax # Request CPUID[fn].EAX
+ movl %ebx, %edx # CPUID fn
+ call sev_es_req_cpuid # Call helper
+ testl %eax, %eax # Check return code
+ jnz .Lfail
+ movl %edx, 12(%esp) # Store result
+
+ movl $1, %eax # Request CPUID[fn].EBX
+ movl %ebx, %edx # CPUID fn
+ call sev_es_req_cpuid # Call helper
+ testl %eax, %eax # Check return code
+ jnz .Lfail
+ movl %edx, 8(%esp) # Store result
+
+ movl $2, %eax # Request CPUID[fn].ECX
+ movl %ebx, %edx # CPUID fn
+ call sev_es_req_cpuid # Call helper
+ testl %eax, %eax # Check return code
+ jnz .Lfail
+ movl %edx, 4(%esp) # Store result
+
+ movl $3, %eax # Request CPUID[fn].EDX
+ movl %ebx, %edx # CPUID fn
+ call sev_es_req_cpuid # Call helper
+ testl %eax, %eax # Check return code
+ jnz .Lfail
+ movl %edx, 0(%esp) # Store result
+
+ /*
+ * Sanity check CPUID results from the Hypervisor. See comment in
+ * do_vc_no_ghcb() for more details on why this is necessary.
+ */
+
+ /* Fail if SEV leaf not available in CPUID[0x80000000].EAX */
+ cmpl $0x80000000, %ebx
+ jne .Lcheck_sev
+ cmpl $0x8000001f, 12(%esp)
+ jb .Lfail
+ jmp .Ldone
+
+.Lcheck_sev:
+ /* Fail if SEV bit not set in CPUID[0x8000001f].EAX[1] */
+ cmpl $0x8000001f, %ebx
+ jne .Ldone
+ btl $1, 12(%esp)
+ jnc .Lfail
+
+.Ldone:
+ popl %edx
+ popl %ecx
+ popl %ebx
+ popl %eax
+
+ /* Remove error code */
+ addl $4, %esp
+
+ /* Jump over CPUID instruction */
+ addl $2, (%esp)
+
+ iret
+.Lfail:
+ /* Send terminate request to Hypervisor */
+ movl $0x100, %eax
+ xorl %edx, %edx
+ movl $MSR_AMD64_SEV_ES_GHCB, %ecx
+ wrmsr
+ rep; vmmcall
+
+ /* If request fails, go to hlt loop */
+ hlt
+ jmp .Lfail
+SYM_CODE_END(startup32_vc_handler)
+
.code64
#include "../../kernel/sev_verify_cbit.S"
-
SYM_FUNC_START(set_sev_encryption_mask)
#ifdef CONFIG_AMD_MEM_ENCRYPT
push %rbp
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 267e7f93050e..743f13ea25c1 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -172,7 +172,7 @@ void __puthex(unsigned long value)
}
}
-#if CONFIG_X86_NEED_RELOCS
+#ifdef CONFIG_X86_NEED_RELOCS
static void handle_relocations(void *output, unsigned long output_len,
unsigned long virt_addr)
{
@@ -430,8 +430,6 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
error("Destination address too large");
#endif
#ifndef CONFIG_RELOCATABLE
- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
- error("Destination address does not match LOAD_PHYSICAL_ADDR");
if (virt_addr != LOAD_PHYSICAL_ADDR)
error("Destination virtual address changed when not relocatable");
#endif
@@ -443,11 +441,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
handle_relocations(output, output_len, virt_addr);
debug_putstr("done.\nBooting the kernel.\n");
- /*
- * Flush GHCB from cache and map it encrypted again when running as
- * SEV-ES guest.
- */
- sev_es_shutdown_ghcb();
+ /* Disable exception handling before booting the kernel */
+ cleanup_exception_handling();
return output;
}
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 901ea5ebec22..31139256859f 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -79,7 +79,7 @@ struct mem_vector {
u64 size;
};
-#if CONFIG_RANDOMIZE_BASE
+#ifdef CONFIG_RANDOMIZE_BASE
/* kaslr.c */
void choose_random_location(unsigned long input,
unsigned long input_size,
@@ -155,6 +155,12 @@ extern pteval_t __default_kernel_pte_mask;
extern gate_desc boot_idt[BOOT_IDT_ENTRIES];
extern struct desc_ptr boot_idt_desc;
+#ifdef CONFIG_X86_64
+void cleanup_exception_handling(void);
+#else
+static inline void cleanup_exception_handling(void) { }
+#endif
+
/* IDT Entry Points */
void boot_page_fault(void);
void boot_stage1_vc(void);
diff --git a/arch/x86/boot/compressed/sev-es.c b/arch/x86/boot/compressed/sev.c
index 27826c265aab..670e998fe930 100644
--- a/arch/x86/boot/compressed/sev-es.c
+++ b/arch/x86/boot/compressed/sev.c
@@ -13,7 +13,7 @@
#include "misc.h"
#include <asm/pgtable_types.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
#include <asm/trapnr.h>
#include <asm/trap_pf.h>
#include <asm/msr-index.h>
@@ -78,16 +78,15 @@ static inline void sev_es_wr_ghcb_msr(u64 val)
static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
char buffer[MAX_INSN_SIZE];
- enum es_result ret;
+ int ret;
memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
- insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE, 1);
- insn_get_length(&ctxt->insn);
+ ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
+ if (ret < 0)
+ return ES_DECODE_FAILED;
- ret = ctxt->insn.immediate.got ? ES_OK : ES_DECODE_FAILED;
-
- return ret;
+ return ES_OK;
}
static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
@@ -118,7 +117,7 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
#include "../../lib/insn.c"
/* Include code for early handlers */
-#include "../../kernel/sev-es-shared.c"
+#include "../../kernel/sev-shared.c"
static bool early_setup_sev_es(void)
{
@@ -200,14 +199,8 @@ void do_boot_stage2_vc(struct pt_regs *regs, unsigned long exit_code)
}
finish:
- if (result == ES_OK) {
+ if (result == ES_OK)
vc_finish_insn(&ctxt);
- } else if (result != ES_RETRY) {
- /*
- * For now, just halt the machine. That makes debugging easier,
- * later we just call sev_es_terminate() here.
- */
- while (true)
- asm volatile("hlt\n");
- }
+ else if (result != ES_RETRY)
+ sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
}
diff --git a/arch/x86/boot/genimage.sh b/arch/x86/boot/genimage.sh
index 6a10d52a4145..0673fdfc1a11 100644
--- a/arch/x86/boot/genimage.sh
+++ b/arch/x86/boot/genimage.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
@@ -8,15 +8,24 @@
#
# Adapted from code in arch/x86/boot/Makefile by H. Peter Anvin and others
#
-# "make fdimage/fdimage144/fdimage288/isoimage" script for x86 architecture
+# "make fdimage/fdimage144/fdimage288/hdimage/isoimage"
+# script for x86 architecture
#
# Arguments:
-# $1 - fdimage format
-# $2 - target image file
-# $3 - kernel bzImage file
-# $4 - mtool configuration file
-# $5 - kernel cmdline
-# $6 - inird image file
+# $1 - fdimage format
+# $2 - target image file
+# $3 - kernel bzImage file
+# $4 - mtools configuration file
+# $5 - kernel cmdline
+# $6+ - initrd image file(s)
+#
+# This script requires:
+# bash
+# syslinux
+# mtools (for fdimage* and hdimage)
+# edk2/OVMF (for hdimage)
+#
+# Otherwise try to stick to POSIX shell commands...
#
# Use "make V=1" to debug this script
@@ -26,105 +35,237 @@ case "${KBUILD_VERBOSE}" in
;;
esac
-verify () {
- if [ ! -f "$1" ]; then
- echo "" 1>&2
- echo " *** Missing file: $1" 1>&2
- echo "" 1>&2
- exit 1
+# Exit the top-level shell with an error
+topshell=$$
+trap 'exit 1' USR1
+die() {
+ echo "" 1>&2
+ echo " *** $*" 1>&2
+ echo "" 1>&2
+ kill -USR1 $topshell
+}
+
+# Verify the existence and readability of a file
+verify() {
+ if [ ! -f "$1" -o ! -r "$1" ]; then
+ die "Missing file: $1"
fi
}
+diskfmt="$1"
+FIMAGE="$2"
+FBZIMAGE="$3"
+MTOOLSRC="$4"
+KCMDLINE="$5"
+shift 5 # Remaining arguments = initrd files
+
+export MTOOLSRC
-export MTOOLSRC=$4
-FIMAGE=$2
-FBZIMAGE=$3
-KCMDLINE=$5
-FDINITRD=$6
+# common options for dd
+dd='dd iflag=fullblock'
# Make sure the files actually exist
verify "$FBZIMAGE"
-genbzdisk() {
- verify "$MTOOLSRC"
- mformat a:
- syslinux $FIMAGE
- echo "$KCMDLINE" | mcopy - a:syslinux.cfg
- if [ -f "$FDINITRD" ] ; then
- mcopy "$FDINITRD" a:initrd.img
+declare -a FDINITRDS
+irdpfx=' initrd='
+initrdopts_syslinux=''
+initrdopts_efi=''
+for f in "$@"; do
+ if [ -f "$f" -a -r "$f" ]; then
+ FDINITRDS=("${FDINITRDS[@]}" "$f")
+ fname="$(basename "$f")"
+ initrdopts_syslinux="${initrdopts_syslinux}${irdpfx}${fname}"
+ irdpfx=,
+ initrdopts_efi="${initrdopts_efi} initrd=${fname}"
fi
- mcopy $FBZIMAGE a:linux
+done
+
+# Read a $3-byte littleendian unsigned value at offset $2 from file $1
+le() {
+ local n=0
+ local m=1
+ for b in $(od -A n -v -j $2 -N $3 -t u1 "$1"); do
+ n=$((n + b*m))
+ m=$((m * 256))
+ done
+ echo $n
}
-genfdimage144() {
- verify "$MTOOLSRC"
- dd if=/dev/zero of=$FIMAGE bs=1024 count=1440 2> /dev/null
- mformat v:
- syslinux $FIMAGE
- echo "$KCMDLINE" | mcopy - v:syslinux.cfg
- if [ -f "$FDINITRD" ] ; then
- mcopy "$FDINITRD" v:initrd.img
- fi
- mcopy $FBZIMAGE v:linux
+# Get the EFI architecture name such that boot{name}.efi is the default
+# boot file name. Returns false with no output if the file is not an
+# EFI image or otherwise unknown.
+efiarch() {
+ [ -f "$1" ] || return
+ [ $(le "$1" 0 2) -eq 23117 ] || return # MZ magic
+ peoffs=$(le "$1" 60 4) # PE header offset
+ [ $peoffs -ge 64 ] || return
+ [ $(le "$1" $peoffs 4) -eq 17744 ] || return # PE magic
+ case $(le "$1" $((peoffs+4+20)) 2) in # PE type
+ 267) ;; # PE32
+ 523) ;; # PE32+
+ *) return 1 ;; # Invalid
+ esac
+ [ $(le "$1" $((peoffs+4+20+68)) 2) -eq 10 ] || return # EFI app
+ case $(le "$1" $((peoffs+4)) 2) in # Machine type
+ 332) echo i386 ;;
+ 450) echo arm ;;
+ 512) echo ia64 ;;
+ 20530) echo riscv32 ;;
+ 20580) echo riscv64 ;;
+ 20776) echo riscv128 ;;
+ 34404) echo x64 ;;
+ 43620) echo aa64 ;;
+ esac
}
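
The same probe in C, reusing le_read() from the sketch above. The magic numbers mirror the script (23117 is the "MZ" DOS magic, 17744 is "PE\0\0", subsystem 10 is an EFI application); only three of the machine types are shown, and the whole thing is illustrative:

/* Hypothetical C version of the efiarch() probe above. */
static const char *pe_efi_arch(const unsigned char *img, unsigned long len)
{
	unsigned long pe;

	if (len < 64 || le_read(img, 2) != 23117)	/* MZ magic  */
		return NULL;
	pe = le_read(img + 60, 4);			/* PE offset */
	if (pe < 64 || pe + 94 > len)
		return NULL;
	if (le_read(img + pe, 4) != 17744)		/* PE magic  */
		return NULL;
	if (le_read(img + pe + 4 + 20 + 68, 2) != 10)	/* EFI app   */
		return NULL;
	switch (le_read(img + pe + 4, 2)) {		/* machine   */
	case 332:	return "i386";
	case 34404:	return "x64";
	case 43620:	return "aa64";
	default:	return NULL;	/* other machine types elided */
	}
}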
-genfdimage288() {
- verify "$MTOOLSRC"
- dd if=/dev/zero of=$FIMAGE bs=1024 count=2880 2> /dev/null
- mformat w:
- syslinux $FIMAGE
- echo "$KCMDLINE" | mcopy - W:syslinux.cfg
- if [ -f "$FDINITRD" ] ; then
- mcopy "$FDINITRD" w:initrd.img
- fi
- mcopy $FBZIMAGE w:linux
+# Get the combined sizes in bytes of the files given, counting sparse
+# files as full length, and padding each file to a 4K block size
+filesizes() {
+ local t=0
+ local s
+ for s in $(ls -lnL "$@" 2>/dev/null | awk '/^-/{ print $5; }'); do
+ t=$((t + ((s+4095)/4096)*4096))
+ done
+ echo $t
}
-geniso() {
- tmp_dir=`dirname $FIMAGE`/isoimage
- rm -rf $tmp_dir
- mkdir $tmp_dir
- for i in lib lib64 share ; do
- for j in syslinux ISOLINUX ; do
- if [ -f /usr/$i/$j/isolinux.bin ] ; then
- isolinux=/usr/$i/$j/isolinux.bin
- fi
+# Expand directory names which should be in /usr/share into a list
+# of possible alternatives
+sharedirs() {
+ local dir file
+ for dir in /usr/share /usr/lib64 /usr/lib; do
+ for file; do
+ echo "$dir/$file"
+ echo "$dir/${file^^}"
done
- for j in syslinux syslinux/modules/bios ; do
- if [ -f /usr/$i/$j/ldlinux.c32 ]; then
- ldlinux=/usr/$i/$j/ldlinux.c32
- fi
+ done
+}
+efidirs() {
+ local dir file
+ for dir in /usr/share /boot /usr/lib64 /usr/lib; do
+ for file; do
+ echo "$dir/$file"
+ echo "$dir/${file^^}"
done
- if [ -n "$isolinux" -a -n "$ldlinux" ] ; then
- break
+ done
+}
+
+findsyslinux() {
+ local f="$(find -L $(sharedirs syslinux isolinux) \
+ -name "$1" -readable -type f -print -quit 2>/dev/null)"
+ if [ ! -f "$f" ]; then
+ die "Need a $1 file, please install syslinux/isolinux."
+ fi
+ echo "$f"
+ return 0
+}
+
+findovmf() {
+ local arch="$1"
+ shift
+ local -a names=(-false)
+ local name f
+ for name; do
+ names=("${names[@]}" -or -iname "$name")
+ done
+ for f in $(find -L $(efidirs edk2 ovmf) \
+ \( "${names[@]}" \) -readable -type f \
+ -print 2>/dev/null); do
+ if [ "$(efiarch "$f")" = "$arch" ]; then
+ echo "$f"
+ return 0
fi
done
- if [ -z "$isolinux" ] ; then
- echo 'Need an isolinux.bin file, please install syslinux/isolinux.'
- exit 1
+ die "Need a $1 file for $arch, please install EDK2/OVMF."
+}
+
+do_mcopy() {
+ if [ ${#FDINITRDS[@]} -gt 0 ]; then
+ mcopy "${FDINITRDS[@]}" "$1"
+ fi
+ if [ -n "$efishell" ]; then
+ mmd "$1"EFI "$1"EFI/Boot
+ mcopy "$efishell" "$1"EFI/Boot/boot${kefiarch}.efi
fi
- if [ -z "$ldlinux" ] ; then
- echo 'Need an ldlinux.c32 file, please install syslinux/isolinux.'
- exit 1
+ if [ -n "$kefiarch" ]; then
+ echo linux "$KCMDLINE$initrdopts_efi" | \
+ mcopy - "$1"startup.nsh
fi
- cp $isolinux $tmp_dir
- cp $ldlinux $tmp_dir
- cp $FBZIMAGE $tmp_dir/linux
- echo "$KCMDLINE" > $tmp_dir/isolinux.cfg
- if [ -f "$FDINITRD" ] ; then
- cp "$FDINITRD" $tmp_dir/initrd.img
+ echo default linux "$KCMDLINE$initrdopts_syslinux" | \
+ mcopy - "$1"syslinux.cfg
+ mcopy "$FBZIMAGE" "$1"linux
+}
+
+genbzdisk() {
+ verify "$MTOOLSRC"
+ mformat -v 'LINUX_BOOT' a:
+ syslinux "$FIMAGE"
+ do_mcopy a:
+}
+
+genfdimage144() {
+ verify "$MTOOLSRC"
+ $dd if=/dev/zero of="$FIMAGE" bs=1024 count=1440 2>/dev/null
+ mformat -v 'LINUX_BOOT' v:
+ syslinux "$FIMAGE"
+ do_mcopy v:
+}
+
+genfdimage288() {
+ verify "$MTOOLSRC"
+ $dd if=/dev/zero of="$FIMAGE" bs=1024 count=2880 2>/dev/null
+ mformat -v 'LINUX_BOOT' w:
+ syslinux "$FIMAGE"
+ do_mcopy w:
+}
+
+genhdimage() {
+ verify "$MTOOLSRC"
+ mbr="$(findsyslinux mbr.bin)"
+ kefiarch="$(efiarch "$FBZIMAGE")"
+ if [ -n "$kefiarch" ]; then
+ # The efishell provides command line handling
+ efishell="$(findovmf $kefiarch shell.efi shell${kefiarch}.efi)"
+ ptype='-T 0xef' # EFI system partition, no GPT
fi
- genisoimage -J -r -input-charset=utf-8 -quiet -o $FIMAGE \
- -b isolinux.bin -c boot.cat -no-emul-boot -boot-load-size 4 \
- -boot-info-table $tmp_dir
- isohybrid $FIMAGE 2>/dev/null || true
- rm -rf $tmp_dir
+ sizes=$(filesizes "$FBZIMAGE" "${FDINITRDS[@]}" "$efishell")
+ # Allow 1% + 1 MiB for filesystem and partition table overhead,
+ # syslinux, and config files
+ megs=$(((sizes + sizes/100 + 2*1024*1024 - 1)/(1024*1024)))
+ $dd if=/dev/zero of="$FIMAGE" bs=$((1024*1024)) count=$megs 2>/dev/null
+ mpartition -I -c -s 32 -h 64 -t $megs $ptype -b 512 -a h:
+ $dd if="$mbr" of="$FIMAGE" bs=440 count=1 conv=notrunc 2>/dev/null
+ mformat -v 'LINUX_BOOT' -s 32 -h 64 -t $megs h:
+ syslinux --offset $((512*512)) "$FIMAGE"
+ do_mcopy h:
+}
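
The sizing heuristic above (payload plus 1% plus roughly 1 MiB of slack, rounded up to whole MiB) is easy to sanity-check; for an assumed 20 MiB of payload files:

/* Worked example of the hdimage size arithmetic. */
#include <stdio.h>

int main(void)
{
	long sizes = 20L * 1024 * 1024;		/* assumed payload total */
	long megs = (sizes + sizes / 100 + 2L * 1024 * 1024 - 1)
		    / (1024 * 1024);

	printf("%ld MiB image\n", megs);	/* prints: 22 MiB image  */
	return 0;
}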
+
+geniso() {
+ tmp_dir="$(dirname "$FIMAGE")/isoimage"
+ rm -rf "$tmp_dir"
+ mkdir "$tmp_dir"
+ isolinux=$(findsyslinux isolinux.bin)
+ ldlinux=$(findsyslinux ldlinux.c32)
+ cp "$isolinux" "$ldlinux" "$tmp_dir"
+ cp "$FBZIMAGE" "$tmp_dir"/linux
+ echo default linux "$KCMDLINE" > "$tmp_dir"/isolinux.cfg
+ cp "${FDINITRDS[@]}" "$tmp_dir"/
+ genisoimage -J -r -appid 'LINUX_BOOT' -input-charset=utf-8 \
+ -quiet -o "$FIMAGE" -b isolinux.bin \
+ -c boot.cat -no-emul-boot -boot-load-size 4 \
+ -boot-info-table "$tmp_dir"
+ isohybrid "$FIMAGE" 2>/dev/null || true
+ rm -rf "$tmp_dir"
}
-case $1 in
+rm -f "$FIMAGE"
+
+case "$diskfmt" in
bzdisk) genbzdisk;;
fdimage144) genfdimage144;;
fdimage288) genfdimage288;;
+ hdimage) genhdimage;;
isoimage) geniso;;
- *) echo 'Unknown image format'; exit 1;
+ *) die "Unknown image format: $diskfmt";;
esac
diff --git a/arch/x86/boot/mtools.conf.in b/arch/x86/boot/mtools.conf.in
index efd6d2490c1d..9e2662d01364 100644
--- a/arch/x86/boot/mtools.conf.in
+++ b/arch/x86/boot/mtools.conf.in
@@ -14,4 +14,7 @@ drive v:
drive w:
file="@OBJ@/fdimage" cylinders=80 heads=2 sectors=36 filter
+# Hard disk
+drive h:
+ file="@OBJ@/hdimage" partition=1 mformat_only
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index b28e36b7c96b..d0959e7b809f 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -2,8 +2,6 @@
#
# x86 crypto algorithms
-OBJECT_FILES_NON_STANDARD := y
-
obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 2cf8e94d986a..98e3552b6e03 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -212,10 +212,6 @@ HashKey_8_k = 16*21 # store XOR of HashKey^8 <<1 mod poly here (for Karatsu
#define arg4 %rcx
#define arg5 %r8
#define arg6 %r9
-#define arg7 STACK_OFFSET+8*1(%r14)
-#define arg8 STACK_OFFSET+8*2(%r14)
-#define arg9 STACK_OFFSET+8*3(%r14)
-#define arg10 STACK_OFFSET+8*4(%r14)
#define keysize 2*15*16(arg1)
i = 0
@@ -237,9 +233,6 @@ define_reg j %j
.noaltmacro
.endm
-# need to push 4 registers into stack to maintain
-STACK_OFFSET = 8*4
-
TMP1 = 16*0 # Temporary storage for AAD
TMP2 = 16*1 # Temporary storage for AES State 2 (State 1 is stored in an XMM register)
TMP3 = 16*2 # Temporary storage for AES State 3
@@ -256,25 +249,22 @@ VARIABLE_OFFSET = 16*8
################################
.macro FUNC_SAVE
- #the number of pushes must equal STACK_OFFSET
push %r12
push %r13
- push %r14
push %r15
- mov %rsp, %r14
-
-
+ push %rbp
+ mov %rsp, %rbp
sub $VARIABLE_OFFSET, %rsp
and $~63, %rsp # align rsp to 64 bytes
.endm
.macro FUNC_RESTORE
- mov %r14, %rsp
+ mov %rbp, %rsp
+ pop %rbp
pop %r15
- pop %r14
pop %r13
pop %r12
.endm
@@ -294,7 +284,7 @@ VARIABLE_OFFSET = 16*8
# combined for GCM encrypt and decrypt functions
# clobbering all xmm registers
-# clobbering r10, r11, r12, r13, r14, r15
+# clobbering r10, r11, r12, r13, r15, rax
.macro GCM_ENC_DEC INITIAL_BLOCKS GHASH_8_ENCRYPT_8_PARALLEL GHASH_LAST_8 GHASH_MUL ENC_DEC REP
vmovdqu AadHash(arg2), %xmm8
vmovdqu HashKey(arg2), %xmm13 # xmm13 = HashKey
@@ -996,7 +986,7 @@ _partial_block_done_\@:
## num_initial_blocks = b mod 4#
## encrypt the initial num_initial_blocks blocks and apply ghash on the ciphertext
## r10, r11, r12, rax are clobbered
-## arg1, arg3, arg4, r14 are used as a pointer only, not modified
+## arg1, arg2, arg3, arg4 are used as pointers only, not modified
.macro INITIAL_BLOCKS_AVX REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC
i = (8-\num_initial_blocks)
@@ -1231,7 +1221,7 @@ _initial_blocks_done\@:
# encrypt 8 blocks at a time
# ghash the 8 previously encrypted ciphertext blocks
-# arg1, arg3, arg4 are used as pointers only, not modified
+# arg1, arg2, arg3, arg4 are used as pointers only, not modified
# r11 is the data offset value
.macro GHASH_8_ENCRYPT_8_PARALLEL_AVX REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
@@ -1944,7 +1934,7 @@ SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)
## num_initial_blocks = b mod 4#
## encrypt the initial num_initial_blocks blocks and apply ghash on the ciphertext
## r10, r11, r12, rax are clobbered
-## arg1, arg3, arg4, r14 are used as a pointer only, not modified
+## arg1, arg2, arg3, arg4 are used as pointers only, not modified
.macro INITIAL_BLOCKS_AVX2 REP num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC VER
i = (8-\num_initial_blocks)
@@ -2186,7 +2176,7 @@ _initial_blocks_done\@:
# encrypt 8 blocks at a time
# ghash the 8 previously encrypted ciphertext blocks
-# arg1, arg3, arg4 are used as pointers only, not modified
+# arg1, arg2, arg3, arg4 are used as pointers only, not modified
# r11 is the data offset value
.macro GHASH_8_ENCRYPT_8_PARALLEL_AVX2 REP T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
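
The FUNC_SAVE/FUNC_RESTORE change above stops stashing the pre-alignment stack pointer in %r14 and instead uses %rbp as a conventional frame pointer (push %rbp; mov %rsp, %rbp), which frees a general-purpose register and keeps the frame layout unwinder-friendly. The alignment itself is just a mask; a C sketch of rounding an address down to a 64-byte boundary, as "and $~63, %rsp" does:

    #include <assert.h>
    #include <stdint.h>

    /* Round a pointer down to the previous 64-byte boundary, the C
     * equivalent of the "and $~63, %rsp" in FUNC_SAVE above. */
    static void *align_down_64(void *p)
    {
        return (void *)((uintptr_t)p & ~(uintptr_t)63);
    }

    int main(void)
    {
        char buf[128];
        void *a = align_down_64(buf + 100);

        assert(((uintptr_t)a & 63) == 0);  /* now 64-byte aligned */
        assert((char *)a <= buf + 100);    /* rounded down, never up */
        return 0;
    }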
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index 782e9712a1ec..706f70829a07 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -990,6 +990,7 @@ SYM_FUNC_START(camellia_cbc_dec_32way)
* %rdx: src (32 blocks)
*/
FRAME_BEGIN
+ subq $(16 * 32), %rsp;
vzeroupper;
@@ -1002,7 +1003,6 @@ SYM_FUNC_START(camellia_cbc_dec_32way)
%ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
%ymm15, %rdx, (key_table)(CTX, %r8, 8));
- movq %rsp, %r10;
cmpq %rsi, %rdx;
je .Lcbc_dec_use_stack;
@@ -1015,7 +1015,6 @@ SYM_FUNC_START(camellia_cbc_dec_32way)
* dst still in-use (because dst == src), so use stack for temporary
* storage.
*/
- subq $(16 * 32), %rsp;
movq %rsp, %rax;
.Lcbc_dec_continue:
@@ -1025,7 +1024,6 @@ SYM_FUNC_START(camellia_cbc_dec_32way)
vpxor %ymm7, %ymm7, %ymm7;
vinserti128 $1, (%rdx), %ymm7, %ymm7;
vpxor (%rax), %ymm7, %ymm7;
- movq %r10, %rsp;
vpxor (0 * 32 + 16)(%rdx), %ymm6, %ymm6;
vpxor (1 * 32 + 16)(%rdx), %ymm5, %ymm5;
vpxor (2 * 32 + 16)(%rdx), %ymm4, %ymm4;
@@ -1047,6 +1045,7 @@ SYM_FUNC_START(camellia_cbc_dec_32way)
vzeroupper;
+ addq $(16 * 32), %rsp;
FRAME_END
ret;
SYM_FUNC_END(camellia_cbc_dec_32way)
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index 7c4c7b2fbf05..98cf3b4e4c9f 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -24,7 +24,7 @@
/*
* Copyright 2012 Xyratex Technology Limited
*
- * Wrappers for kernel crypto shash api to pclmulqdq crc32 imlementation.
+ * Wrappers for kernel crypto shash api to pclmulqdq crc32 implementation.
*/
#include <linux/init.h>
#include <linux/module.h>
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index 884dc767b051..ac1f303eed0f 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -53,7 +53,7 @@
.endm
.macro JMPTBL_ENTRY i
-.word crc_\i - crc_array
+.quad crc_\i
.endm
.macro JNC_LESS_THAN j
@@ -168,10 +168,7 @@ continue_block:
xor crc2, crc2
## branch into array
- lea jump_table(%rip), %bufp
- movzwq (%bufp, %rax, 2), len
- lea crc_array(%rip), %bufp
- lea (%bufp, len, 1), %bufp
+ mov jump_table(,%rax,8), %bufp
JMP_NOSPEC bufp
################################################################
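
The crc32c change above replaces 16-bit offsets (which had to be rebased against crc_array with two lea instructions) with full 8-byte table entries, so a single indexed load yields the branch target. The same idea in C, as a table of code pointers (names illustrative):

    #include <stdio.h>

    static void crc_1(void) { puts("1 block"); }
    static void crc_2(void) { puts("2 blocks"); }
    static void crc_3(void) { puts("3 blocks"); }

    /* 8-byte entries: one indexed load, then an indirect jump */
    static void (*const jump_table[])(void) = { crc_1, crc_2, crc_3 };

    int main(void)
    {
        unsigned idx = 2;

        jump_table[idx]();   /* prints "3 blocks" */
        return 0;
    }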
diff --git a/arch/x86/crypto/curve25519-x86_64.c b/arch/x86/crypto/curve25519-x86_64.c
index 5af8021b98ce..38caf61cd5b7 100644
--- a/arch/x86/crypto/curve25519-x86_64.c
+++ b/arch/x86/crypto/curve25519-x86_64.c
@@ -114,11 +114,11 @@ static inline void fadd(u64 *out, const u64 *f1, const u64 *f2)
);
}
-/* Computes the field substraction of two field elements */
+/* Computes the field subtraction of two field elements */
static inline void fsub(u64 *out, const u64 *f1, const u64 *f2)
{
asm volatile(
- /* Compute the raw substraction of f1-f2 */
+ /* Compute the raw subtraction of f1-f2 */
" movq 0(%1), %%r8;"
" subq 0(%2), %%r8;"
" movq 8(%1), %%r9;"
@@ -135,7 +135,7 @@ static inline void fsub(u64 *out, const u64 *f1, const u64 *f2)
" mov $38, %%rcx;"
" cmovc %%rcx, %%rax;"
- /* Step 2: Substract carry*38 from the original difference */
+ /* Step 2: Subtract carry*38 from the original difference */
" sub %%rax, %%r8;"
" sbb $0, %%r9;"
" sbb $0, %%r10;"
@@ -1500,7 +1500,7 @@ static int __init curve25519_mod_init(void)
static void __exit curve25519_mod_exit(void)
{
if (IS_REACHABLE(CONFIG_CRYPTO_KPP) &&
- (boot_cpu_has(X86_FEATURE_BMI2) || boot_cpu_has(X86_FEATURE_ADX)))
+ static_branch_likely(&curve25519_use_bmi2_adx))
crypto_unregister_kpp(&curve25519_alg);
}
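
The fsub comments above describe a lazy-reduction subtraction over p = 2^255 - 19: since 2^256 ≡ 38 (mod p), a borrow out of the top limb is repaid by subtracting 38 at the bottom. A rough C sketch with four little-endian 64-bit limbs, assuming a compiler that provides unsigned __int128; this is an illustration, not the kernel's implementation:

    #include <stdint.h>

    typedef unsigned __int128 u128;

    static void fsub_sketch(uint64_t out[4], const uint64_t f1[4],
                            const uint64_t f2[4])
    {
        uint64_t borrow = 0;

        /* Step 1: raw subtraction f1 - f2 with borrow propagation */
        for (int i = 0; i < 4; i++) {
            u128 d = (u128)f1[i] - f2[i] - borrow;
            out[i] = (uint64_t)d;
            borrow = (uint64_t)(d >> 64) & 1;
        }

        /* Step 2: subtract borrow*38 from the difference */
        uint64_t c = borrow ? 38 : 0;
        for (int i = 0; i < 4; i++) {
            u128 d = (u128)out[i] - c;
            out[i] = (uint64_t)d;
            c = (uint64_t)(d >> 64) & 1;   /* propagate the new borrow */
        }
        /* The asm repeats the cmovc/subtract once more for the rare
         * second borrow; omitted here for brevity. */
    }

    int main(void)
    {
        uint64_t a[4] = {1, 0, 0, 0}, b[4] = {2, 0, 0, 0}, r[4];

        fsub_sketch(r, a, b);   /* r ≡ -1 (mod p), not fully reduced */
        return 0;
    }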
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index 646da46e8d10..1dfb8af48a3c 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -16,7 +16,7 @@
#include <asm/simd.h>
asmlinkage void poly1305_init_x86_64(void *ctx,
- const u8 key[POLY1305_KEY_SIZE]);
+ const u8 key[POLY1305_BLOCK_SIZE]);
asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
const size_t len, const u32 padbit);
asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
@@ -81,7 +81,7 @@ static void convert_to_base2_64(void *ctx)
state->is_base2_26 = 0;
}
-static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE])
+static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE])
{
poly1305_init_x86_64(ctx, key);
}
@@ -129,7 +129,7 @@ static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
poly1305_emit_avx(ctx, mac, nonce);
}
-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
{
poly1305_simd_init(&dctx->h, key);
dctx->s[0] = get_unaligned_le32(&key[16]);
diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
index 1e594d60afa5..5eed620f4676 100644
--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
@@ -645,9 +645,9 @@ _loop3:
RESERVE_STACK = (W_SIZE*4 + 8+24)
/* Align stack */
- mov %rsp, %rbx
+ push %rbp
+ mov %rsp, %rbp
and $~(0x20-1), %rsp
- push %rbx
sub $RESERVE_STACK, %rsp
avx2_zeroupper
@@ -665,8 +665,8 @@ _loop3:
avx2_zeroupper
- add $RESERVE_STACK, %rsp
- pop %rsp
+ mov %rbp, %rsp
+ pop %rbp
pop %r15
pop %r14
diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S
index 11efe3a45a1f..5d8415f482bd 100644
--- a/arch/x86/crypto/sha1_ni_asm.S
+++ b/arch/x86/crypto/sha1_ni_asm.S
@@ -59,8 +59,6 @@
#define DATA_PTR %rsi /* 2nd arg */
#define NUM_BLKS %rdx /* 3rd arg */
-#define RSPSAVE %rax
-
/* gcc conversion */
#define FRAME_SIZE 32 /* space for 2x16 bytes */
@@ -96,7 +94,8 @@
.text
.align 32
SYM_FUNC_START(sha1_ni_transform)
- mov %rsp, RSPSAVE
+ push %rbp
+ mov %rsp, %rbp
sub $FRAME_SIZE, %rsp
and $~0xF, %rsp
@@ -288,7 +287,8 @@ SYM_FUNC_START(sha1_ni_transform)
pextrd $3, E0, 1*16(DIGEST_PTR)
.Ldone_hash:
- mov RSPSAVE, %rsp
+ mov %rbp, %rsp
+ pop %rbp
ret
SYM_FUNC_END(sha1_ni_transform)
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 11ff60c29c8b..4087f7432a7e 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -117,15 +117,13 @@ _XMM_SAVE_SIZE = 0
_INP_END_SIZE = 8
_INP_SIZE = 8
_CTX_SIZE = 8
-_RSP_SIZE = 8
_XFER = 0
_XMM_SAVE = _XFER + _XFER_SIZE
_INP_END = _XMM_SAVE + _XMM_SAVE_SIZE
_INP = _INP_END + _INP_END_SIZE
_CTX = _INP + _INP_SIZE
-_RSP = _CTX + _CTX_SIZE
-STACK_SIZE = _RSP + _RSP_SIZE
+STACK_SIZE = _CTX + _CTX_SIZE
# rotate_Xs
# Rotate values of symbols X0...X3
@@ -533,11 +531,11 @@ SYM_FUNC_START(sha256_transform_rorx)
pushq %r14
pushq %r15
- mov %rsp, %rax
+ push %rbp
+ mov %rsp, %rbp
+
subq $STACK_SIZE, %rsp
and $-32, %rsp # align rsp to 32 byte boundary
- mov %rax, _RSP(%rsp)
-
shl $6, NUM_BLKS # convert to bytes
jz done_hash
@@ -704,7 +702,8 @@ only_one_block:
done_hash:
- mov _RSP(%rsp), %rsp
+ mov %rbp, %rsp
+ pop %rbp
popq %r15
popq %r14
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
index 684d58c8bc4f..3d8f0fd4eea8 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -76,14 +76,10 @@ tmp0 = %rax
W_SIZE = 80*8
# W[t] + K[t] | W[t+1] + K[t+1]
WK_SIZE = 2*8
-RSPSAVE_SIZE = 1*8
-GPRSAVE_SIZE = 5*8
frame_W = 0
frame_WK = frame_W + W_SIZE
-frame_RSPSAVE = frame_WK + WK_SIZE
-frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE
-frame_size = frame_GPRSAVE + GPRSAVE_SIZE
+frame_size = frame_WK + WK_SIZE
# Useful QWORD "arrays" for simpler memory references
# MSG, DIGEST, K_t, W_t are arrays
@@ -281,18 +277,18 @@ SYM_FUNC_START(sha512_transform_avx)
test msglen, msglen
je nowork
+ # Save GPRs
+ push %rbx
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
# Allocate Stack Space
- mov %rsp, %rax
+ push %rbp
+ mov %rsp, %rbp
sub $frame_size, %rsp
and $~(0x20 - 1), %rsp
- mov %rax, frame_RSPSAVE(%rsp)
-
- # Save GPRs
- mov %rbx, frame_GPRSAVE(%rsp)
- mov %r12, frame_GPRSAVE +8*1(%rsp)
- mov %r13, frame_GPRSAVE +8*2(%rsp)
- mov %r14, frame_GPRSAVE +8*3(%rsp)
- mov %r15, frame_GPRSAVE +8*4(%rsp)
updateblock:
@@ -353,15 +349,16 @@ updateblock:
dec msglen
jnz updateblock
- # Restore GPRs
- mov frame_GPRSAVE(%rsp), %rbx
- mov frame_GPRSAVE +8*1(%rsp), %r12
- mov frame_GPRSAVE +8*2(%rsp), %r13
- mov frame_GPRSAVE +8*3(%rsp), %r14
- mov frame_GPRSAVE +8*4(%rsp), %r15
-
# Restore Stack Pointer
- mov frame_RSPSAVE(%rsp), %rsp
+ mov %rbp, %rsp
+ pop %rbp
+
+ # Restore GPRs
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbx
nowork:
ret
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
index 3a44bdcfd583..072cb0f0deae 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -102,17 +102,13 @@ SRND_SIZE = 1*8
INP_SIZE = 1*8
INPEND_SIZE = 1*8
CTX_SIZE = 1*8
-RSPSAVE_SIZE = 1*8
-GPRSAVE_SIZE = 5*8
frame_XFER = 0
frame_SRND = frame_XFER + XFER_SIZE
frame_INP = frame_SRND + SRND_SIZE
frame_INPEND = frame_INP + INP_SIZE
frame_CTX = frame_INPEND + INPEND_SIZE
-frame_RSPSAVE = frame_CTX + CTX_SIZE
-frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE
-frame_size = frame_GPRSAVE + GPRSAVE_SIZE
+frame_size = frame_CTX + CTX_SIZE
## assume buffers not aligned
#define VMOVDQ vmovdqu
@@ -570,18 +566,18 @@ frame_size = frame_GPRSAVE + GPRSAVE_SIZE
# "blocks" is the message length in SHA512 blocks
########################################################################
SYM_FUNC_START(sha512_transform_rorx)
+ # Save GPRs
+ push %rbx
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
# Allocate Stack Space
- mov %rsp, %rax
+ push %rbp
+ mov %rsp, %rbp
sub $frame_size, %rsp
and $~(0x20 - 1), %rsp
- mov %rax, frame_RSPSAVE(%rsp)
-
- # Save GPRs
- mov %rbx, 8*0+frame_GPRSAVE(%rsp)
- mov %r12, 8*1+frame_GPRSAVE(%rsp)
- mov %r13, 8*2+frame_GPRSAVE(%rsp)
- mov %r14, 8*3+frame_GPRSAVE(%rsp)
- mov %r15, 8*4+frame_GPRSAVE(%rsp)
shl $7, NUM_BLKS # convert to bytes
jz done_hash
@@ -672,15 +668,17 @@ loop2:
done_hash:
-# Restore GPRs
- mov 8*0+frame_GPRSAVE(%rsp), %rbx
- mov 8*1+frame_GPRSAVE(%rsp), %r12
- mov 8*2+frame_GPRSAVE(%rsp), %r13
- mov 8*3+frame_GPRSAVE(%rsp), %r14
- mov 8*4+frame_GPRSAVE(%rsp), %r15
-
# Restore Stack Pointer
- mov frame_RSPSAVE(%rsp), %rsp
+ mov %rbp, %rsp
+ pop %rbp
+
+ # Restore GPRs
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbx
+
ret
SYM_FUNC_END(sha512_transform_rorx)
diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
index 50812af0b083..bd51c9070bed 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -74,14 +74,10 @@ tmp0 = %rax
W_SIZE = 80*8
WK_SIZE = 2*8
-RSPSAVE_SIZE = 1*8
-GPRSAVE_SIZE = 5*8
frame_W = 0
frame_WK = frame_W + W_SIZE
-frame_RSPSAVE = frame_WK + WK_SIZE
-frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE
-frame_size = frame_GPRSAVE + GPRSAVE_SIZE
+frame_size = frame_WK + WK_SIZE
# Useful QWORD "arrays" for simpler memory references
# MSG, DIGEST, K_t, W_t are arrays
@@ -283,18 +279,18 @@ SYM_FUNC_START(sha512_transform_ssse3)
test msglen, msglen
je nowork
+ # Save GPRs
+ push %rbx
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
# Allocate Stack Space
- mov %rsp, %rax
+ push %rbp
+ mov %rsp, %rbp
sub $frame_size, %rsp
and $~(0x20 - 1), %rsp
- mov %rax, frame_RSPSAVE(%rsp)
-
- # Save GPRs
- mov %rbx, frame_GPRSAVE(%rsp)
- mov %r12, frame_GPRSAVE +8*1(%rsp)
- mov %r13, frame_GPRSAVE +8*2(%rsp)
- mov %r14, frame_GPRSAVE +8*3(%rsp)
- mov %r15, frame_GPRSAVE +8*4(%rsp)
updateblock:
@@ -355,15 +351,16 @@ updateblock:
dec msglen
jnz updateblock
- # Restore GPRs
- mov frame_GPRSAVE(%rsp), %rbx
- mov frame_GPRSAVE +8*1(%rsp), %r12
- mov frame_GPRSAVE +8*2(%rsp), %r13
- mov frame_GPRSAVE +8*3(%rsp), %r14
- mov frame_GPRSAVE +8*4(%rsp), %r15
-
# Restore Stack Pointer
- mov frame_RSPSAVE(%rsp), %rsp
+ mov %rbp, %rsp
+ pop %rbp
+
+ # Restore GPRs
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbx
nowork:
ret
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
index fc23552afe37..bca4cea757ce 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
@@ -88,7 +88,7 @@
/*
* Combined G1 & G2 function. Reordered with help of rotates to have moves
- * at begining.
+ * at beginning.
*/
#define g1g2_3(ab, cd, Tx0, Tx1, Tx2, Tx3, Ty0, Ty1, Ty2, Ty3, x, y) \
/* G1,1 && G2,1 */ \
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 03725696397c..3507cf2064f1 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -117,7 +117,7 @@ static bool is_blacklisted_cpu(void)
* storing blocks in 64bit registers to allow three blocks to
* be processed parallel. Parallel operation then allows gaining
* more performance than was trade off, on out-of-order CPUs.
- * However Atom does not benefit from this parallellism and
+ * However Atom does not benefit from this parallelism and
* should be blacklisted.
*/
return true;
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index 08bf95dbc911..7fec5dcf6438 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -8,18 +8,8 @@ UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_syscall_64.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_syscall_32.o = $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_syscall_x32.o = $(CC_FLAGS_FTRACE)
CFLAGS_common.o += -fno-stack-protector
-CFLAGS_syscall_64.o += -fno-stack-protector
-CFLAGS_syscall_32.o += -fno-stack-protector
-CFLAGS_syscall_x32.o += -fno-stack-protector
-
-CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_syscall_32.o += $(call cc-option,-Wno-override-init,)
-CFLAGS_syscall_x32.o += $(call cc-option,-Wno-override-init,)
obj-y := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
obj-y += common.o
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 07a9331d55e7..a4c061fb7c6e 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -6,6 +6,7 @@
#include <asm/percpu.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
+#include <asm/ptrace-abi.h>
/*
@@ -62,42 +63,7 @@ For 32-bit we have the following conventions - kernel is built with
* for assembly code:
*/
-/* The layout forms the "struct pt_regs" on the stack: */
-/*
- * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
- * unless syscall needs a complete, fully filled "struct pt_regs".
- */
-#define R15 0*8
-#define R14 1*8
-#define R13 2*8
-#define R12 3*8
-#define RBP 4*8
-#define RBX 5*8
-/* These regs are callee-clobbered. Always saved on kernel entry. */
-#define R11 6*8
-#define R10 7*8
-#define R9 8*8
-#define R8 9*8
-#define RAX 10*8
-#define RCX 11*8
-#define RDX 12*8
-#define RSI 13*8
-#define RDI 14*8
-/*
- * On syscall entry, this is syscall#. On CPU exception, this is error code.
- * On hw interrupt, it's IRQ number:
- */
-#define ORIG_RAX 15*8
-/* Return frame for iretq */
-#define RIP 16*8
-#define CS 17*8
-#define EFLAGS 18*8
-#define RSP 19*8
-#define SS 20*8
-
-#define SIZEOF_PTREGS 21*8
-
-.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
+.macro PUSH_REGS rdx=%rdx rax=%rax save_ret=0
.if \save_ret
pushq %rsi /* pt_regs->si */
movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */
@@ -124,7 +90,9 @@ For 32-bit we have the following conventions - kernel is built with
.if \save_ret
pushq %rsi /* return address on top of stack */
.endif
+.endm
+.macro CLEAR_REGS
/*
* Sanitize registers of values that a speculation attack might
* otherwise want to exploit. The lower registers are likely clobbered
@@ -146,6 +114,11 @@ For 32-bit we have the following conventions - kernel is built with
.endm
+.macro PUSH_AND_CLEAR_REGS rdx=%rdx rax=%rax save_ret=0
+ PUSH_REGS rdx=\rdx, rax=\rax, save_ret=\save_ret
+ CLEAR_REGS
+.endm
+
.macro POP_REGS pop_rdi=1 skip_r11rcx=0
popq %r15
popq %r14
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index a2433ae8a65e..6c2826417b33 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -36,59 +36,97 @@
#include <asm/irq_stack.h>
#ifdef CONFIG_X86_64
-__visible noinstr void do_syscall_64(unsigned long nr, struct pt_regs *regs)
+
+static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
+{
+ /*
+ * Convert negative numbers to very high and thus out of range
+ * numbers for comparisons.
+ */
+ unsigned int unr = nr;
+
+ if (likely(unr < NR_syscalls)) {
+ unr = array_index_nospec(unr, NR_syscalls);
+ regs->ax = sys_call_table[unr](regs);
+ return true;
+ }
+ return false;
+}
+
+static __always_inline bool do_syscall_x32(struct pt_regs *regs, int nr)
+{
+ /*
+ * Adjust the starting offset of the table, and convert numbers
+ * < __X32_SYSCALL_BIT to very high and thus out of range
+ * numbers for comparisons.
+ */
+ unsigned int xnr = nr - __X32_SYSCALL_BIT;
+
+ if (IS_ENABLED(CONFIG_X86_X32_ABI) && likely(xnr < X32_NR_syscalls)) {
+ xnr = array_index_nospec(xnr, X32_NR_syscalls);
+ regs->ax = x32_sys_call_table[xnr](regs);
+ return true;
+ }
+ return false;
+}
+
+__visible noinstr void do_syscall_64(struct pt_regs *regs, int nr)
{
+ add_random_kstack_offset();
nr = syscall_enter_from_user_mode(regs, nr);
instrumentation_begin();
- if (likely(nr < NR_syscalls)) {
- nr = array_index_nospec(nr, NR_syscalls);
- regs->ax = sys_call_table[nr](regs);
-#ifdef CONFIG_X86_X32_ABI
- } else if (likely((nr & __X32_SYSCALL_BIT) &&
- (nr & ~__X32_SYSCALL_BIT) < X32_NR_syscalls)) {
- nr = array_index_nospec(nr & ~__X32_SYSCALL_BIT,
- X32_NR_syscalls);
- regs->ax = x32_sys_call_table[nr](regs);
-#endif
+
+ if (!do_syscall_x64(regs, nr) && !do_syscall_x32(regs, nr) && nr != -1) {
+ /* Invalid system call, but still a system call. */
+ regs->ax = __x64_sys_ni_syscall(regs);
}
+
instrumentation_end();
syscall_exit_to_user_mode(regs);
}
#endif
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
-static __always_inline unsigned int syscall_32_enter(struct pt_regs *regs)
+static __always_inline int syscall_32_enter(struct pt_regs *regs)
{
if (IS_ENABLED(CONFIG_IA32_EMULATION))
current_thread_info()->status |= TS_COMPAT;
- return (unsigned int)regs->orig_ax;
+ return (int)regs->orig_ax;
}
/*
* Invoke a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL.
*/
-static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
- unsigned int nr)
+static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
{
- if (likely(nr < IA32_NR_syscalls)) {
- nr = array_index_nospec(nr, IA32_NR_syscalls);
- regs->ax = ia32_sys_call_table[nr](regs);
+ /*
+ * Convert negative numbers to very high and thus out of range
+ * numbers for comparisons.
+ */
+ unsigned int unr = nr;
+
+ if (likely(unr < IA32_NR_syscalls)) {
+ unr = array_index_nospec(unr, IA32_NR_syscalls);
+ regs->ax = ia32_sys_call_table[unr](regs);
+ } else if (nr != -1) {
+ regs->ax = __ia32_sys_ni_syscall(regs);
}
}
/* Handles int $0x80 */
__visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
{
- unsigned int nr = syscall_32_enter(regs);
+ int nr = syscall_32_enter(regs);
+ add_random_kstack_offset();
/*
- * Subtlety here: if ptrace pokes something larger than 2^32-1 into
- * orig_ax, the unsigned int return value truncates it. This may
- * or may not be necessary, but it matches the old asm behavior.
+ * Subtlety here: if ptrace pokes something larger than 2^31-1 into
+ * orig_ax, the int return value truncates it. This matches
+ * the semantics of syscall_get_nr().
*/
- nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+ nr = syscall_enter_from_user_mode(regs, nr);
instrumentation_begin();
do_syscall_32_irqs_on(regs, nr);
@@ -99,9 +137,10 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
{
- unsigned int nr = syscall_32_enter(regs);
+ int nr = syscall_32_enter(regs);
int res;
+ add_random_kstack_offset();
/*
* This cannot use syscall_enter_from_user_mode() as it has to
* fetch EBP before invoking any of the syscall entry work
@@ -127,13 +166,13 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
/* User code screwed up. */
regs->ax = -EFAULT;
+ local_irq_disable();
instrumentation_end();
- syscall_exit_to_user_mode(regs);
+ irqentry_exit_to_user_mode(regs);
return false;
}
- /* The case truncates any ptrace induced syscall nr > 2^32 -1 */
- nr = (unsigned int)syscall_enter_from_user_mode_work(regs, nr);
+ nr = syscall_enter_from_user_mode_work(regs, nr);
/* Now this is just like a normal syscall. */
do_syscall_32_irqs_on(regs, nr);
@@ -265,15 +304,16 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
irqentry_state_t state = irqentry_enter(regs);
bool inhcall;
+ instrumentation_begin();
run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
inhcall = get_and_clear_inhcall();
if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
- instrumentation_begin();
irqentry_exit_cond_resched();
instrumentation_end();
restore_inhcall(inhcall);
} else {
+ instrumentation_end();
irqentry_exit(regs, state);
}
}
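
The do_syscall_x64()/do_syscall_x32() helpers above rely on a classic trick: storing the signed nr into an unsigned variable turns negative values into very large ones, so a single unsigned comparison rejects both underflow and overflow before array_index_nospec() clamps the index against speculation. A standalone sketch (NR_SYSCALLS is a made-up bound):

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_SYSCALLS 450   /* hypothetical table size */

    static bool nr_in_range(int nr)
    {
        unsigned int unr = nr;      /* -1 becomes 0xffffffff */

        return unr < NR_SYSCALLS;   /* one compare covers both ends */
    }

    int main(void)
    {
        printf("%d %d %d\n", nr_in_range(0), nr_in_range(-1),
               nr_in_range(1000));
        /* prints: 1 0 0 */
        return 0;
    }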
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index df8c017e6161..ccb9d32768f3 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -20,7 +20,7 @@
* 1C(%esp) - %ds
* 20(%esp) - %es
* 24(%esp) - %fs
- * 28(%esp) - %gs saved iff !CONFIG_X86_32_LAZY_GS
+ * 28(%esp) - unused -- was %gs on old stackprotector kernels
* 2C(%esp) - orig_eax
* 30(%esp) - %eip
* 34(%esp) - %cs
@@ -40,7 +40,7 @@
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
@@ -53,83 +53,6 @@
#define PTI_SWITCH_MASK (1 << PAGE_SHIFT)
-/*
- * User gs save/restore
- *
- * %gs is used for userland TLS and kernel only uses it for stack
- * canary which is required to be at %gs:20 by gcc. Read the comment
- * at the top of stackprotector.h for more info.
- *
- * Local labels 98 and 99 are used.
- */
-#ifdef CONFIG_X86_32_LAZY_GS
-
- /* unfortunately push/pop can't be no-op */
-.macro PUSH_GS
- pushl $0
-.endm
-.macro POP_GS pop=0
- addl $(4 + \pop), %esp
-.endm
-.macro POP_GS_EX
-.endm
-
- /* all the rest are no-op */
-.macro PTGS_TO_GS
-.endm
-.macro PTGS_TO_GS_EX
-.endm
-.macro GS_TO_REG reg
-.endm
-.macro REG_TO_PTGS reg
-.endm
-.macro SET_KERNEL_GS reg
-.endm
-
-#else /* CONFIG_X86_32_LAZY_GS */
-
-.macro PUSH_GS
- pushl %gs
-.endm
-
-.macro POP_GS pop=0
-98: popl %gs
- .if \pop <> 0
- add $\pop, %esp
- .endif
-.endm
-.macro POP_GS_EX
-.pushsection .fixup, "ax"
-99: movl $0, (%esp)
- jmp 98b
-.popsection
- _ASM_EXTABLE(98b, 99b)
-.endm
-
-.macro PTGS_TO_GS
-98: mov PT_GS(%esp), %gs
-.endm
-.macro PTGS_TO_GS_EX
-.pushsection .fixup, "ax"
-99: movl $0, PT_GS(%esp)
- jmp 98b
-.popsection
- _ASM_EXTABLE(98b, 99b)
-.endm
-
-.macro GS_TO_REG reg
- movl %gs, \reg
-.endm
-.macro REG_TO_PTGS reg
- movl \reg, PT_GS(%esp)
-.endm
-.macro SET_KERNEL_GS reg
- movl $(__KERNEL_STACK_CANARY), \reg
- movl \reg, %gs
-.endm
-
-#endif /* CONFIG_X86_32_LAZY_GS */
-
/* Unconditionally switch to user cr3 */
.macro SWITCH_TO_USER_CR3 scratch_reg:req
ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
@@ -209,7 +132,7 @@
*
* Lets build a 5 entry IRET frame after that, such that struct pt_regs
* is complete and in particular regs->sp is correct. This gives us
- * the original 6 enties as gap:
+ * the original 6 entries as gap:
*
* 14*4(%esp) - <previous context>
* 13*4(%esp) - gap / flags
@@ -282,7 +205,7 @@
.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
cld
.if \skip_gs == 0
- PUSH_GS
+ pushl $0
.endif
pushl %fs
@@ -307,9 +230,6 @@
movl $(__USER_DS), %edx
movl %edx, %ds
movl %edx, %es
-.if \skip_gs == 0
- SET_KERNEL_GS %edx
-.endif
/* Switch to kernel stack if necessary */
.if \switch_stacks > 0
SWITCH_TO_KERNEL_STACK
@@ -348,7 +268,7 @@
1: popl %ds
2: popl %es
3: popl %fs
- POP_GS \pop
+ addl $(4 + \pop), %esp /* pop the unused "gs" slot */
IRET_FRAME
.pushsection .fixup, "ax"
4: movl $0, (%esp)
@@ -361,7 +281,6 @@
_ASM_EXTABLE(1b, 4b)
_ASM_EXTABLE(2b, 5b)
_ASM_EXTABLE(3b, 6b)
- POP_GS_EX
.endm
.macro RESTORE_ALL_NMI cr3_reg:req pop=0
@@ -430,7 +349,7 @@
* will soon execute iret and the tracer was already set to
* the irqstate after the IRET:
*/
- DISABLE_INTERRUPTS(CLBR_ANY)
+ cli
lss (%esp), %esp /* switch to espfix segment */
.Lend_\@:
#endif /* CONFIG_X86_ESPFIX32 */
@@ -779,7 +698,7 @@ SYM_CODE_START(__switch_to_asm)
#ifdef CONFIG_STACKPROTECTOR
movl TASK_stack_canary(%edx), %ebx
- movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
+ movl %ebx, PER_CPU_VAR(__stack_chk_guard)
#endif
#ifdef CONFIG_RETPOLINE
@@ -976,7 +895,6 @@ SYM_FUNC_START(entry_SYSENTER_32)
movl PT_EIP(%esp), %edx /* pt_regs->ip */
movl PT_OLDESP(%esp), %ecx /* pt_regs->sp */
1: mov PT_FS(%esp), %fs
- PTGS_TO_GS
popl %ebx /* pt_regs->bx */
addl $2*4, %esp /* skip pt_regs->cx and pt_regs->dx */
@@ -1012,7 +930,6 @@ SYM_FUNC_START(entry_SYSENTER_32)
jmp 1b
.popsection
_ASM_EXTABLE(1b, 2b)
- PTGS_TO_GS_EX
.Lsysenter_fix_flags:
pushl $X86_EFLAGS_FIXED
@@ -1077,7 +994,7 @@ restore_all_switch_stack:
* when returning from IPI handler and when returning from
* scheduler to user-space.
*/
- INTERRUPT_RETURN
+ iret
.section .fixup, "ax"
SYM_CODE_START(asm_iret_error)
@@ -1154,11 +1071,7 @@ SYM_CODE_START_LOCAL_NOALIGN(handle_exception)
SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
ENCODE_FRAME_POINTER
- /* fixup %gs */
- GS_TO_REG %ecx
movl PT_GS(%esp), %edi # get the function address
- REG_TO_PTGS %ecx
- SET_KERNEL_GS %ecx
/* fixup orig %eax */
movl PT_ORIG_EAX(%esp), %edx # get the error code
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 400908dff42e..e38a4cf795d9 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -107,8 +107,9 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
PUSH_AND_CLEAR_REGS rax=$-ENOSYS
/* IRQs are off. */
- movq %rax, %rdi
- movq %rsp, %rsi
+ movq %rsp, %rdi
+	/* Sign-extend the lower 32 bits, as syscall numbers are treated as int */
+ movslq %eax, %rsi
call do_syscall_64 /* returns with IRQs disabled */
/*
@@ -305,7 +306,7 @@ SYM_CODE_END(ret_from_fork)
.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
pushq %rax
- SAVE_FLAGS(CLBR_RAX)
+ SAVE_FLAGS
testl $X86_EFLAGS_IF, %eax
jz .Lokay_\@
ud2
@@ -506,18 +507,18 @@ SYM_CODE_START(\asmsym)
movq %rsp, %rdi /* pt_regs pointer */
- call \cfunc
+ call kernel_\cfunc
/*
* No need to switch back to the IST stack. The current stack is either
* identical to the stack in the IRET frame or the VC fall-back stack,
- * so it is definitly mapped even with PTI enabled.
+ * so it is definitely mapped even with PTI enabled.
*/
jmp paranoid_exit
/* Switch to the regular task stack */
.Lfrom_usermode_switch_stack_\@:
- idtentry_body safe_stack_\cfunc, has_error_code=1
+ idtentry_body user_\cfunc, has_error_code=1
_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
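
The entry_SYSCALL_64 hunk above passes the syscall number through movslq because do_syscall_64() now takes an int: the low 32 bits of %rax are sign-extended, so a poked-in -1 survives as -1 rather than becoming 0xffffffff. A C illustration of that sign extension:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rax = 0x00000000ffffffffULL;    /* low 32 bits hold -1 */
        int64_t  rsi = (int32_t)(uint32_t)rax;   /* movslq: sign-extend */

        printf("%lld\n", (long long)rsi);        /* prints -1 */
        return 0;
    }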
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 541fdaf64045..0051cf5c792d 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -210,6 +210,8 @@ SYM_CODE_START(entry_SYSCALL_compat)
/* Switch to the kernel stack */
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+SYM_INNER_LABEL(entry_SYSCALL_compat_safe_stack, SYM_L_GLOBAL)
+
/* Construct struct pt_regs on stack */
pushq $__USER32_DS /* pt_regs->ss */
pushq %r8 /* pt_regs->sp */
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index 86eb0d89d46f..8cfc9bc73e7f 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -5,21 +5,21 @@
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
-#define __SYSCALL_I386(nr, sym) extern long __ia32_##sym(const struct pt_regs *);
+#ifdef CONFIG_IA32_EMULATION
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
+#else
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+#endif
+
+#define __SYSCALL(nr, sym) extern long __ia32_##sym(const struct pt_regs *);
#include <asm/syscalls_32.h>
-#undef __SYSCALL_I386
+#undef __SYSCALL
-#define __SYSCALL_I386(nr, sym) [nr] = __ia32_##sym,
+#define __SYSCALL(nr, sym) __ia32_##sym,
-__visible const sys_call_ptr_t ia32_sys_call_table[__NR_ia32_syscall_max+1] = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_ia32_syscall_max] = &__ia32_sys_ni_syscall,
+__visible const sys_call_ptr_t ia32_sys_call_table[] = {
#include <asm/syscalls_32.h>
};
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index 1594ec72bcbb..be120eec1fc9 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -5,23 +5,14 @@
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
-#define __SYSCALL_X32(nr, sym)
-#define __SYSCALL_COMMON(nr, sym) __SYSCALL_64(nr, sym)
-
-#define __SYSCALL_64(nr, sym) extern long __x64_##sym(const struct pt_regs *);
+#define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *);
#include <asm/syscalls_64.h>
-#undef __SYSCALL_64
+#undef __SYSCALL
-#define __SYSCALL_64(nr, sym) [nr] = __x64_##sym,
+#define __SYSCALL(nr, sym) __x64_##sym,
-asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_syscall_max] = &__x64_sys_ni_syscall,
+asmlinkage const sys_call_ptr_t sys_call_table[] = {
#include <asm/syscalls_64.h>
};
diff --git a/arch/x86/entry/syscall_x32.c b/arch/x86/entry/syscall_x32.c
index f2fe0a33bcfd..bdd0e03a1265 100644
--- a/arch/x86/entry/syscall_x32.c
+++ b/arch/x86/entry/syscall_x32.c
@@ -5,37 +5,14 @@
#include <linux/sys.h>
#include <linux/cache.h>
#include <linux/syscalls.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
-/*
- * Reuse the 64-bit entry points for the x32 versions that occupy different
- * slots in the syscall table.
- */
-#define __x32_sys_readv __x64_sys_readv
-#define __x32_sys_writev __x64_sys_writev
-#define __x32_sys_getsockopt __x64_sys_getsockopt
-#define __x32_sys_setsockopt __x64_sys_setsockopt
-#define __x32_sys_vmsplice __x64_sys_vmsplice
-#define __x32_sys_process_vm_readv __x64_sys_process_vm_readv
-#define __x32_sys_process_vm_writev __x64_sys_process_vm_writev
+#define __SYSCALL(nr, sym) extern long __x64_##sym(const struct pt_regs *);
+#include <asm/syscalls_x32.h>
+#undef __SYSCALL
-#define __SYSCALL_64(nr, sym)
+#define __SYSCALL(nr, sym) __x64_##sym,
-#define __SYSCALL_X32(nr, sym) extern long __x32_##sym(const struct pt_regs *);
-#define __SYSCALL_COMMON(nr, sym) extern long __x64_##sym(const struct pt_regs *);
-#include <asm/syscalls_64.h>
-#undef __SYSCALL_X32
-#undef __SYSCALL_COMMON
-
-#define __SYSCALL_X32(nr, sym) [nr] = __x32_##sym,
-#define __SYSCALL_COMMON(nr, sym) [nr] = __x64_##sym,
-
-asmlinkage const sys_call_ptr_t x32_sys_call_table[__NR_x32_syscall_max+1] = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_x32_syscall_max] = &__x64_sys_ni_syscall,
-#include <asm/syscalls_64.h>
+asmlinkage const sys_call_ptr_t x32_sys_call_table[] = {
+#include <asm/syscalls_x32.h>
};
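
All three syscall tables above use the same X-macro technique: the generated syscalls_*.h header is included twice, first with __SYSCALL expanding to an extern declaration and then to a table entry, which also lets the arrays drop their explicit size and [nr] designators. A self-contained sketch of the pattern, with a SYSCALL_LIST macro standing in for the generated header:

    #include <stdio.h>

    /* stand-in for the generated asm/syscalls_*.h */
    #define SYSCALL_LIST \
        __SYSCALL(0, sys_read)  \
        __SYSCALL(1, sys_write)

    #define __SYSCALL(nr, sym) long sym(void);
    SYSCALL_LIST                       /* first expansion: prototypes */
    #undef __SYSCALL

    #define __SYSCALL(nr, sym) sym,
    static long (*const sys_call_table[])(void) = {
        SYSCALL_LIST                   /* second expansion: table body */
    };
    #undef __SYSCALL

    long sys_read(void)  { return 0; }
    long sys_write(void) { return 1; }

    int main(void)
    {
        printf("%ld\n", sys_call_table[1]());   /* prints 1 */
        return 0;
    }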
diff --git a/arch/x86/entry/syscalls/Makefile b/arch/x86/entry/syscalls/Makefile
index d8c4f6c9eadc..5b3efed0e4e8 100644
--- a/arch/x86/entry/syscalls/Makefile
+++ b/arch/x86/entry/syscalls/Makefile
@@ -9,47 +9,54 @@ _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
syscall32 := $(src)/syscall_32.tbl
syscall64 := $(src)/syscall_64.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
+offset :=
+prefix :=
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abi_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '$(syshdr_offset_$(basetarget))'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --abis $(abis) --emit-nr \
+ $(if $(offset),--offset $(offset)) \
+ $(if $(prefix),--prefix $(prefix)) \
+ $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@
quiet_cmd_hypercalls = HYPERCALLS $@
cmd_hypercalls = $(CONFIG_SHELL) '$<' $@ $(filter-out $<, $(real-prereqs))
-syshdr_abi_unistd_32 := i386
+$(uapi)/unistd_32.h: abis := i386
$(uapi)/unistd_32.h: $(syscall32) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abi_unistd_32_ia32 := i386
-syshdr_pfx_unistd_32_ia32 := ia32_
+$(out)/unistd_32_ia32.h: abis := i386
+$(out)/unistd_32_ia32.h: prefix := ia32_
$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abi_unistd_x32 := common,x32
-syshdr_offset_unistd_x32 := __X32_SYSCALL_BIT
+$(uapi)/unistd_x32.h: abis := common,x32
+$(uapi)/unistd_x32.h: offset := __X32_SYSCALL_BIT
$(uapi)/unistd_x32.h: $(syscall64) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abi_unistd_64 := common,64
+$(uapi)/unistd_64.h: abis := common,64
$(uapi)/unistd_64.h: $(syscall64) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abi_unistd_64_x32 := x32
-syshdr_pfx_unistd_64_x32 := x32_
+$(out)/unistd_64_x32.h: abis := x32
+$(out)/unistd_64_x32.h: prefix := x32_
$(out)/unistd_64_x32.h: $(syscall64) $(syshdr) FORCE
$(call if_changed,syshdr)
+$(out)/syscalls_32.h: abis := i386
$(out)/syscalls_32.h: $(syscall32) $(systbl) FORCE
$(call if_changed,systbl)
+$(out)/syscalls_64.h: abis := common,64
$(out)/syscalls_64.h: $(syscall64) $(systbl) FORCE
$(call if_changed,systbl)
+$(out)/syscalls_x32.h: abis := common,x32
+$(out)/syscalls_x32.h: $(syscall64) $(systbl) FORCE
+ $(call if_changed,systbl)
$(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh FORCE
$(call if_changed,hypercalls)
@@ -60,6 +67,7 @@ uapisyshdr-y += unistd_32.h unistd_64.h unistd_x32.h
syshdr-y += syscalls_32.h
syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h unistd_64_x32.h
syshdr-$(CONFIG_X86_64) += syscalls_64.h
+syshdr-$(CONFIG_X86_X32) += syscalls_x32.h
syshdr-$(CONFIG_XEN) += xen-hypercalls.h
uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index a1c9f496fca6..fba2f615119a 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -447,3 +447,7 @@
440 i386 process_madvise sys_process_madvise
441 i386 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
442 i386 mount_setattr sys_mount_setattr
+443 i386 quotactl_fd sys_quotactl_fd
+444 i386 landlock_create_ruleset sys_landlock_create_ruleset
+445 i386 landlock_add_rule sys_landlock_add_rule
+446 i386 landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 7bf01cbe582f..af973e400053 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -364,6 +364,10 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/arch/x86/entry/syscalls/syscallhdr.sh b/arch/x86/entry/syscalls/syscallhdr.sh
deleted file mode 100644
index cc1e63857427..000000000000
--- a/arch/x86/entry/syscalls/syscallhdr.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_ASM_X86_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- echo "#ifndef ${fileguard}"
- echo "#define ${fileguard} 1"
- echo ""
-
- max=0
- while read nr abi name entry ; do
- if [ -z "$offset" ]; then
- echo "#define __NR_${prefix}${name} $nr"
- else
- echo "#define __NR_${prefix}${name} ($offset + $nr)"
- fi
-
- max=$nr
- done
-
- echo ""
- echo "#ifdef __KERNEL__"
- echo "#define __NR_${prefix}syscall_max $max"
- echo "#endif"
- echo ""
- echo "#endif /* ${fileguard} */"
-) > "$out"
diff --git a/arch/x86/entry/syscalls/syscalltbl.sh b/arch/x86/entry/syscalls/syscalltbl.sh
deleted file mode 100644
index 929bde120d6b..000000000000
--- a/arch/x86/entry/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-
-syscall_macro() {
- local abi="$1"
- local nr="$2"
- local entry="$3"
-
- echo "__SYSCALL_${abi}($nr, $entry)"
-}
-
-emit() {
- local abi="$1"
- local nr="$2"
- local entry="$3"
- local compat="$4"
-
- if [ "$abi" != "I386" -a -n "$compat" ]; then
- echo "a compat entry ($abi: $compat) for a 64-bit syscall makes no sense" >&2
- exit 1
- fi
-
- if [ -z "$compat" ]; then
- if [ -n "$entry" ]; then
- syscall_macro "$abi" "$nr" "$entry"
- fi
- else
- echo "#ifdef CONFIG_X86_32"
- if [ -n "$entry" ]; then
- syscall_macro "$abi" "$nr" "$entry"
- fi
- echo "#else"
- syscall_macro "$abi" "$nr" "$compat"
- echo "#endif"
- fi
-}
-
-grep '^[0-9]' "$in" | sort -n | (
- while read nr abi name entry compat; do
- abi=`echo "$abi" | tr '[a-z]' '[A-Z]'`
- emit "$abi" "$nr" "$entry" "$compat"
- done
-) > "$out"
diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c
index 2d0f3d8bcc25..edfe9780f6d1 100644
--- a/arch/x86/entry/vdso/vdso2c.c
+++ b/arch/x86/entry/vdso/vdso2c.c
@@ -218,7 +218,7 @@ int main(int argc, char **argv)
/*
* Figure out the struct name. If we're writing to a .so file,
- * generate raw output insted.
+ * generate raw output instead.
*/
name = strdup(argv[3]);
namelen = strlen(name);
diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
index 1c7cfac7e64a..5264daa8859f 100644
--- a/arch/x86/entry/vdso/vdso2c.h
+++ b/arch/x86/entry/vdso/vdso2c.h
@@ -35,7 +35,7 @@ static void BITSFUNC(extract)(const unsigned char *data, size_t data_len,
if (offset + len > data_len)
fail("section to extract overruns input data");
- fprintf(outfile, "static const unsigned char %s[%lu] = {", name, len);
+ fprintf(outfile, "static const unsigned char %s[%zu] = {", name, len);
BITSFUNC(copy)(outfile, data + offset, len);
fprintf(outfile, "\n};\n\n");
}
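
The vdso2c.h fix above swaps %lu for %zu because the len being printed is a size_t, which is not unsigned long on every ABI; %zu is the conversion specifier defined for size_t. A minimal demonstration:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        size_t len = sizeof(int[10]);

        /* %zu matches size_t on every ABI; %lu may not */
        printf("static const unsigned char blob[%zu] = {};\n", len);
        return 0;
    }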
diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
index de1fff7188aa..6ddd7a937b3e 100644
--- a/arch/x86/entry/vdso/vdso32/system_call.S
+++ b/arch/x86/entry/vdso/vdso32/system_call.S
@@ -6,7 +6,7 @@
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
.text
.globl __kernel_vsyscall
@@ -29,7 +29,7 @@ __kernel_vsyscall:
* anyone with an AMD CPU, for example). Nonetheless, we try to keep
* it working approximately as well as it ever worked.
*
- * This link may eludicate some of the history:
+ * This link may elucidate some of the history:
* https://android-review.googlesource.com/#/q/Iac3295376d61ef83e713ac9b528f3b50aa780cd7
* personally, I find it hard to understand what's going on there.
*
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 825e829ffff1..235a5794296a 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -358,7 +358,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
mmap_write_lock(mm);
/*
* Check if we have already mapped vdso blob - fail to prevent
- * abusing from userspace install_speciall_mapping, which may
+ * abusing from userspace install_special_mapping, which may
* not do accounting and rlimit right.
* We could search vma near context.vdso, but it's a slowpath,
* so let's explicitly check all VMAs to be completely sure.
diff --git a/arch/x86/entry/vdso/vsgx.S b/arch/x86/entry/vdso/vsgx.S
index 86a0e94f68df..99dafac992e2 100644
--- a/arch/x86/entry/vdso/vsgx.S
+++ b/arch/x86/entry/vdso/vsgx.S
@@ -137,7 +137,7 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave)
/*
* If the return from callback is zero or negative, return immediately,
- * else re-execute ENCLU with the postive return value interpreted as
+ * else re-execute ENCLU with the positive return value interpreted as
* the requested ENCLU function.
*/
cmp $0, %eax
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 2c1791c4a518..9687a8aef01c 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -623,7 +623,7 @@ static void amd_pmu_disable_all(void)
/*
* Check each counter for overflow and wait for it to be reset by the
* NMI if it has overflowed. This relies on the fact that all active
- * counters are always enabled when this function is caled and
+ * counters are always enabled when this function is called and
* ARCH_PERFMON_EVENTSEL_INT is always set.
*/
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
index be50ef8572cc..913745f1419b 100644
--- a/arch/x86/events/amd/iommu.c
+++ b/arch/x86/events/amd/iommu.c
@@ -14,12 +14,11 @@
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
+#include <linux/amd-iommu.h>
#include "../perf_event.h"
#include "iommu.h"
-#define COUNTER_SHIFT 16
-
/* iommu pmu conf masks */
#define GET_CSOURCE(x) ((x)->conf & 0xFFULL)
#define GET_DEVID(x) (((x)->conf >> 8) & 0xFFFFULL)
@@ -81,12 +80,12 @@ static struct attribute_group amd_iommu_events_group = {
};
struct amd_iommu_event_desc {
- struct kobj_attribute attr;
+ struct device_attribute attr;
const char *event;
};
-static ssize_t _iommu_event_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static ssize_t _iommu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct amd_iommu_event_desc *event =
container_of(attr, struct amd_iommu_event_desc, attr);
@@ -285,22 +284,31 @@ static void perf_iommu_start(struct perf_event *event, int flags)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
+ /*
+	 * To account for power-gating, which prevents writes to
+	 * the counter, we need to enable the counter
+	 * before setting up the counter register.
+ */
+ perf_iommu_enable_event(event);
+
if (flags & PERF_EF_RELOAD) {
- u64 prev_raw_count = local64_read(&hwc->prev_count);
+ u64 count = 0;
struct amd_iommu *iommu = perf_event_2_iommu(event);
+ /*
+	 * Since the IOMMU PMU only supports counting mode,
+	 * the counter always starts at zero.
+ */
amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
- IOMMU_PC_COUNTER_REG, &prev_raw_count);
+ IOMMU_PC_COUNTER_REG, &count);
}
- perf_iommu_enable_event(event);
perf_event_update_userpage(event);
-
}
static void perf_iommu_read(struct perf_event *event)
{
- u64 count, prev, delta;
+ u64 count;
struct hw_perf_event *hwc = &event->hw;
struct amd_iommu *iommu = perf_event_2_iommu(event);
@@ -311,14 +319,11 @@ static void perf_iommu_read(struct perf_event *event)
/* IOMMU pc counter register is only 48 bits */
count &= GENMASK_ULL(47, 0);
- prev = local64_read(&hwc->prev_count);
- if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
- return;
-
- /* Handle 48-bit counter overflow */
- delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
- delta >>= COUNTER_SHIFT;
- local64_add(delta, &event->count);
+ /*
+	 * Since the counter always starts at zero,
+	 * simply accumulate the count for the event.
+ */
+ local64_add(count, &event->count);
}
static void perf_iommu_stop(struct perf_event *event, int flags)
@@ -328,15 +333,16 @@ static void perf_iommu_stop(struct perf_event *event, int flags)
if (hwc->state & PERF_HES_UPTODATE)
return;
+ /*
+ * To account for power-gating, in which reading the counter would
+ * return zero, we need to read the register before disabling.
+ */
+ perf_iommu_read(event);
+ hwc->state |= PERF_HES_UPTODATE;
+
perf_iommu_disable_event(event);
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
-
- if (hwc->state & PERF_HES_UPTODATE)
- return;
-
- perf_iommu_read(event);
- hwc->state |= PERF_HES_UPTODATE;
}
static int perf_iommu_add(struct perf_event *event, int flags)
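
The perf_iommu_read() rework above drops the prev/cmpxchg delta bookkeeping: because this PMU only counts and the counter is programmed to zero on every start, each readout is already a delta and can be added directly. A sketch contrasting the two readout styles (structure and function names invented for illustration):

    #include <stdint.h>

    #define COUNTER_MASK ((1ULL << 48) - 1)   /* 48-bit hardware counter */

    struct ev { uint64_t prev; uint64_t total; };

    /* free-running counter: account only the movement since last read */
    static void read_delta(struct ev *e, uint64_t hw)
    {
        uint64_t delta = (hw - e->prev) & COUNTER_MASK;

        e->prev = hw;
        e->total += delta;
    }

    /* starts-at-zero counter: the raw value is already the delta */
    static void read_accumulate(struct ev *e, uint64_t hw)
    {
        e->total += hw & COUNTER_MASK;
    }

    int main(void)
    {
        struct ev a = {0}, b = {0};

        read_delta(&a, 100); read_delta(&a, 250);            /* total 250 */
        read_accumulate(&b, 100); read_accumulate(&b, 150);  /* total 250 */
        return (a.total == b.total) ? 0 : 1;
    }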
diff --git a/arch/x86/events/amd/iommu.h b/arch/x86/events/amd/iommu.h
index 0e5c036fd7be..e6310c635c8b 100644
--- a/arch/x86/events/amd/iommu.h
+++ b/arch/x86/events/amd/iommu.h
@@ -17,27 +17,8 @@
#define IOMMU_PC_DEVID_MATCH_REG 0x20
#define IOMMU_PC_COUNTER_REPORT_REG 0x28
-/* maximun specified bank/counters */
+/* maximum specified bank/counters */
#define PC_MAX_SPEC_BNKS 64
#define PC_MAX_SPEC_CNTRS 16
-struct amd_iommu;
-
-/* amd_iommu_init.c external support functions */
-extern int amd_iommu_get_num_iommus(void);
-
-extern bool amd_iommu_pc_supported(void);
-
-extern u8 amd_iommu_pc_get_max_banks(unsigned int idx);
-
-extern u8 amd_iommu_pc_get_max_counters(unsigned int idx);
-
-extern int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
- u8 fxn, u64 *value);
-
-extern int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
- u8 fxn, u64 *value);
-
-extern struct amd_iommu *get_amd_iommu(int idx);
-
#endif /*_PERF_EVENT_AMD_IOMMU_H_*/
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 7f014d450bc2..582c0ffb5e98 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -275,14 +275,14 @@ static struct attribute_group amd_uncore_attr_group = {
};
#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
-static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, \
+static ssize_t __uncore_##_var##_show(struct device *dev, \
+ struct device_attribute *attr, \
char *page) \
{ \
BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
return sprintf(page, _format "\n"); \
} \
-static struct kobj_attribute format_attr_##_var = \
+static struct device_attribute format_attr_##_var = \
__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
DEFINE_UNCORE_FORMAT_ATTR(event12, event, "config:0-7,32-35");
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 6ddeed3cd2ac..1eb45139fcc6 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -45,13 +45,16 @@
#include "perf_event.h"
struct x86_pmu x86_pmu __read_mostly;
+static struct pmu pmu;
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
+ .pmu = &pmu,
};
DEFINE_STATIC_KEY_FALSE(rdpmc_never_available_key);
DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key);
+DEFINE_STATIC_KEY_FALSE(perf_is_hybrid);
/*
* This here uses DEFINE_STATIC_CALL_NULL() to get a static_call defined
@@ -81,7 +84,11 @@ DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
-DEFINE_STATIC_CALL_NULL(x86_pmu_guest_get_msrs, *x86_pmu.guest_get_msrs);
+/*
+ * This one is magic: it will get called even when PMU init fails (because
+ * there is no PMU), in which case it should simply return NULL.
+ */
+DEFINE_STATIC_CALL_RET0(x86_pmu_guest_get_msrs, *x86_pmu.guest_get_msrs);
u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
@@ -147,15 +154,16 @@ again:
*/
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
+ struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
struct hw_perf_event_extra *reg;
struct extra_reg *er;
reg = &event->hw.extra_reg;
- if (!x86_pmu.extra_regs)
+ if (!extra_regs)
return 0;
- for (er = x86_pmu.extra_regs; er->msr; er++) {
+ for (er = extra_regs; er->msr; er++) {
if (er->event != (config & er->config_mask))
continue;
if (event->attr.config1 & ~er->valid_mask)
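
x86_pmu_extra_regs() above is the first of many call sites converted to the hybrid() accessor, which picks the per-PMU copy of a field on hybrid parts and otherwise falls back to the single global x86_pmu copy. A hedged C sketch of that fallback shape (names and layout are illustrative, not the kernel's):

    #include <stdio.h>

    struct pmu_caps { int num_counters; };

    static struct pmu_caps global_caps = { .num_counters = 8 };
    static int is_hybrid_system = 1;

    static int pmu_num_counters(const struct pmu_caps *per_pmu)
    {
        if (is_hybrid_system && per_pmu)
            return per_pmu->num_counters;   /* per-PMU override */
        return global_caps.num_counters;    /* legacy global copy */
    }

    int main(void)
    {
        struct pmu_caps atom = { .num_counters = 6 };

        printf("%d %d\n", pmu_num_counters(&atom), pmu_num_counters(NULL));
        /* prints: 6 8 */
        return 0;
    }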
@@ -178,16 +186,29 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
#ifdef CONFIG_X86_LOCAL_APIC
+static inline int get_possible_num_counters(void)
+{
+ int i, num_counters = x86_pmu.num_counters;
+
+ if (!is_hybrid())
+ return num_counters;
+
+ for (i = 0; i < x86_pmu.num_hybrid_pmus; i++)
+ num_counters = max_t(int, num_counters, x86_pmu.hybrid_pmu[i].num_counters);
+
+ return num_counters;
+}
+
static bool reserve_pmc_hardware(void)
{
- int i;
+ int i, num_counters = get_possible_num_counters();
- for (i = 0; i < x86_pmu.num_counters; i++) {
+ for (i = 0; i < num_counters; i++) {
if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
goto perfctr_fail;
}
- for (i = 0; i < x86_pmu.num_counters; i++) {
+ for (i = 0; i < num_counters; i++) {
if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
goto eventsel_fail;
}
@@ -198,7 +219,7 @@ eventsel_fail:
for (i--; i >= 0; i--)
release_evntsel_nmi(x86_pmu_config_addr(i));
- i = x86_pmu.num_counters;
+ i = num_counters;
perfctr_fail:
for (i--; i >= 0; i--)
@@ -209,9 +230,9 @@ perfctr_fail:
static void release_pmc_hardware(void)
{
- int i;
+ int i, num_counters = get_possible_num_counters();
- for (i = 0; i < x86_pmu.num_counters; i++) {
+ for (i = 0; i < num_counters; i++) {
release_perfctr_nmi(x86_pmu_event_addr(i));
release_evntsel_nmi(x86_pmu_config_addr(i));
}
@@ -224,7 +245,7 @@ static void release_pmc_hardware(void) {}
#endif
-static bool check_hw_exists(void)
+bool check_hw_exists(struct pmu *pmu, int num_counters, int num_counters_fixed)
{
u64 val, val_fail = -1, val_new= ~0;
int i, reg, reg_fail = -1, ret = 0;
@@ -235,7 +256,7 @@ static bool check_hw_exists(void)
* Check to see if the BIOS enabled any of the counters, if so
* complain and bail.
*/
- for (i = 0; i < x86_pmu.num_counters; i++) {
+ for (i = 0; i < num_counters; i++) {
reg = x86_pmu_config_addr(i);
ret = rdmsrl_safe(reg, &val);
if (ret)
@@ -249,15 +270,15 @@ static bool check_hw_exists(void)
}
}
- if (x86_pmu.num_counters_fixed) {
+ if (num_counters_fixed) {
reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
ret = rdmsrl_safe(reg, &val);
if (ret)
goto msr_fail;
- for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
- if (fixed_counter_disabled(i))
+ for (i = 0; i < num_counters_fixed; i++) {
+ if (fixed_counter_disabled(i, pmu))
continue;
- if (val & (0x03 << i*4)) {
+ if (val & (0x03ULL << i*4)) {
bios_fail = 1;
val_fail = val;
reg_fail = reg;
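
The 0x03ULL change in check_hw_exists() above is more than style: 0x03 is a 32-bit int, and once i*4 reaches 32 (more than eight fixed counters) the shift is undefined behavior; the ULL literal makes it a well-defined 64-bit shift. An illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int i = 8;                            /* ninth fixed counter */
        /* uint32_t bad = 0x03 << (i * 4);      UB: shift count is 32 */
        uint64_t ok = 0x03ULL << (i * 4);     /* 64-bit shift, well defined */

        printf("%#llx\n", (unsigned long long)ok);  /* prints 0x300000000 */
        return 0;
    }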
@@ -356,8 +377,7 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
return -EINVAL;
cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
- val = hw_cache_event_ids[cache_type][cache_op][cache_result];
-
+ val = hybrid_var(event->pmu, hw_cache_event_ids)[cache_type][cache_op][cache_result];
if (val == 0)
return -ENOENT;
@@ -365,7 +385,7 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
return -EINVAL;
hwc->config |= val;
- attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
+ attr->config1 = hybrid_var(event->pmu, hw_cache_extra_regs)[cache_type][cache_op][cache_result];
return x86_pmu_extra_regs(val, event);
}
@@ -376,10 +396,12 @@ int x86_reserve_hardware(void)
if (!atomic_inc_not_zero(&pmc_refcount)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&pmc_refcount) == 0) {
- if (!reserve_pmc_hardware())
+ if (!reserve_pmc_hardware()) {
err = -EBUSY;
- else
+ } else {
reserve_ds_buffers();
+ reserve_lbr_buffers();
+ }
}
if (!err)
atomic_inc(&pmc_refcount);
@@ -458,7 +480,7 @@ int x86_setup_perfctr(struct perf_event *event)
local64_set(&hwc->period_left, hwc->sample_period);
}
- if (attr->type == PERF_TYPE_RAW)
+ if (attr->type == event->pmu->type)
return x86_pmu_extra_regs(event->attr.config, event);
if (attr->type == PERF_TYPE_HW_CACHE)
@@ -593,7 +615,7 @@ int x86_pmu_hw_config(struct perf_event *event)
if (!event->attr.exclude_kernel)
event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;
- if (event->attr.type == PERF_TYPE_RAW)
+ if (event->attr.type == event->pmu->type)
event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
if (event->attr.sample_period && x86_pmu.limit_period) {
@@ -720,16 +742,33 @@ void x86_pmu_enable_all(int added)
}
}
-static struct pmu pmu;
-
static inline int is_x86_event(struct perf_event *event)
{
- return event->pmu == &pmu;
+ int i;
+
+ if (!is_hybrid())
+ return event->pmu == &pmu;
+
+ for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
+ if (event->pmu == &x86_pmu.hybrid_pmu[i].pmu)
+ return true;
+ }
+
+ return false;
}
-struct pmu *x86_get_pmu(void)
+struct pmu *x86_get_pmu(unsigned int cpu)
{
- return &pmu;
+ struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+ /*
+ * All CPUs of the hybrid type have been offline.
+ * The x86_get_pmu() should not be invoked.
+ */
+ if (WARN_ON_ONCE(!cpuc->pmu))
+ return &pmu;
+
+ return cpuc->pmu;
}
/*
* Event scheduler state:
@@ -761,7 +800,7 @@ struct perf_sched {
};
/*
- * Initialize interator that runs through all events and counters.
+ * Initialize iterator that runs through all events and counters.
*/
static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
int num, int wmin, int wmax, int gpmax)
@@ -932,6 +971,7 @@ EXPORT_SYMBOL_GPL(perf_assign_events);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
+ int num_counters = hybrid(cpuc->pmu, num_counters);
struct event_constraint *c;
struct perf_event *e;
int n0, i, wmin, wmax, unsched = 0;
@@ -1007,7 +1047,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
/* slow path */
if (i != n) {
- int gpmax = x86_pmu.num_counters;
+ int gpmax = num_counters;
/*
* Do not allow scheduling of more than half the available
@@ -1028,7 +1068,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
* the extra Merge events needed by large increment events.
*/
if (x86_pmu.flags & PMU_FL_PAIR) {
- gpmax = x86_pmu.num_counters - cpuc->n_pair;
+ gpmax = num_counters - cpuc->n_pair;
WARN_ON(gpmax <= 0);
}
@@ -1092,8 +1132,9 @@ static void del_nr_metric_event(struct cpu_hw_events *cpuc,
static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
int max_count, int n)
{
+ union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap);
- if (x86_pmu.intel_cap.perf_metrics && add_nr_metric_event(cpuc, event))
+ if (intel_cap.perf_metrics && add_nr_metric_event(cpuc, event))
return -EINVAL;
if (n >= max_count + cpuc->n_metric)
@@ -1114,10 +1155,12 @@ static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event,
*/
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
+ int num_counters = hybrid(cpuc->pmu, num_counters);
+ int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
struct perf_event *event;
int n, max_count;
- max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;
+ max_count = num_counters + num_counters_fixed;
/* current number of events already accepted */
n = cpuc->n_events;
@@ -1476,7 +1519,6 @@ static void x86_pmu_start(struct perf_event *event, int flags)
cpuc->events[idx] = event;
__set_bit(idx, cpuc->active_mask);
- __set_bit(idx, cpuc->running);
static_call(x86_pmu_enable)(event);
perf_event_update_userpage(event);
}
@@ -1485,18 +1527,19 @@ void perf_event_print_debug(void)
{
u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
u64 pebs, debugctl;
- struct cpu_hw_events *cpuc;
+ int cpu = smp_processor_id();
+ struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ int num_counters = hybrid(cpuc->pmu, num_counters);
+ int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
+ struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints);
unsigned long flags;
- int cpu, idx;
+ int idx;
- if (!x86_pmu.num_counters)
+ if (!num_counters)
return;
local_irq_save(flags);
- cpu = smp_processor_id();
- cpuc = &per_cpu(cpu_hw_events, cpu);
-
if (x86_pmu.version >= 2) {
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
@@ -1508,7 +1551,7 @@ void perf_event_print_debug(void)
pr_info("CPU#%d: status: %016llx\n", cpu, status);
pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
- if (x86_pmu.pebs_constraints) {
+ if (pebs_constraints) {
rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
}
@@ -1519,7 +1562,7 @@ void perf_event_print_debug(void)
}
pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for (idx = 0; idx < num_counters; idx++) {
rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
rdmsrl(x86_pmu_event_addr(idx), pmc_count);
@@ -1532,8 +1575,8 @@ void perf_event_print_debug(void)
pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
cpu, idx, prev_left);
}
- for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
- if (fixed_counter_disabled(idx))
+ for (idx = 0; idx < num_counters_fixed; idx++) {
+ if (fixed_counter_disabled(idx, cpuc->pmu))
continue;
rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
@@ -1569,6 +1612,7 @@ void x86_pmu_stop(struct perf_event *event, int flags)
static void x86_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap);
int i;
/*
@@ -1582,6 +1626,8 @@ static void x86_pmu_del(struct perf_event *event, int flags)
if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
goto do_del;
+ __set_bit(event->hw.idx, cpuc->dirty);
+
/*
* Not a TXN, therefore cleanup properly.
*/
@@ -1608,7 +1654,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
}
cpuc->event_constraint[i-1] = NULL;
--cpuc->n_events;
- if (x86_pmu.intel_cap.perf_metrics)
+ if (intel_cap.perf_metrics)
del_nr_metric_event(cpuc, event);
perf_event_update_userpage(event);
@@ -1818,6 +1864,49 @@ ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
pmu_attr->event_str_noht);
}
+ssize_t events_hybrid_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_hybrid_attr *pmu_attr =
+ container_of(attr, struct perf_pmu_events_hybrid_attr, attr);
+ struct x86_hybrid_pmu *pmu;
+ const char *str, *next_str;
+ int i;
+
+ if (hweight64(pmu_attr->pmu_type) == 1)
+ return sprintf(page, "%s", pmu_attr->event_str);
+
+ /*
+ * Hybrid PMUs may support the same event name, but with different
+ * event encodings; e.g., the mem-loads event on an Atom PMU has a
+ * different encoding than on a Core PMU.
+ *
+ * The event_str includes all event encodings. The encodings are
+ * separated by ";", and their order must follow the order of the
+ * hybrid PMU index.
+ */
+ pmu = container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
+
+ str = pmu_attr->event_str;
+ for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
+ if (!(x86_pmu.hybrid_pmu[i].cpu_type & pmu_attr->pmu_type))
+ continue;
+ if (x86_pmu.hybrid_pmu[i].cpu_type & pmu->cpu_type) {
+ next_str = strchr(str, ';');
+ if (next_str)
+ return snprintf(page, next_str - str + 1, "%s", str);
+ else
+ return sprintf(page, "%s", str);
+ }
+ str = strchr(str, ';');
+ str++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(events_hybrid_sysfs_show);
+
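/*
 * Concrete illustration of the ";" convention, using the mem-loads
 * string added later in this patch:
 *
 *   "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3"
 *
 * Reading events/mem-loads under the first hybrid PMU returns the
 * part before the ";"; under the second, the part after it.
 */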
EVENT_ATTR(cpu-cycles, CPU_CYCLES );
EVENT_ATTR(instructions, INSTRUCTIONS );
EVENT_ATTR(cache-references, CACHE_REFERENCES );
@@ -1944,11 +2033,35 @@ static void _x86_pmu_read(struct perf_event *event)
x86_perf_event_update(event);
}
-static inline struct perf_guest_switch_msr *
-perf_guest_get_msrs_nop(int *nr)
+void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
+ u64 intel_ctrl)
+{
+ pr_info("... version: %d\n", x86_pmu.version);
+ pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
+ pr_info("... generic registers: %d\n", num_counters);
+ pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
+ pr_info("... max period: %016Lx\n", x86_pmu.max_period);
+ pr_info("... fixed-purpose events: %lu\n",
+ hweight64((((1ULL << num_counters_fixed) - 1)
+ << INTEL_PMC_IDX_FIXED) & intel_ctrl));
+ pr_info("... event mask: %016Lx\n", intel_ctrl);
+}
+
+/*
+ * The generic code is not hybrid-friendly. The hybrid_pmu->pmu
+ * of the first registered PMU is unconditionally assigned to
+ * each possible cpuctx->ctx.pmu.
+ * Assign the correct hybrid PMU to cpuctx->ctx.pmu instead.
+ */
+void x86_pmu_update_cpu_context(struct pmu *pmu, int cpu)
{
- *nr = 0;
- return NULL;
+ struct perf_cpu_context *cpuctx;
+
+ if (!pmu->pmu_cpu_context)
+ return;
+
+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+ cpuctx->ctx.pmu = pmu;
}
static int __init init_hw_perf_events(void)
@@ -1984,7 +2097,7 @@ static int __init init_hw_perf_events(void)
pmu_check_apic();
/* sanity check that the hardware exists or is emulated */
- if (!check_hw_exists())
+ if (!check_hw_exists(&pmu, x86_pmu.num_counters, x86_pmu.num_counters_fixed))
return 0;
pr_cont("%s PMU driver.\n", x86_pmu.name);
@@ -2011,21 +2124,17 @@ static int __init init_hw_perf_events(void)
pmu.attr_update = x86_pmu.attr_update;
- pr_info("... version: %d\n", x86_pmu.version);
- pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
- pr_info("... generic registers: %d\n", x86_pmu.num_counters);
- pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
- pr_info("... max period: %016Lx\n", x86_pmu.max_period);
- pr_info("... fixed-purpose events: %lu\n",
- hweight64((((1ULL << x86_pmu.num_counters_fixed) - 1)
- << INTEL_PMC_IDX_FIXED) & x86_pmu.intel_ctrl));
- pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
+ if (!is_hybrid()) {
+ x86_pmu_show_pmu_cap(x86_pmu.num_counters,
+ x86_pmu.num_counters_fixed,
+ x86_pmu.intel_ctrl);
+ }
if (!x86_pmu.read)
x86_pmu.read = _x86_pmu_read;
if (!x86_pmu.guest_get_msrs)
- x86_pmu.guest_get_msrs = perf_guest_get_msrs_nop;
+ x86_pmu.guest_get_msrs = (void *)&__static_call_return0;
x86_pmu_static_call_update();
@@ -2049,9 +2158,46 @@ static int __init init_hw_perf_events(void)
if (err)
goto out1;
- err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
- if (err)
- goto out2;
+ if (!is_hybrid()) {
+ err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+ if (err)
+ goto out2;
+ } else {
+ u8 cpu_type = get_this_hybrid_cpu_type();
+ struct x86_hybrid_pmu *hybrid_pmu;
+ int i, j;
+
+ if (!cpu_type && x86_pmu.get_hybrid_cpu_type)
+ cpu_type = x86_pmu.get_hybrid_cpu_type();
+
+ for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
+ hybrid_pmu = &x86_pmu.hybrid_pmu[i];
+
+ hybrid_pmu->pmu = pmu;
+ hybrid_pmu->pmu.type = -1;
+ hybrid_pmu->pmu.attr_update = x86_pmu.attr_update;
+ hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;
+ hybrid_pmu->pmu.capabilities |= PERF_PMU_CAP_EXTENDED_HW_TYPE;
+
+ err = perf_pmu_register(&hybrid_pmu->pmu, hybrid_pmu->name,
+ (hybrid_pmu->cpu_type == hybrid_big) ? PERF_TYPE_RAW : -1);
+ if (err)
+ break;
+
+ if (cpu_type == hybrid_pmu->cpu_type)
+ x86_pmu_update_cpu_context(&hybrid_pmu->pmu, raw_smp_processor_id());
+ }
+
+ if (i < x86_pmu.num_hybrid_pmus) {
+ for (j = 0; j < i; j++)
+ perf_pmu_unregister(&x86_pmu.hybrid_pmu[j].pmu);
+ pr_warn("Failed to register hybrid PMUs\n");
+ kfree(x86_pmu.hybrid_pmu);
+ x86_pmu.hybrid_pmu = NULL;
+ x86_pmu.num_hybrid_pmus = 0;
+ goto out2;
+ }
+ }
return 0;
@@ -2176,16 +2322,27 @@ static void free_fake_cpuc(struct cpu_hw_events *cpuc)
kfree(cpuc);
}
-static struct cpu_hw_events *allocate_fake_cpuc(void)
+static struct cpu_hw_events *allocate_fake_cpuc(struct pmu *event_pmu)
{
struct cpu_hw_events *cpuc;
- int cpu = raw_smp_processor_id();
+ int cpu;
cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
if (!cpuc)
return ERR_PTR(-ENOMEM);
cpuc->is_fake = 1;
+ if (is_hybrid()) {
+ struct x86_hybrid_pmu *h_pmu;
+
+ h_pmu = hybrid_pmu(event_pmu);
+ if (cpumask_empty(&h_pmu->supported_cpus))
+ goto error;
+ cpu = cpumask_first(&h_pmu->supported_cpus);
+ } else
+ cpu = raw_smp_processor_id();
+ cpuc->pmu = event_pmu;
+
if (intel_cpuc_prepare(cpuc, cpu))
goto error;
@@ -2204,7 +2361,7 @@ static int validate_event(struct perf_event *event)
struct event_constraint *c;
int ret = 0;
- fake_cpuc = allocate_fake_cpuc();
+ fake_cpuc = allocate_fake_cpuc(event->pmu);
if (IS_ERR(fake_cpuc))
return PTR_ERR(fake_cpuc);
@@ -2238,7 +2395,27 @@ static int validate_group(struct perf_event *event)
struct cpu_hw_events *fake_cpuc;
int ret = -EINVAL, n;
- fake_cpuc = allocate_fake_cpuc();
+ /*
+ * Reject events from different hybrid PMUs.
+ */
+ if (is_hybrid()) {
+ struct perf_event *sibling;
+ struct pmu *pmu = NULL;
+
+ if (is_x86_event(leader))
+ pmu = leader->pmu;
+
+ for_each_sibling_event(sibling, leader) {
+ if (!is_x86_event(sibling))
+ continue;
+ if (!pmu)
+ pmu = sibling->pmu;
+ else if (pmu != sibling->pmu)
+ return ret;
+ }
+ }
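+	/*
+	 * What the check above rejects, illustrated with perf tool
+	 * syntax (hypothetical event group, not from this patch):
+	 *
+	 *   perf stat -e '{cpu_core/cycles/,cpu_atom/instructions/}' ...
+	 *
+	 * The two siblings resolve to different hybrid PMUs, so the
+	 * group is refused with -EINVAL before any fake scheduling
+	 * is attempted.
+	 */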
+
+ fake_cpuc = allocate_fake_cpuc(event->pmu);
if (IS_ERR(fake_cpuc))
return PTR_ERR(fake_cpuc);
/*
@@ -2266,35 +2443,26 @@ out:
static int x86_pmu_event_init(struct perf_event *event)
{
- struct pmu *tmp;
+ struct x86_hybrid_pmu *pmu = NULL;
int err;
- switch (event->attr.type) {
- case PERF_TYPE_RAW:
- case PERF_TYPE_HARDWARE:
- case PERF_TYPE_HW_CACHE:
- break;
-
- default:
+ if ((event->attr.type != event->pmu->type) &&
+ (event->attr.type != PERF_TYPE_HARDWARE) &&
+ (event->attr.type != PERF_TYPE_HW_CACHE))
return -ENOENT;
+
+ if (is_hybrid() && (event->cpu != -1)) {
+ pmu = hybrid_pmu(event->pmu);
+ if (!cpumask_test_cpu(event->cpu, &pmu->supported_cpus))
+ return -ENOENT;
}
err = __x86_pmu_event_init(event);
if (!err) {
- /*
- * we temporarily connect event to its pmu
- * such that validate_group() can classify
- * it as an x86 event using is_x86_event()
- */
- tmp = event->pmu;
- event->pmu = &pmu;
-
if (event->group_leader != event)
err = validate_group(event);
else
err = validate_event(event);
-
- event->pmu = tmp;
}
if (err) {
if (event->destroy)
@@ -2308,6 +2476,31 @@ static int x86_pmu_event_init(struct perf_event *event)
return err;
}
+void perf_clear_dirty_counters(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int i;
+
+ /* No need to clear the assigned counters. */
+ for (i = 0; i < cpuc->n_events; i++)
+ __clear_bit(cpuc->assign[i], cpuc->dirty);
+
+ if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX))
+ return;
+
+ for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) {
+ /* Metrics and fake events don't have corresponding HW counters. */
+ if (is_metric_idx(i) || (i == INTEL_PMC_IDX_FIXED_VLBR))
+ continue;
+ else if (i >= INTEL_PMC_IDX_FIXED)
+ wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + (i - INTEL_PMC_IDX_FIXED), 0);
+ else
+ wrmsrl(x86_pmu_event_addr(i), 0);
+ }
+
+ bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX);
+}
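/*
 * Index mapping above, worked through (assuming INTEL_PMC_IDX_FIXED
 * is 32): a dirty bit at position 33 clears fixed counter 1 via
 * MSR_ARCH_PERFMON_FIXED_CTR0 + 1, while bits below 32 clear the
 * matching general-purpose counter through x86_pmu_event_addr().
 */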
+
static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
{
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
@@ -2331,7 +2524,6 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
static void x86_pmu_event_unmapped(struct perf_event *event, struct mm_struct *mm)
{
-
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
return;
@@ -2478,6 +2670,14 @@ static int x86_pmu_aux_output_match(struct perf_event *event)
return 0;
}
+static int x86_pmu_filter_match(struct perf_event *event)
+{
+ if (x86_pmu.filter_match)
+ return x86_pmu.filter_match(event);
+
+ return 1;
+}
+
static struct pmu pmu = {
.pmu_enable = x86_pmu_enable,
.pmu_disable = x86_pmu_disable,
@@ -2505,6 +2705,8 @@ static struct pmu pmu = {
.check_period = x86_pmu_check_period,
.aux_output_match = x86_pmu_aux_output_match,
+
+ .filter_match = x86_pmu_filter_match,
};
void arch_perf_update_userpage(struct perf_event *event,
@@ -2773,6 +2975,11 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
cap->version = x86_pmu.version;
+ /*
+ * KVM doesn't support the hybrid PMU yet.
+ * Return the common value in the global x86_pmu,
+ * which is available for all cores.
+ */
cap->num_counters_gp = x86_pmu.num_counters;
cap->num_counters_fixed = x86_pmu.num_counters_fixed;
cap->bit_width_gp = x86_pmu.cntval_bits;
diff --git a/arch/x86/events/intel/Makefile b/arch/x86/events/intel/Makefile
index e67a5886336c..10bde6c5abb2 100644
--- a/arch/x86/events/intel/Makefile
+++ b/arch/x86/events/intel/Makefile
@@ -3,6 +3,6 @@ obj-$(CONFIG_CPU_SUP_INTEL) += core.o bts.o
obj-$(CONFIG_CPU_SUP_INTEL) += ds.o knc.o
obj-$(CONFIG_CPU_SUP_INTEL) += lbr.o p4.o p6.o pt.o
obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
-intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
+intel-uncore-objs := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o uncore_discovery.o
obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o
intel-cstate-objs := cstate.o
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 731dd8d0dbb1..6320d2cfd9d3 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -594,7 +594,7 @@ static __init int bts_init(void)
* we cannot use the user mapping since it will not be available
* if we're not running the owning process.
*
- * With PTI we can't use the kernal map either, because its not
+ * With PTI we can't use the kernel map either, because it's not
* there when we run userspace.
*
* For now, disable this driver when using PTI.
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 5bac48d5c18e..fca7a6e2242f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -137,7 +137,7 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
- INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMTPY */
+ INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
@@ -280,6 +280,8 @@ static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
+ INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
+ INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
EVENT_EXTRA_END
};
@@ -2076,6 +2078,14 @@ static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
EVENT_EXTRA_END
};
+static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
+ /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
+ EVENT_EXTRA_END
+};
+
#define KNL_OT_L2_HITE BIT_ULL(19) /* Other Tile L2 Hit */
#define KNL_OT_L2_HITF BIT_ULL(20) /* Other Tile L2 Hit */
#define KNL_MCDRAM_LOCAL BIT_ULL(21)
@@ -2153,10 +2163,11 @@ static void intel_pmu_disable_all(void)
static void __intel_pmu_enable_all(int added, bool pmi)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
intel_pmu_lbr_enable_all(pmi);
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
- x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
+ intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
struct perf_event *event =
@@ -2186,7 +2197,7 @@ static void intel_pmu_enable_all(int added)
* magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
* in sequence on the same PMC or on different PMCs.
*
- * In practise it appears some of these events do in fact count, and
+ * In practice it appears some of these events do in fact count, and
* we need to program all 4 events.
*/
static void intel_pmu_nhm_workaround(void)
@@ -2429,13 +2440,23 @@ static int icl_set_topdown_event_period(struct perf_event *event)
return 0;
}
+static int adl_set_topdown_event_period(struct perf_event *event)
+{
+ struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+
+ if (pmu->cpu_type != hybrid_big)
+ return 0;
+
+ return icl_set_topdown_event_period(event);
+}
+
static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
{
u32 val;
/*
* The metric is reported as an 8bit integer fraction
- * suming up to 0xff.
+ * summing up to 0xff.
* slots-in-metric = (Metric / 0xff) * slots
*/
val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
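	/*
	 * Worked example: a metric byte of 0x80 with slots = 1000
	 * yields (0x80 / 0xff) * 1000, i.e. roughly 502 slots
	 * attributed to that metric.
	 */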
@@ -2569,6 +2590,17 @@ static u64 icl_update_topdown_event(struct perf_event *event)
x86_pmu.num_topdown_events - 1);
}
+static u64 adl_update_topdown_event(struct perf_event *event)
+{
+ struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+
+ if (pmu->cpu_type != hybrid_big)
+ return 0;
+
+ return icl_update_topdown_event(event);
+}
+
static void intel_pmu_read_topdown_event(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -2709,22 +2741,25 @@ int intel_pmu_save_and_restart(struct perf_event *event)
static void intel_pmu_reset(void)
{
struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
+ int num_counters = hybrid(cpuc->pmu, num_counters);
unsigned long flags;
int idx;
- if (!x86_pmu.num_counters)
+ if (!num_counters)
return;
local_irq_save(flags);
pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
- for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+ for (idx = 0; idx < num_counters; idx++) {
wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
}
- for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
- if (fixed_counter_disabled(idx))
+ for (idx = 0; idx < num_counters_fixed; idx++) {
+ if (fixed_counter_disabled(idx, cpuc->pmu))
continue;
wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
}
@@ -2753,6 +2788,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int bit;
int handled = 0;
+ u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
inc_irq_stat(apic_perf_irqs);
@@ -2776,7 +2812,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
* processing loop coming after that the function, otherwise
* phony regular samples may be generated in the sampling buffer
* not marked with the EXACT tag. Another possibility is to have
- * one PEBS event and at least one non-PEBS event whic hoverflows
+ * one PEBS event and at least one non-PEBS event which overflows
* while PEBS has armed. In this case, bit 62 of GLOBAL_STATUS will
* not be set, yet the overflow status bit for the PEBS counter will
* be on Skylake.
@@ -2798,7 +2834,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
handled++;
x86_pmu.drain_pebs(regs, &data);
- status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
+ status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
/*
* PMI throttle may be triggered, which stops the PEBS event.
@@ -2824,7 +2860,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
}
/*
- * Intel Perf mertrics
+ * Intel Perf metrics
*/
if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
handled++;
@@ -2961,8 +2997,10 @@ intel_vlbr_constraints(struct perf_event *event)
return NULL;
}
-static int intel_alt_er(int idx, u64 config)
+static int intel_alt_er(struct cpu_hw_events *cpuc,
+ int idx, u64 config)
{
+ struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
int alt_idx = idx;
if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
@@ -2974,7 +3012,7 @@ static int intel_alt_er(int idx, u64 config)
if (idx == EXTRA_REG_RSP_1)
alt_idx = EXTRA_REG_RSP_0;
- if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
+ if (config & ~extra_regs[alt_idx].valid_mask)
return idx;
return alt_idx;
@@ -2982,15 +3020,16 @@ static int intel_alt_er(int idx, u64 config)
static void intel_fixup_er(struct perf_event *event, int idx)
{
+ struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
event->hw.extra_reg.idx = idx;
if (idx == EXTRA_REG_RSP_0) {
event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
- event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
+ event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
} else if (idx == EXTRA_REG_RSP_1) {
event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
- event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
+ event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
}
}
@@ -3066,7 +3105,7 @@ again:
*/
c = NULL;
} else {
- idx = intel_alt_er(idx, reg->config);
+ idx = intel_alt_er(cpuc, idx, reg->config);
if (idx != reg->idx) {
raw_spin_unlock_irqrestore(&era->lock, flags);
goto again;
@@ -3131,10 +3170,11 @@ struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{
+ struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
struct event_constraint *c;
- if (x86_pmu.event_constraints) {
- for_each_event_constraint(c, x86_pmu.event_constraints) {
+ if (event_constraints) {
+ for_each_event_constraint(c, event_constraints) {
if (constraint_match(c, event->hw.config)) {
event->hw.flags |= c->flags;
return c;
@@ -3142,7 +3182,7 @@ x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
}
}
- return &unconstrained;
+ return &hybrid_var(cpuc->pmu, unconstrained);
}
static struct event_constraint *
@@ -3646,6 +3686,23 @@ static inline bool is_mem_loads_aux_event(struct perf_event *event)
return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
}
+static inline bool require_mem_loads_aux_event(struct perf_event *event)
+{
+ if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
+ return false;
+
+ if (is_hybrid())
+ return hybrid_pmu(event->pmu)->cpu_type == hybrid_big;
+
+ return true;
+}
+
+static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
+{
+ union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
+
+ return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
+}
static int intel_pmu_hw_config(struct perf_event *event)
{
@@ -3659,11 +3716,16 @@ static int intel_pmu_hw_config(struct perf_event *event)
return ret;
if (event->attr.precise_ip) {
+ if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
+ return -EINVAL;
+
if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
if (!(event->attr.sample_type &
- ~intel_pmu_large_pebs_flags(event)))
+ ~intel_pmu_large_pebs_flags(event))) {
event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
+ event->attach_state |= PERF_ATTACH_SCHED_CB;
+ }
}
if (x86_pmu.pebs_aliases)
x86_pmu.pebs_aliases(event);
@@ -3676,6 +3738,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
ret = intel_pmu_setup_lbr_filter(event);
if (ret)
return ret;
+ event->attach_state |= PERF_ATTACH_SCHED_CB;
/*
* BTS is set up earlier in this path, so don't account twice
@@ -3696,7 +3759,8 @@ static int intel_pmu_hw_config(struct perf_event *event)
event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
}
- if (event->attr.type != PERF_TYPE_RAW)
+ if ((event->attr.type == PERF_TYPE_HARDWARE) ||
+ (event->attr.type == PERF_TYPE_HW_CACHE))
return 0;
/*
@@ -3709,7 +3773,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
* with a slots event as group leader. When the slots event
* is used in a metrics group, it too cannot support sampling.
*/
- if (x86_pmu.intel_cap.perf_metrics && is_topdown_event(event)) {
+ if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
if (event->attr.config1 || event->attr.config2)
return -EINVAL;
@@ -3760,7 +3824,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
* event. The rule is to simplify the implementation of the check.
* That's because perf cannot have a complete group at the moment.
*/
- if (x86_pmu.flags & PMU_FL_MEM_LOADS_AUX &&
+ if (require_mem_loads_aux_event(event) &&
(event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
is_mem_loads_event(event)) {
struct perf_event *leader = event->group_leader;
@@ -3795,10 +3859,11 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
+ u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
- arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
- arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
+ arr[0].host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
+ arr[0].guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask;
if (x86_pmu.flags & PMU_FL_PEBS_ALL)
arr[0].guest &= ~cpuc->pebs_enabled;
else
@@ -3967,8 +4032,10 @@ spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
* The :ppp indicates the Precise Distribution (PDist) facility, which
* is only supported on the GP counter 0. If a :ppp event which is not
* available on the GP counter 0, error out.
+ * Exception: Instruction PDIR is only available on the fixed counter 0.
*/
- if (event->attr.precise_ip == 3) {
+ if ((event->attr.precise_ip == 3) &&
+ !constraint_match(&fixed0_constraint, event->hw.config)) {
if (c->idxmsk64 & BIT_ULL(0))
return &counter0_constraint;
@@ -4036,6 +4103,39 @@ tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
return c;
}
+static struct event_constraint *
+adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+{
+ struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+
+ if (pmu->cpu_type == hybrid_big)
+ return spr_get_event_constraints(cpuc, idx, event);
+ else if (pmu->cpu_type == hybrid_small)
+ return tnt_get_event_constraints(cpuc, idx, event);
+
+ WARN_ON(1);
+ return &emptyconstraint;
+}
+
+static int adl_hw_config(struct perf_event *event)
+{
+ struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+
+ if (pmu->cpu_type == hybrid_big)
+ return hsw_hw_config(event);
+ else if (pmu->cpu_type == hybrid_small)
+ return intel_pmu_hw_config(event);
+
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+}
+
+static u8 adl_get_hybrid_cpu_type(void)
+{
+ return hybrid_big;
+}
+
/*
* Broadwell:
*
@@ -4139,7 +4239,7 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
cpuc->pebs_record_size = x86_pmu.pebs_record_size;
- if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
+ if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
cpuc->shared_regs = allocate_shared_regs(cpu);
if (!cpuc->shared_regs)
goto err;
@@ -4193,12 +4293,62 @@ static void flip_smm_bit(void *data)
}
}
+static bool init_hybrid_pmu(int cpu)
+{
+ struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ u8 cpu_type = get_this_hybrid_cpu_type();
+ struct x86_hybrid_pmu *pmu = NULL;
+ int i;
+
+ if (!cpu_type && x86_pmu.get_hybrid_cpu_type)
+ cpu_type = x86_pmu.get_hybrid_cpu_type();
+
+ for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
+ if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) {
+ pmu = &x86_pmu.hybrid_pmu[i];
+ break;
+ }
+ }
+ if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
+ cpuc->pmu = NULL;
+ return false;
+ }
+
+ /* Only check and dump the PMU information for the first CPU */
+ if (!cpumask_empty(&pmu->supported_cpus))
+ goto end;
+
+ if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed))
+ return false;
+
+ pr_info("%s PMU driver: ", pmu->name);
+
+ if (pmu->intel_cap.pebs_output_pt_available)
+ pr_cont("PEBS-via-PT ");
+
+ pr_cont("\n");
+
+ x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed,
+ pmu->intel_ctrl);
+
+end:
+ cpumask_set_cpu(cpu, &pmu->supported_cpus);
+ cpuc->pmu = &pmu->pmu;
+
+ x86_pmu_update_cpu_context(&pmu->pmu, cpu);
+
+ return true;
+}
+
static void intel_pmu_cpu_starting(int cpu)
{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
int core_id = topology_core_id(cpu);
int i;
+ if (is_hybrid() && !init_hybrid_pmu(cpu))
+ return;
+
init_debug_store_on_cpu(cpu);
/*
* Deal with CPUs that don't clear their LBRs on power-up.
@@ -4216,8 +4366,16 @@ static void intel_pmu_cpu_starting(int cpu)
if (x86_pmu.version > 1)
flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
- /* Disable perf metrics if any added CPU doesn't support it. */
- if (x86_pmu.intel_cap.perf_metrics) {
+ /*
+ * Disable perf metrics if any added CPU doesn't support it.
+ *
+ * Turn off the check for a hybrid architecture, because the
+ * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicates
+ * the architectural features. Perf metrics is a model-specific
+ * feature for now. The corresponding bit should always be 0 on
+ * a hybrid platform, e.g., Alder Lake.
+ */
+ if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
union perf_capabilities perf_cap;
rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
@@ -4304,7 +4462,12 @@ void intel_cpuc_finish(struct cpu_hw_events *cpuc)
static void intel_pmu_cpu_dead(int cpu)
{
- intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
+ struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+ intel_cpuc_finish(cpuc);
+
+ if (is_hybrid() && cpuc->pmu)
+ cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus);
}
static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -4333,6 +4496,14 @@ static int intel_pmu_aux_output_match(struct perf_event *event)
return is_intel_pt_event(event);
}
+static int intel_pmu_filter_match(struct perf_event *event)
+{
+ struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
+ unsigned int cpu = smp_processor_id();
+
+ return cpumask_test_cpu(cpu, &pmu->supported_cpus);
+}
+
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -4510,7 +4681,7 @@ static const struct x86_cpu_desc isolation_ucodes[] = {
INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 3, 0x07000009),
INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 4, 0x0f000009),
INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_D, 5, 0x0e000002),
- INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 2, 0x0b000014),
+ INTEL_CPU_DESC(INTEL_FAM6_BROADWELL_X, 1, 0x0b000014),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 3, 0x00000021),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 4, 0x00000000),
INTEL_CPU_DESC(INTEL_FAM6_SKYLAKE_X, 5, 0x00000000),
@@ -4588,7 +4759,7 @@ static bool check_msr(unsigned long msr, u64 mask)
/*
* Disable the check for real HW, so we don't
- * mess with potentionaly enabled registers:
+ * mess with potentially enabled registers:
*/
if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
return true;
@@ -4653,7 +4824,7 @@ static __init void intel_arch_events_quirk(void)
{
int bit;
- /* disable event that reported as not presend by cpuid */
+ /* disable event that reported as not present by cpuid */
for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
pr_warn("CPUID marked event: \'%s\' unavailable\n",
@@ -4873,7 +5044,7 @@ static void update_tfa_sched(void *ignored)
* and if so force schedule out for all event types all contexts
*/
if (test_bit(3, cpuc->active_mask))
- perf_pmu_resched(x86_get_pmu());
+ perf_pmu_resched(x86_get_pmu(smp_processor_id()));
}
static ssize_t show_sysctl_tfa(struct device *cdev,
@@ -5035,8 +5206,299 @@ static const struct attribute_group *attr_update[] = {
NULL,
};
+EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big);
+EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_adl, "event=0xc2,umask=0x0;event=0x00,umask=0x80", hybrid_big_small);
+EVENT_ATTR_STR_HYBRID(topdown-bad-spec, td_bad_spec_adl, "event=0x73,umask=0x0;event=0x00,umask=0x81", hybrid_big_small);
+EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_adl, "event=0x71,umask=0x0;event=0x00,umask=0x82", hybrid_big_small);
+EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_adl, "event=0x74,umask=0x0;event=0x00,umask=0x83", hybrid_big_small);
+EVENT_ATTR_STR_HYBRID(topdown-heavy-ops, td_heavy_ops_adl, "event=0x00,umask=0x84", hybrid_big);
+EVENT_ATTR_STR_HYBRID(topdown-br-mispredict, td_br_mis_adl, "event=0x00,umask=0x85", hybrid_big);
+EVENT_ATTR_STR_HYBRID(topdown-fetch-lat, td_fetch_lat_adl, "event=0x00,umask=0x86", hybrid_big);
+EVENT_ATTR_STR_HYBRID(topdown-mem-bound, td_mem_bound_adl, "event=0x00,umask=0x87", hybrid_big);
+
+static struct attribute *adl_hybrid_events_attrs[] = {
+ EVENT_PTR(slots_adl),
+ EVENT_PTR(td_retiring_adl),
+ EVENT_PTR(td_bad_spec_adl),
+ EVENT_PTR(td_fe_bound_adl),
+ EVENT_PTR(td_be_bound_adl),
+ EVENT_PTR(td_heavy_ops_adl),
+ EVENT_PTR(td_br_mis_adl),
+ EVENT_PTR(td_fetch_lat_adl),
+ EVENT_PTR(td_mem_bound_adl),
+ NULL,
+};
+
+/* Must be in IDX order */
+EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
+EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);
+EVENT_ATTR_STR_HYBRID(mem-loads-aux, mem_ld_aux_adl, "event=0x03,umask=0x82", hybrid_big);
+
+static struct attribute *adl_hybrid_mem_attrs[] = {
+ EVENT_PTR(mem_ld_adl),
+ EVENT_PTR(mem_st_adl),
+ EVENT_PTR(mem_ld_aux_adl),
+ NULL,
+};
+
+EVENT_ATTR_STR_HYBRID(tx-start, tx_start_adl, "event=0xc9,umask=0x1", hybrid_big);
+EVENT_ATTR_STR_HYBRID(tx-commit, tx_commit_adl, "event=0xc9,umask=0x2", hybrid_big);
+EVENT_ATTR_STR_HYBRID(tx-abort, tx_abort_adl, "event=0xc9,umask=0x4", hybrid_big);
+EVENT_ATTR_STR_HYBRID(tx-conflict, tx_conflict_adl, "event=0x54,umask=0x1", hybrid_big);
+EVENT_ATTR_STR_HYBRID(cycles-t, cycles_t_adl, "event=0x3c,in_tx=1", hybrid_big);
+EVENT_ATTR_STR_HYBRID(cycles-ct, cycles_ct_adl, "event=0x3c,in_tx=1,in_tx_cp=1", hybrid_big);
+EVENT_ATTR_STR_HYBRID(tx-capacity-read, tx_capacity_read_adl, "event=0x54,umask=0x80", hybrid_big);
+EVENT_ATTR_STR_HYBRID(tx-capacity-write, tx_capacity_write_adl, "event=0x54,umask=0x2", hybrid_big);
+
+static struct attribute *adl_hybrid_tsx_attrs[] = {
+ EVENT_PTR(tx_start_adl),
+ EVENT_PTR(tx_abort_adl),
+ EVENT_PTR(tx_commit_adl),
+ EVENT_PTR(tx_capacity_read_adl),
+ EVENT_PTR(tx_capacity_write_adl),
+ EVENT_PTR(tx_conflict_adl),
+ EVENT_PTR(cycles_t_adl),
+ EVENT_PTR(cycles_ct_adl),
+ NULL,
+};
+
+FORMAT_ATTR_HYBRID(in_tx, hybrid_big);
+FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big);
+FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small);
+FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small);
+FORMAT_ATTR_HYBRID(frontend, hybrid_big);
+
+static struct attribute *adl_hybrid_extra_attr_rtm[] = {
+ FORMAT_HYBRID_PTR(in_tx),
+ FORMAT_HYBRID_PTR(in_tx_cp),
+ FORMAT_HYBRID_PTR(offcore_rsp),
+ FORMAT_HYBRID_PTR(ldlat),
+ FORMAT_HYBRID_PTR(frontend),
+ NULL,
+};
+
+static struct attribute *adl_hybrid_extra_attr[] = {
+ FORMAT_HYBRID_PTR(offcore_rsp),
+ FORMAT_HYBRID_PTR(ldlat),
+ FORMAT_HYBRID_PTR(frontend),
+ NULL,
+};
+
+static bool is_attr_for_this_pmu(struct kobject *kobj, struct attribute *attr)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct x86_hybrid_pmu *pmu =
+ container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
+ struct perf_pmu_events_hybrid_attr *pmu_attr =
+ container_of(attr, struct perf_pmu_events_hybrid_attr, attr.attr);
+
+ return pmu->cpu_type & pmu_attr->pmu_type;
+}
+
+static umode_t hybrid_events_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ return is_attr_for_this_pmu(kobj, attr) ? attr->mode : 0;
+}
+
+static inline int hybrid_find_supported_cpu(struct x86_hybrid_pmu *pmu)
+{
+ int cpu = cpumask_first(&pmu->supported_cpus);
+
+ return (cpu >= nr_cpu_ids) ? -1 : cpu;
+}
+
+static umode_t hybrid_tsx_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct x86_hybrid_pmu *pmu =
+ container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
+ int cpu = hybrid_find_supported_cpu(pmu);
+
+ return (cpu >= 0) && is_attr_for_this_pmu(kobj, attr) && cpu_has(&cpu_data(cpu), X86_FEATURE_RTM) ? attr->mode : 0;
+}
+
+static umode_t hybrid_format_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct x86_hybrid_pmu *pmu =
+ container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
+ struct perf_pmu_format_hybrid_attr *pmu_attr =
+ container_of(attr, struct perf_pmu_format_hybrid_attr, attr.attr);
+ int cpu = hybrid_find_supported_cpu(pmu);
+
+ return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? attr->mode : 0;
+}
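/*
 * Effect of the is_visible callbacks above (illustrative): a format
 * attribute tagged hybrid_big only, such as "frontend" above,
 * appears under /sys/devices/cpu_core/format/ but is hidden
 * (mode 0) under /sys/devices/cpu_atom/format/.
 */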
+
+static struct attribute_group hybrid_group_events_td = {
+ .name = "events",
+ .is_visible = hybrid_events_is_visible,
+};
+
+static struct attribute_group hybrid_group_events_mem = {
+ .name = "events",
+ .is_visible = hybrid_events_is_visible,
+};
+
+static struct attribute_group hybrid_group_events_tsx = {
+ .name = "events",
+ .is_visible = hybrid_tsx_is_visible,
+};
+
+static struct attribute_group hybrid_group_format_extra = {
+ .name = "format",
+ .is_visible = hybrid_format_is_visible,
+};
+
+static ssize_t intel_hybrid_get_attr_cpus(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct x86_hybrid_pmu *pmu =
+ container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu);
+
+ return cpumap_print_to_pagebuf(true, buf, &pmu->supported_cpus);
+}
+
+static DEVICE_ATTR(cpus, S_IRUGO, intel_hybrid_get_attr_cpus, NULL);
+static struct attribute *intel_hybrid_cpus_attrs[] = {
+ &dev_attr_cpus.attr,
+ NULL,
+};
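/*
 * Illustrative sysfs view of the attribute above on a hybrid system
 * (the CPU numbering is hypothetical):
 *
 *   $ cat /sys/devices/cpu_core/cpus
 *   0-15
 *   $ cat /sys/devices/cpu_atom/cpus
 *   16-23
 */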
+
+static struct attribute_group hybrid_group_cpus = {
+ .attrs = intel_hybrid_cpus_attrs,
+};
+
+static const struct attribute_group *hybrid_attr_update[] = {
+ &hybrid_group_events_td,
+ &hybrid_group_events_mem,
+ &hybrid_group_events_tsx,
+ &group_caps_gen,
+ &group_caps_lbr,
+ &hybrid_group_format_extra,
+ &group_default,
+ &hybrid_group_cpus,
+ NULL,
+};
+
static struct attribute *empty_attrs;
+static void intel_pmu_check_num_counters(int *num_counters,
+ int *num_counters_fixed,
+ u64 *intel_ctrl, u64 fixed_mask)
+{
+ if (*num_counters > INTEL_PMC_MAX_GENERIC) {
+ WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+ *num_counters, INTEL_PMC_MAX_GENERIC);
+ *num_counters = INTEL_PMC_MAX_GENERIC;
+ }
+ *intel_ctrl = (1ULL << *num_counters) - 1;
+
+ if (*num_counters_fixed > INTEL_PMC_MAX_FIXED) {
+ WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+ *num_counters_fixed, INTEL_PMC_MAX_FIXED);
+ *num_counters_fixed = INTEL_PMC_MAX_FIXED;
+ }
+
+ *intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
+}
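/*
 * Worked example, assuming 6 generic and 3 fixed counters with
 * fixed_mask == 0x7 and INTEL_PMC_IDX_FIXED == 32: *intel_ctrl
 * becomes (1ULL << 6) - 1 = 0x3f, then 0x7 << 32 is OR'ed in,
 * giving 0x70000003f -- one enable bit per usable counter in the
 * GLOBAL_CTRL layout.
 */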
+
+static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
+ int num_counters,
+ int num_counters_fixed,
+ u64 intel_ctrl)
+{
+ struct event_constraint *c;
+
+ if (!event_constraints)
+ return;
+
+ /*
+ * event on fixed counter2 (REF_CYCLES) only works on this
+ * counter, so do not extend mask to generic counters
+ */
+ for_each_event_constraint(c, event_constraints) {
+ /*
+ * Don't extend the topdown slots and metrics
+ * events to the generic counters.
+ */
+ if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
+ /*
+ * Disable topdown slots and metrics events,
+ * if slots event is not in CPUID.
+ */
+ if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
+ c->idxmsk64 = 0;
+ c->weight = hweight64(c->idxmsk64);
+ continue;
+ }
+
+ if (c->cmask == FIXED_EVENT_FLAGS) {
+ /* Disable fixed counters which are not in CPUID */
+ c->idxmsk64 &= intel_ctrl;
+
+ if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
+ c->idxmsk64 |= (1ULL << num_counters) - 1;
+ }
+ c->idxmsk64 &=
+ ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));
+ c->weight = hweight64(c->idxmsk64);
+ }
+}
+
+static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs)
+{
+ struct extra_reg *er;
+
+ /*
+ * Accessing the extra MSRs may cause #GP under certain circumstances.
+ * E.g. KVM doesn't support offcore events.
+ * Check all extra_regs here.
+ */
+ if (!extra_regs)
+ return;
+
+ for (er = extra_regs; er->msr; er++) {
+ er->extra_msr_access = check_msr(er->msr, 0x11UL);
+ /* Disable LBR select mapping */
+ if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
+ x86_pmu.lbr_sel_map = NULL;
+ }
+}
+
+static void intel_pmu_check_hybrid_pmus(u64 fixed_mask)
+{
+ struct x86_hybrid_pmu *pmu;
+ int i;
+
+ for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
+ pmu = &x86_pmu.hybrid_pmu[i];
+
+ intel_pmu_check_num_counters(&pmu->num_counters,
+ &pmu->num_counters_fixed,
+ &pmu->intel_ctrl,
+ fixed_mask);
+
+ if (pmu->intel_cap.perf_metrics) {
+ pmu->intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
+ pmu->intel_ctrl |= INTEL_PMC_MSK_FIXED_SLOTS;
+ }
+
+ if (pmu->intel_cap.pebs_output_pt_available)
+ pmu->pmu.capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
+
+ intel_pmu_check_event_constraints(pmu->event_constraints,
+ pmu->num_counters,
+ pmu->num_counters_fixed,
+ pmu->intel_ctrl);
+
+ intel_pmu_check_extra_regs(pmu->extra_regs);
+ }
+}
+
__init int intel_pmu_init(void)
{
struct attribute **extra_skl_attr = &empty_attrs;
@@ -5047,12 +5509,11 @@ __init int intel_pmu_init(void)
union cpuid10_edx edx;
union cpuid10_eax eax;
union cpuid10_ebx ebx;
- struct event_constraint *c;
unsigned int fixed_mask;
- struct extra_reg *er;
bool pmem = false;
int version, i;
char *name;
+ struct x86_hybrid_pmu *pmu;
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
switch (boot_cpu_data.x86) {
@@ -5558,7 +6019,13 @@ __init int intel_pmu_init(void)
tsx_attr = hsw_tsx_events_attrs;
intel_pmu_pebs_data_source_skl(pmem);
- if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ /*
+ * Processors with CPUID.RTM_ALWAYS_ABORT have TSX deprecated by default.
+ * TSX force abort hooks are not required on these systems. Only deploy
+ * workaround when microcode has not enabled X86_FEATURE_RTM_ALWAYS_ABORT.
+ */
+ if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT) &&
+ !boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT)) {
x86_pmu.flags |= PMU_FL_TFA;
x86_pmu.get_event_constraints = tfa_get_event_constraints;
x86_pmu.enable_all = intel_tfa_pmu_enable_all;
@@ -5647,6 +6114,104 @@ __init int intel_pmu_init(void)
name = "sapphire_rapids";
break;
+ case INTEL_FAM6_ALDERLAKE:
+ case INTEL_FAM6_ALDERLAKE_L:
+ /*
+ * Alder Lake has two types of CPUs: Core and Atom.
+ *
+ * Initialize the common PerfMon capabilities here.
+ */
+ x86_pmu.hybrid_pmu = kcalloc(X86_HYBRID_NUM_PMUS,
+ sizeof(struct x86_hybrid_pmu),
+ GFP_KERNEL);
+ if (!x86_pmu.hybrid_pmu)
+ return -ENOMEM;
+ static_branch_enable(&perf_is_hybrid);
+ x86_pmu.num_hybrid_pmus = X86_HYBRID_NUM_PMUS;
+
+ x86_pmu.late_ack = true;
+ x86_pmu.pebs_aliases = NULL;
+ x86_pmu.pebs_prec_dist = true;
+ x86_pmu.pebs_block = true;
+ x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+ x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
+ x86_pmu.flags |= PMU_FL_PEBS_ALL;
+ x86_pmu.flags |= PMU_FL_INSTR_LATENCY;
+ x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
+ x86_pmu.lbr_pt_coexist = true;
+ intel_pmu_pebs_data_source_skl(false);
+ x86_pmu.num_topdown_events = 8;
+ x86_pmu.update_topdown_event = adl_update_topdown_event;
+ x86_pmu.set_topdown_event_period = adl_set_topdown_event_period;
+
+ x86_pmu.filter_match = intel_pmu_filter_match;
+ x86_pmu.get_event_constraints = adl_get_event_constraints;
+ x86_pmu.hw_config = adl_hw_config;
+ x86_pmu.limit_period = spr_limit_period;
+ x86_pmu.get_hybrid_cpu_type = adl_get_hybrid_cpu_type;
+ /*
+ * The rtm_abort_event is used to check whether to enable GPRs
+ * for the RTM abort event. Atom doesn't have the RTM abort
+ * event. There is no harm in setting it in the common
+ * x86_pmu.rtm_abort_event.
+ */
+ x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
+
+ td_attr = adl_hybrid_events_attrs;
+ mem_attr = adl_hybrid_mem_attrs;
+ tsx_attr = adl_hybrid_tsx_attrs;
+ extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+ adl_hybrid_extra_attr_rtm : adl_hybrid_extra_attr;
+
+ /* Initialize big core specific PerfMon capabilities. */
+ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
+ pmu->name = "cpu_core";
+ pmu->cpu_type = hybrid_big;
+ if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) {
+ pmu->num_counters = x86_pmu.num_counters + 2;
+ pmu->num_counters_fixed = x86_pmu.num_counters_fixed + 1;
+ } else {
+ pmu->num_counters = x86_pmu.num_counters;
+ pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
+ }
+ pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
+ pmu->unconstrained = (struct event_constraint)
+ __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
+ 0, pmu->num_counters, 0, 0);
+ pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
+ pmu->intel_cap.perf_metrics = 1;
+ pmu->intel_cap.pebs_output_pt_available = 0;
+
+ memcpy(pmu->hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
+ memcpy(pmu->hw_cache_extra_regs, spr_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
+ pmu->event_constraints = intel_spr_event_constraints;
+ pmu->pebs_constraints = intel_spr_pebs_event_constraints;
+ pmu->extra_regs = intel_spr_extra_regs;
+
+ /* Initialize Atom core specific PerfMon capabilities. */
+ pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
+ pmu->name = "cpu_atom";
+ pmu->cpu_type = hybrid_small;
+ pmu->num_counters = x86_pmu.num_counters;
+ pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
+ pmu->max_pebs_events = x86_pmu.max_pebs_events;
+ pmu->unconstrained = (struct event_constraint)
+ __EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
+ 0, pmu->num_counters, 0, 0);
+ pmu->intel_cap.capabilities = x86_pmu.intel_cap.capabilities;
+ pmu->intel_cap.perf_metrics = 0;
+ pmu->intel_cap.pebs_output_pt_available = 1;
+
+ memcpy(pmu->hw_cache_event_ids, glp_hw_cache_event_ids, sizeof(pmu->hw_cache_event_ids));
+ memcpy(pmu->hw_cache_extra_regs, tnt_hw_cache_extra_regs, sizeof(pmu->hw_cache_extra_regs));
+ pmu->hw_cache_event_ids[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = -1;
+ pmu->event_constraints = intel_slm_event_constraints;
+ pmu->pebs_constraints = intel_grt_pebs_event_constraints;
+ pmu->extra_regs = intel_grt_extra_regs;
+ pr_cont("Alderlake Hybrid events, ");
+ name = "alderlake_hybrid";
+ break;
+
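	/*
	 * Net effect of this case (illustrative): two PMUs are
	 * registered, "cpu_core" taking over type PERF_TYPE_RAW and
	 * "cpu_atom" getting a dynamic type, so something like
	 *
	 *   perf stat -e cpu_core/cycles/ -e cpu_atom/cycles/ ...
	 *
	 * counts cycles separately per core type.
	 */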
default:
switch (x86_pmu.version) {
case 1:
@@ -5667,75 +6232,43 @@ __init int intel_pmu_init(void)
snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
+ if (!is_hybrid()) {
+ group_events_td.attrs = td_attr;
+ group_events_mem.attrs = mem_attr;
+ group_events_tsx.attrs = tsx_attr;
+ group_format_extra.attrs = extra_attr;
+ group_format_extra_skl.attrs = extra_skl_attr;
- group_events_td.attrs = td_attr;
- group_events_mem.attrs = mem_attr;
- group_events_tsx.attrs = tsx_attr;
- group_format_extra.attrs = extra_attr;
- group_format_extra_skl.attrs = extra_skl_attr;
-
- x86_pmu.attr_update = attr_update;
-
- if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
- WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
- x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
- x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
- }
- x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
+ x86_pmu.attr_update = attr_update;
+ } else {
+ hybrid_group_events_td.attrs = td_attr;
+ hybrid_group_events_mem.attrs = mem_attr;
+ hybrid_group_events_tsx.attrs = tsx_attr;
+ hybrid_group_format_extra.attrs = extra_attr;
- if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
- WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
- x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
- x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
+ x86_pmu.attr_update = hybrid_attr_update;
}
- x86_pmu.intel_ctrl |= (u64)fixed_mask << INTEL_PMC_IDX_FIXED;
+ intel_pmu_check_num_counters(&x86_pmu.num_counters,
+ &x86_pmu.num_counters_fixed,
+ &x86_pmu.intel_ctrl,
+ (u64)fixed_mask);
/* AnyThread may be deprecated on arch perfmon v5 or later */
if (x86_pmu.intel_cap.anythread_deprecated)
x86_pmu.format_attrs = intel_arch_formats_attr;
- if (x86_pmu.event_constraints) {
- /*
- * event on fixed counter2 (REF_CYCLES) only works on this
- * counter, so do not extend mask to generic counters
- */
- for_each_event_constraint(c, x86_pmu.event_constraints) {
- /*
- * Don't extend the topdown slots and metrics
- * events to the generic counters.
- */
- if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
- /*
- * Disable topdown slots and metrics events,
- * if slots event is not in CPUID.
- */
- if (!(INTEL_PMC_MSK_FIXED_SLOTS & x86_pmu.intel_ctrl))
- c->idxmsk64 = 0;
- c->weight = hweight64(c->idxmsk64);
- continue;
- }
-
- if (c->cmask == FIXED_EVENT_FLAGS) {
- /* Disabled fixed counters which are not in CPUID */
- c->idxmsk64 &= x86_pmu.intel_ctrl;
-
- if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
- c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
- }
- c->idxmsk64 &=
- ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
- c->weight = hweight64(c->idxmsk64);
- }
- }
-
+ intel_pmu_check_event_constraints(x86_pmu.event_constraints,
+ x86_pmu.num_counters,
+ x86_pmu.num_counters_fixed,
+ x86_pmu.intel_ctrl);
/*
* Accessing the LBR MSRs may cause #GP under certain circumstances.
* E.g. KVM doesn't support LBR MSRs.
* Check all LBR MSRs here.
* Disable LBR access if any LBR MSRs cannot be accessed.
*/
- if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+ if (x86_pmu.lbr_tos && !check_msr(x86_pmu.lbr_tos, 0x3UL))
x86_pmu.lbr_nr = 0;
for (i = 0; i < x86_pmu.lbr_nr; i++) {
if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
@@ -5746,19 +6279,7 @@ __init int intel_pmu_init(void)
if (x86_pmu.lbr_nr)
pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
- /*
- * Access extra MSR may cause #GP under certain circumstances.
- * E.g. KVM doesn't support offcore event
- * Check all extra_regs here.
- */
- if (x86_pmu.extra_regs) {
- for (er = x86_pmu.extra_regs; er->msr; er++) {
- er->extra_msr_access = check_msr(er->msr, 0x11UL);
- /* Disable LBR select mapping */
- if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
- x86_pmu.lbr_sel_map = NULL;
- }
- }
+ intel_pmu_check_extra_regs(x86_pmu.extra_regs);
/* Support full width counters using alternative MSR range */
if (x86_pmu.intel_cap.full_width_write) {
@@ -5767,9 +6288,12 @@ __init int intel_pmu_init(void)
pr_cont("full-width counters, ");
}
- if (x86_pmu.intel_cap.perf_metrics)
+ if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics)
x86_pmu.intel_ctrl |= 1ULL << GLOBAL_CTRL_EN_PERF_METRICS;
+ if (is_hybrid())
+ intel_pmu_check_hybrid_pmus((u64)fixed_mask);
+
return 0;
}
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 407eee5f6f95..433399069e27 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -40,7 +40,7 @@
* Model specific counters:
* MSR_CORE_C1_RES: CORE C1 Residency Counter
* perf code: 0x00
- * Available model: SLM,AMT,GLM,CNL,TNT
+ * Available model: SLM,AMT,GLM,CNL,TNT,ADL
* Scope: Core (each processor core has a MSR)
* MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
* perf code: 0x01
@@ -51,46 +51,49 @@
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
- * TNT,RKL
+ * TNT,RKL,ADL
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
* Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
- * ICL,TGL,RKL
+ * ICL,TGL,RKL,ADL
* Scope: Core
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00
* Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
- * KBL,CML,ICL,TGL,TNT,RKL
+ * KBL,CML,ICL,TGL,TNT,RKL,ADL
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
- * GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL
+ * GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
+ * ADL
* Scope: Package (physical package)
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
* SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
- * TNT,RKL
+ * TNT,RKL,ADL
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
- * KBL,CML,ICL,TGL,RKL
+ * KBL,CML,ICL,TGL,RKL,ADL
* Scope: Package (physical package)
* MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
* perf code: 0x04
- * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL
+ * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
+ * ADL
* Scope: Package (physical package)
* MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
* perf code: 0x05
- * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL
+ * Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
+ * ADL
* Scope: Package (physical package)
* MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
* perf code: 0x06
* Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
- * TNT,RKL
+ * TNT,RKL,ADL
* Scope: Package (physical package)
*
*/
@@ -563,6 +566,20 @@ static const struct cstate_model icl_cstates __initconst = {
BIT(PERF_CSTATE_PKG_C10_RES),
};
+static const struct cstate_model adl_cstates __initconst = {
+ .core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
+ BIT(PERF_CSTATE_CORE_C6_RES) |
+ BIT(PERF_CSTATE_CORE_C7_RES),
+
+ .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) |
+ BIT(PERF_CSTATE_PKG_C3_RES) |
+ BIT(PERF_CSTATE_PKG_C6_RES) |
+ BIT(PERF_CSTATE_PKG_C7_RES) |
+ BIT(PERF_CSTATE_PKG_C8_RES) |
+ BIT(PERF_CSTATE_PKG_C9_RES) |
+ BIT(PERF_CSTATE_PKG_C10_RES),
+};
+
static const struct cstate_model slm_cstates __initconst = {
.core_events = BIT(PERF_CSTATE_CORE_C1_RES) |
BIT(PERF_CSTATE_CORE_C6_RES),
@@ -650,6 +667,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates),
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &icl_cstates),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_cstates),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_cstates),
{ },
};
MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 7ebae1826403..8647713276a7 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -779,6 +779,13 @@ struct event_constraint intel_glm_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+struct event_constraint intel_grt_pebs_event_constraints[] = {
+ /* Allow all events as PEBS with no flags */
+ INTEL_PLD_CONSTRAINT(0x5d0, 0xf),
+ INTEL_PSD_CONSTRAINT(0x6d0, 0xf),
+ EVENT_CONSTRAINT_END
+};
+
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
@@ -959,13 +966,14 @@ struct event_constraint intel_spr_pebs_event_constraints[] = {
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
{
+ struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints);
struct event_constraint *c;
if (!event->attr.precise_ip)
return NULL;
- if (x86_pmu.pebs_constraints) {
- for_each_event_constraint(c, x86_pmu.pebs_constraints) {
+ if (pebs_constraints) {
+ for_each_event_constraint(c, pebs_constraints) {
if (constraint_match(c, event->hw.config)) {
event->hw.flags |= c->flags;
return c;
@@ -1007,6 +1015,8 @@ void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
{
struct debug_store *ds = cpuc->ds;
+ int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
+ int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
u64 threshold;
int reserved;
@@ -1014,9 +1024,9 @@ static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
return;
if (x86_pmu.flags & PMU_FL_PEBS_ALL)
- reserved = x86_pmu.max_pebs_events + x86_pmu.num_counters_fixed;
+ reserved = max_pebs_events + num_counters_fixed;
else
- reserved = x86_pmu.max_pebs_events;
+ reserved = max_pebs_events;
if (cpuc->n_pebs == cpuc->n_large_pebs) {
threshold = ds->pebs_absolute_maximum -
@@ -1177,6 +1187,9 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
struct debug_store *ds = cpuc->ds;
+ u64 value = ds->pebs_event_reset[hwc->idx];
+ u32 base = MSR_RELOAD_PMC0;
+ unsigned int idx = hwc->idx;
if (!is_pebs_pt(event))
return;
@@ -1186,7 +1199,12 @@ static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
cpuc->pebs_enabled |= PEBS_OUTPUT_PT;
- wrmsrl(MSR_RELOAD_PMC0 + hwc->idx, ds->pebs_event_reset[hwc->idx]);
+ if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
+ base = MSR_RELOAD_FIXED_CTR0;
+ idx = hwc->idx - INTEL_PMC_IDX_FIXED;
+ value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx];
+ }
+ wrmsrl(base + idx, value);
}
void intel_pmu_pebs_enable(struct perf_event *event)
@@ -1194,6 +1212,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
struct debug_store *ds = cpuc->ds;
+ unsigned int idx = hwc->idx;
hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
@@ -1212,19 +1231,18 @@ void intel_pmu_pebs_enable(struct perf_event *event)
}
}
+ if (idx >= INTEL_PMC_IDX_FIXED)
+ idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
+
/*
* Use auto-reload if possible to save an MSR write in the PMI.
* This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD
* can change the sample period at any time.
*/
if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
- unsigned int idx = hwc->idx;
-
- if (idx >= INTEL_PMC_IDX_FIXED)
- idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
ds->pebs_event_reset[idx] =
(u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
} else {
- ds->pebs_event_reset[hwc->idx] = 0;
+ ds->pebs_event_reset[idx] = 0;
}
intel_pmu_pebs_via_pt_enable(event);
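With PEBS extended to fixed counters, the reset value for fixed counter n lives at pebs_event_reset[MAX_PEBS_EVENTS + n], which is what the hoisted index computation above produces. A standalone sketch of that mapping (the two constants are assumptions taken from the x86 perf headers, where MAX_PEBS_EVENTS is 8 and INTEL_PMC_IDX_FIXED is 32):

        #include <stdio.h>

        #define MAX_PEBS_EVENTS         8       /* assumed, mirrors arch/x86 headers */
        #define INTEL_PMC_IDX_FIXED     32      /* assumed, mirrors arch/x86 headers */

        /* Map a hardware counter index to its pebs_event_reset[] slot. */
        static unsigned int pebs_reset_slot(unsigned int idx)
        {
                if (idx >= INTEL_PMC_IDX_FIXED)
                        idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
                return idx;
        }

        int main(void)
        {
                printf("GP counter 3    -> slot %u\n", pebs_reset_slot(3));  /* 3  */
                printf("fixed counter 0 -> slot %u\n", pebs_reset_slot(32)); /* 8  */
                printf("fixed counter 2 -> slot %u\n", pebs_reset_slot(34)); /* 10 */
                return 0;
        }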
@@ -1353,14 +1371,13 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
is_64bit = kernel_ip(to) || any_64bit_mode(regs);
#endif
insn_init(&insn, kaddr, size, is_64bit);
- insn_get_length(&insn);
+
/*
- * Make sure there was not a problem decoding the
- * instruction and getting the length. This is
- * doubly important because we have an infinite
- * loop if insn.length=0.
+ * Make sure there was not a problem decoding the instruction.
+ * This is doubly important because we have an infinite loop if
+ * insn.length=0.
*/
- if (!insn.length)
+ if (insn_get_length(&insn))
break;
to += insn.length;
@@ -1805,7 +1822,7 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
*
* [-period, 0]
*
- * the difference between two consequtive reads is:
+ * the difference between two consecutive reads is:
*
* A) value2 - value1;
* when no overflows have happened in between,
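The comment's arithmetic is easier to follow with numbers. A sketch of the overflow case (the excerpt above cuts off before case B, so the formula here is an illustration of the same idea, assuming the counter reloads to -period on each overflow):

        #include <stdio.h>

        int main(void)
        {
                long period = 1000;
                /* Auto-reload keeps the counter value in [-period, 0]. */
                long value1 = -700;     /* first read */
                long value2 = -900;     /* second read, after one overflow */
                int n = 1;              /* overflows observed in between */

                /* Finish the first window, add n-1 full windows, then the
                 * part of the current window already consumed. */
                long delta = (0 - value1) + (n - 1) * period + (value2 + period);

                printf("events between reads: %ld\n", delta); /* 700 + 0 + 100 = 800 */
                return 0;
        }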
@@ -2010,7 +2027,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_d
*/
if (!pebs_status && cpuc->pebs_enabled &&
!(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
- pebs_status = cpuc->pebs_enabled;
+ pebs_status = p->status = cpuc->pebs_enabled;
bit = find_first_bit((unsigned long *)&pebs_status,
x86_pmu.max_pebs_events);
@@ -2072,6 +2089,8 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
{
short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
+ int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
struct debug_store *ds = cpuc->ds;
struct perf_event *event;
void *base, *at, *top;
@@ -2086,9 +2105,9 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_d
ds->pebs_index = ds->pebs_buffer_base;
- mask = ((1ULL << x86_pmu.max_pebs_events) - 1) |
- (((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
- size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
+ mask = ((1ULL << max_pebs_events) - 1) |
+ (((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
+ size = INTEL_PMC_IDX_FIXED + num_counters_fixed;
if (unlikely(base >= top)) {
intel_pmu_pebs_event_update_no_drain(cpuc, size);
@@ -2192,7 +2211,7 @@ void __init intel_ds_init(void)
PERF_SAMPLE_TIME;
x86_pmu.flags |= PMU_FL_PEBS_ALL;
pebs_qual = "-baseline";
- x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
+ x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
} else {
/* Only basic record supported */
x86_pmu.large_pebs_flags &=
@@ -2205,9 +2224,9 @@ void __init intel_ds_init(void)
}
pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
- if (x86_pmu.intel_cap.pebs_output_pt_available) {
+ if (!is_hybrid() && x86_pmu.intel_cap.pebs_output_pt_available) {
pr_cont("PEBS-via-PT, ");
- x86_get_pmu()->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
+ x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
}
break;
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 21890dacfcfe..e8453de7a964 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -658,7 +658,6 @@ static inline bool branch_user_callstack(unsigned br_sel)
void intel_pmu_lbr_add(struct perf_event *event)
{
- struct kmem_cache *kmem_cache = event->pmu->task_ctx_cache;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!x86_pmu.lbr_nr)
@@ -696,16 +695,11 @@ void intel_pmu_lbr_add(struct perf_event *event)
perf_sched_cb_inc(event->ctx->pmu);
if (!cpuc->lbr_users++ && !event->total_time_running)
intel_pmu_lbr_reset();
-
- if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
- kmem_cache && !cpuc->lbr_xsave &&
- (cpuc->lbr_users != cpuc->lbr_pebs_users))
- cpuc->lbr_xsave = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
}
void release_lbr_buffers(void)
{
- struct kmem_cache *kmem_cache = x86_get_pmu()->task_ctx_cache;
+ struct kmem_cache *kmem_cache;
struct cpu_hw_events *cpuc;
int cpu;
@@ -714,6 +708,7 @@ void release_lbr_buffers(void)
for_each_possible_cpu(cpu) {
cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
+ kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
if (kmem_cache && cpuc->lbr_xsave) {
kmem_cache_free(kmem_cache, cpuc->lbr_xsave);
cpuc->lbr_xsave = NULL;
@@ -721,6 +716,27 @@ void release_lbr_buffers(void)
}
}
+void reserve_lbr_buffers(void)
+{
+ struct kmem_cache *kmem_cache;
+ struct cpu_hw_events *cpuc;
+ int cpu;
+
+ if (!static_cpu_has(X86_FEATURE_ARCH_LBR))
+ return;
+
+ for_each_possible_cpu(cpu) {
+ cpuc = per_cpu_ptr(&cpu_hw_events, cpu);
+ kmem_cache = x86_get_pmu(cpu)->task_ctx_cache;
+ if (!kmem_cache || cpuc->lbr_xsave)
+ continue;
+
+ cpuc->lbr_xsave = kmem_cache_alloc_node(kmem_cache,
+ GFP_KERNEL | __GFP_ZERO,
+ cpu_to_node(cpu));
+ }
+}
+
void intel_pmu_lbr_del(struct perf_event *event)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1198,7 +1214,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
/*
* The LBR logs any address in the IP, even if the IP just
* faulted. This means userspace can control the from address.
- * Ensure we don't blindy read any address by validating it is
+ * Ensure we don't blindly read any address by validating it is
* a known text address.
*/
if (kernel_text_address(from)) {
@@ -1224,8 +1240,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
is64 = kernel_ip((unsigned long)addr) || any_64bit_mode(current_pt_regs());
#endif
insn_init(&insn, addr, bytes_read, is64);
- insn_get_opcode(&insn);
- if (!insn.opcode.got)
+ if (insn_get_opcode(&insn))
return X86_BR_ABORT;
switch (insn.opcode.bytes[0]) {
@@ -1262,8 +1277,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
ret = X86_BR_INT;
break;
case 0xe8: /* call near rel */
- insn_get_immediate(&insn);
- if (insn.immediate1.value == 0) {
+ if (insn_get_immediate(&insn) || insn.immediate1.value == 0) {
/* zero length call */
ret = X86_BR_ZERO_CALL;
break;
@@ -1279,7 +1293,9 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
ret = X86_BR_JMP;
break;
case 0xff: /* call near absolute, call far absolute ind */
- insn_get_modrm(&insn);
+ if (insn_get_modrm(&insn))
+ return X86_BR_ABORT;
+
ext = (insn.modrm.bytes[0] >> 3) & 0x7;
switch (ext) {
case 2: /* near ind call */
@@ -1609,7 +1625,7 @@ void intel_pmu_lbr_init_hsw(void)
x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
- x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
+ x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
if (lbr_from_signext_quirk_needed())
static_branch_enable(&lbr_from_quirk_key);
@@ -1629,7 +1645,7 @@ __init void intel_pmu_lbr_init_skl(void)
x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
- x86_get_pmu()->task_ctx_cache = create_lbr_kmem_cache(size, 0);
+ x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0);
/*
* SW branch filter usage:
@@ -1726,7 +1742,7 @@ static bool is_arch_lbr_xsave_available(void)
void __init intel_pmu_arch_lbr_init(void)
{
- struct pmu *pmu = x86_get_pmu();
+ struct pmu *pmu = x86_get_pmu(smp_processor_id());
union cpuid28_eax eax;
union cpuid28_ebx ebx;
union cpuid28_ecx ecx;
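Once the decoder calls above succeed, branch_type() classifies on the first opcode byte. A toy classifier over the same opcode space, as a sketch only; the real code additionally inspects ModRM extensions, immediates and prefixes, as the hunks show:

        #include <stdio.h>

        static const char *classify(unsigned char opcode)
        {
                if (opcode >= 0x70 && opcode <= 0x7f)
                        return "conditional jump (short Jcc)";
                switch (opcode) {
                case 0xe8: return "near call";
                case 0xe9: case 0xeb: return "unconditional jump";
                case 0xc2: case 0xc3: return "near return";
                case 0xcc: case 0xcd: return "software interrupt";
                case 0xff: return "indirect call/jump (ext field in ModRM)";
                default:   return "not a branch byte";
                }
        }

        int main(void)
        {
                unsigned char samples[] = { 0xe8, 0x74, 0xc3, 0xff, 0x90 };
                for (unsigned int i = 0; i < sizeof(samples); i++)
                        printf("0x%02x: %s\n", samples[i], classify(samples[i]));
                return 0;
        }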
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index a4cc66005ce8..7951a5dc73b6 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -24,7 +24,7 @@ struct p4_event_bind {
unsigned int escr_msr[2]; /* ESCR MSR for this event */
unsigned int escr_emask; /* valid ESCR EventMask bits */
unsigned int shared; /* event is shared across threads */
- char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on abscence */
+ char cntr[2][P4_CNTR_LIMIT]; /* counter index (offset), -1 on absence */
};
struct p4_pebs_bind {
@@ -45,7 +45,7 @@ struct p4_pebs_bind {
* it's needed for mapping P4_PEBS_CONFIG_METRIC_MASK bits of
* event configuration to find out which values are to be
* written into MSR_IA32_PEBS_ENABLE and MSR_P4_PEBS_MATRIX_VERT
- * resgisters
+ * registers
*/
static struct p4_pebs_bind p4_pebs_bind_map[] = {
P4_GEN_PEBS_BIND(1stl_cache_load_miss_retired, 0x0000001, 0x0000001),
@@ -947,7 +947,7 @@ static void p4_pmu_enable_pebs(u64 config)
(void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
}
-static void p4_pmu_enable_event(struct perf_event *event)
+static void __p4_pmu_enable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int thread = p4_ht_config_thread(hwc->config);
@@ -983,6 +983,16 @@ static void p4_pmu_enable_event(struct perf_event *event)
(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}
+static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(X86_PMC_IDX_MAX)], p4_running);
+
+static void p4_pmu_enable_event(struct perf_event *event)
+{
+ int idx = event->hw.idx;
+
+ __set_bit(idx, per_cpu(p4_running, smp_processor_id()));
+ __p4_pmu_enable_event(event);
+}
+
static void p4_pmu_enable_all(int added)
{
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -992,7 +1002,7 @@ static void p4_pmu_enable_all(int added)
struct perf_event *event = cpuc->events[idx];
if (!test_bit(idx, cpuc->active_mask))
continue;
- p4_pmu_enable_event(event);
+ __p4_pmu_enable_event(event);
}
}
@@ -1012,7 +1022,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
if (!test_bit(idx, cpuc->active_mask)) {
/* catch in-flight IRQs */
- if (__test_and_clear_bit(idx, cpuc->running))
+ if (__test_and_clear_bit(idx, per_cpu(p4_running, smp_processor_id())))
handled++;
continue;
}
@@ -1313,7 +1323,7 @@ static __initconst const struct x86_pmu p4_pmu = {
.get_event_constraints = x86_get_event_constraints,
/*
* If HT is disabled we may need to use all
- * ARCH_P4_MAX_CCCR counters simulaneously
+ * ARCH_P4_MAX_CCCR counters simultaneously
* though leave it restricted for the moment,
* assuming HT is on
*/
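The per-CPU p4_running bitmap exists so the PMI handler can distinguish an in-flight interrupt for a just-enabled event from a stale one. The protocol is plain set / test-and-clear; a user-space sketch of the same non-atomic helpers the kernel uses (__set_bit / __test_and_clear_bit):

        #include <stdio.h>

        #define BITS_PER_LONG   (8 * sizeof(unsigned long))

        static void set_bit_np(int nr, unsigned long *map)
        {
                map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
        }

        static int test_and_clear_bit_np(int nr, unsigned long *map)
        {
                unsigned long mask = 1UL << (nr % BITS_PER_LONG);
                unsigned long old = map[nr / BITS_PER_LONG];

                map[nr / BITS_PER_LONG] = old & ~mask;
                return (old & mask) != 0;
        }

        int main(void)
        {
                unsigned long running[2] = { 0, 0 };

                set_bit_np(5, running);                            /* enable_event() */
                printf("%d\n", test_and_clear_bit_np(5, running)); /* 1: in-flight */
                printf("%d\n", test_and_clear_bit_np(5, running)); /* 0: already handled */
                return 0;
        }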
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index e94af4a54d0d..915847655c06 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -362,7 +362,7 @@ static bool pt_event_valid(struct perf_event *event)
/*
* Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config
- * clears the assomption that BranchEn must always be enabled,
+ * clears the assumption that BranchEn must always be enabled,
* as was the case with the first implementation of PT.
* If this bit is not set, the legacy behavior is preserved
* for compatibility with the older userspace.
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 33c8180d5a87..9bf4dbbc26e2 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -4,8 +4,13 @@
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "uncore.h"
+#include "uncore_discovery.h"
-static struct intel_uncore_type *empty_uncore[] = { NULL, };
+static bool uncore_no_discover;
+module_param(uncore_no_discover, bool, 0);
+MODULE_PARM_DESC(uncore_no_discover, "Don't enable the Intel uncore PerfMon discovery mechanism "
+ "(default: enable the discovery mechanism).");
+struct intel_uncore_type *empty_uncore[] = { NULL, };
struct intel_uncore_type **uncore_msr_uncores = empty_uncore;
struct intel_uncore_type **uncore_pci_uncores = empty_uncore;
struct intel_uncore_type **uncore_mmio_uncores = empty_uncore;
@@ -48,6 +53,18 @@ int uncore_pcibus_to_dieid(struct pci_bus *bus)
return die_id;
}
+int uncore_die_to_segment(int die)
+{
+ struct pci_bus *bus = NULL;
+
+	/* Find the first PCI bus attached to the specified die. */
+ while ((bus = pci_find_next_bus(bus)) &&
+ (die != uncore_pcibus_to_dieid(bus)))
+ ;
+
+ return bus ? pci_domain_nr(bus) : -EINVAL;
+}
+
static void uncore_free_pcibus_map(void)
{
struct pci2phy_map *map, *tmp;
@@ -784,8 +801,6 @@ static void uncore_pmu_enable(struct pmu *pmu)
struct intel_uncore_box *box;
uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
- if (!uncore_pmu)
- return;
box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
if (!box)
@@ -801,8 +816,6 @@ static void uncore_pmu_disable(struct pmu *pmu)
struct intel_uncore_box *box;
uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
- if (!uncore_pmu)
- return;
box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
if (!box)
@@ -829,6 +842,34 @@ static const struct attribute_group uncore_pmu_attr_group = {
.attrs = uncore_pmu_attrs,
};
+static void uncore_get_pmu_name(struct intel_uncore_pmu *pmu)
+{
+ struct intel_uncore_type *type = pmu->type;
+
+ /*
+	 * There is no uncore block name in the discovery table.
+	 * Use uncore_type_<typeid>_<boxid> as the name.
+ */
+ if (!type->name) {
+ if (type->num_boxes == 1)
+ sprintf(pmu->name, "uncore_type_%u", type->type_id);
+ else {
+ sprintf(pmu->name, "uncore_type_%u_%d",
+ type->type_id, type->box_ids[pmu->pmu_idx]);
+ }
+ return;
+ }
+
+ if (type->num_boxes == 1) {
+ if (strlen(type->name) > 0)
+ sprintf(pmu->name, "uncore_%s", type->name);
+ else
+ sprintf(pmu->name, "uncore");
+ } else
+ sprintf(pmu->name, "uncore_%s_%d", type->name, pmu->pmu_idx);
+
+}
+
static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
{
int ret;
@@ -855,15 +896,7 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
pmu->pmu.attr_update = pmu->type->attr_update;
}
- if (pmu->type->num_boxes == 1) {
- if (strlen(pmu->type->name) > 0)
- sprintf(pmu->name, "uncore_%s", pmu->type->name);
- else
- sprintf(pmu->name, "uncore");
- } else {
- sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
- pmu->pmu_idx);
- }
+ uncore_get_pmu_name(pmu);
ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
if (!ret)
@@ -904,6 +937,10 @@ static void uncore_type_exit(struct intel_uncore_type *type)
kfree(type->pmus);
type->pmus = NULL;
}
+ if (type->box_ids) {
+ kfree(type->box_ids);
+ type->box_ids = NULL;
+ }
kfree(type->events_group);
type->events_group = NULL;
}
@@ -1003,10 +1040,37 @@ static int uncore_pci_get_dev_die_info(struct pci_dev *pdev, int *die)
return 0;
}
+static struct intel_uncore_pmu *
+uncore_pci_find_dev_pmu_from_types(struct pci_dev *pdev)
+{
+ struct intel_uncore_type **types = uncore_pci_uncores;
+ struct intel_uncore_type *type;
+ u64 box_ctl;
+ int i, die;
+
+ for (; *types; types++) {
+ type = *types;
+ for (die = 0; die < __uncore_max_dies; die++) {
+ for (i = 0; i < type->num_boxes; i++) {
+ if (!type->box_ctls[die])
+ continue;
+ box_ctl = type->box_ctls[die] + type->pci_offsets[i];
+ if (pdev->devfn == UNCORE_DISCOVERY_PCI_DEVFN(box_ctl) &&
+ pdev->bus->number == UNCORE_DISCOVERY_PCI_BUS(box_ctl) &&
+ pci_domain_nr(pdev->bus) == UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl))
+ return &type->pmus[i];
+ }
+ }
+ }
+
+ return NULL;
+}
+
/*
* Find the PMU of a PCI device.
* @pdev: The PCI device.
* @ids: The ID table of the available PCI devices with a PMU.
+ * If NULL, search the whole uncore_pci_uncores.
*/
static struct intel_uncore_pmu *
uncore_pci_find_dev_pmu(struct pci_dev *pdev, const struct pci_device_id *ids)
@@ -1016,6 +1080,9 @@ uncore_pci_find_dev_pmu(struct pci_dev *pdev, const struct pci_device_id *ids)
kernel_ulong_t data;
unsigned int devfn;
+ if (!ids)
+ return uncore_pci_find_dev_pmu_from_types(pdev);
+
while (ids && ids->vendor) {
if ((ids->vendor == pdev->vendor) &&
(ids->device == pdev->device)) {
@@ -1174,7 +1241,8 @@ static void uncore_pci_remove(struct pci_dev *pdev)
}
static int uncore_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
+ unsigned long action, void *data,
+ const struct pci_device_id *ids)
{
struct device *dev = data;
struct pci_dev *pdev = to_pci_dev(dev);
@@ -1185,7 +1253,7 @@ static int uncore_bus_notify(struct notifier_block *nb,
if (action != BUS_NOTIFY_DEL_DEVICE)
return NOTIFY_DONE;
- pmu = uncore_pci_find_dev_pmu(pdev, uncore_pci_sub_driver->id_table);
+ pmu = uncore_pci_find_dev_pmu(pdev, ids);
if (!pmu)
return NOTIFY_DONE;
@@ -1197,8 +1265,15 @@ static int uncore_bus_notify(struct notifier_block *nb,
return NOTIFY_OK;
}
-static struct notifier_block uncore_notifier = {
- .notifier_call = uncore_bus_notify,
+static int uncore_pci_sub_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ return uncore_bus_notify(nb, action, data,
+ uncore_pci_sub_driver->id_table);
+}
+
+static struct notifier_block uncore_pci_sub_notifier = {
+ .notifier_call = uncore_pci_sub_bus_notify,
};
static void uncore_pci_sub_driver_init(void)
@@ -1239,13 +1314,55 @@ static void uncore_pci_sub_driver_init(void)
ids++;
}
- if (notify && bus_register_notifier(&pci_bus_type, &uncore_notifier))
+ if (notify && bus_register_notifier(&pci_bus_type, &uncore_pci_sub_notifier))
notify = false;
if (!notify)
uncore_pci_sub_driver = NULL;
}
+static int uncore_pci_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ return uncore_bus_notify(nb, action, data, NULL);
+}
+
+static struct notifier_block uncore_pci_notifier = {
+ .notifier_call = uncore_pci_bus_notify,
+};
+
+static void uncore_pci_pmus_register(void)
+{
+ struct intel_uncore_type **types = uncore_pci_uncores;
+ struct intel_uncore_type *type;
+ struct intel_uncore_pmu *pmu;
+ struct pci_dev *pdev;
+ u64 box_ctl;
+ int i, die;
+
+ for (; *types; types++) {
+ type = *types;
+ for (die = 0; die < __uncore_max_dies; die++) {
+ for (i = 0; i < type->num_boxes; i++) {
+ if (!type->box_ctls[die])
+ continue;
+ box_ctl = type->box_ctls[die] + type->pci_offsets[i];
+ pdev = pci_get_domain_bus_and_slot(UNCORE_DISCOVERY_PCI_DOMAIN(box_ctl),
+ UNCORE_DISCOVERY_PCI_BUS(box_ctl),
+ UNCORE_DISCOVERY_PCI_DEVFN(box_ctl));
+ if (!pdev)
+ continue;
+ pmu = &type->pmus[i];
+
+ uncore_pci_pmu_register(pdev, type, pmu, die);
+ }
+ }
+ }
+
+ bus_register_notifier(&pci_bus_type, &uncore_pci_notifier);
+}
+
static int __init uncore_pci_init(void)
{
size_t size;
@@ -1262,12 +1379,15 @@ static int __init uncore_pci_init(void)
if (ret)
goto errtype;
- uncore_pci_driver->probe = uncore_pci_probe;
- uncore_pci_driver->remove = uncore_pci_remove;
+ if (uncore_pci_driver) {
+ uncore_pci_driver->probe = uncore_pci_probe;
+ uncore_pci_driver->remove = uncore_pci_remove;
- ret = pci_register_driver(uncore_pci_driver);
- if (ret)
- goto errtype;
+ ret = pci_register_driver(uncore_pci_driver);
+ if (ret)
+ goto errtype;
+ } else
+ uncore_pci_pmus_register();
if (uncore_pci_sub_driver)
uncore_pci_sub_driver_init();
@@ -1290,8 +1410,11 @@ static void uncore_pci_exit(void)
if (pcidrv_registered) {
pcidrv_registered = false;
if (uncore_pci_sub_driver)
- bus_unregister_notifier(&pci_bus_type, &uncore_notifier);
- pci_unregister_driver(uncore_pci_driver);
+ bus_unregister_notifier(&pci_bus_type, &uncore_pci_sub_notifier);
+ if (uncore_pci_driver)
+ pci_unregister_driver(uncore_pci_driver);
+ else
+ bus_unregister_notifier(&pci_bus_type, &uncore_pci_notifier);
uncore_types_exit(uncore_pci_uncores);
kfree(uncore_extra_pci_dev);
uncore_free_pcibus_map();
@@ -1625,6 +1748,11 @@ static const struct intel_uncore_init_fun rkl_uncore_init __initconst = {
.pci_init = skl_uncore_pci_init,
};
+static const struct intel_uncore_init_fun adl_uncore_init __initconst = {
+ .cpu_init = adl_uncore_cpu_init,
+ .mmio_init = tgl_uncore_mmio_init,
+};
+
static const struct intel_uncore_init_fun icx_uncore_init __initconst = {
.cpu_init = icx_uncore_cpu_init,
.pci_init = icx_uncore_pci_init,
@@ -1637,6 +1765,12 @@ static const struct intel_uncore_init_fun snr_uncore_init __initconst = {
.mmio_init = snr_uncore_mmio_init,
};
+static const struct intel_uncore_init_fun generic_uncore_init __initconst = {
+ .cpu_init = intel_uncore_generic_uncore_cpu_init,
+ .pci_init = intel_uncore_generic_uncore_pci_init,
+ .mmio_init = intel_uncore_generic_uncore_mmio_init,
+};
+
static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &nhm_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_uncore_init),
@@ -1673,6 +1807,8 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &tgl_l_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &tgl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rkl_uncore_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_uncore_init),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &adl_uncore_init),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init),
{},
};
@@ -1684,17 +1820,21 @@ static int __init intel_uncore_init(void)
struct intel_uncore_init_fun *uncore_init;
int pret = 0, cret = 0, mret = 0, ret;
- id = x86_match_cpu(intel_uncore_match);
- if (!id)
- return -ENODEV;
-
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
return -ENODEV;
__uncore_max_dies =
topology_max_packages() * topology_max_die_per_package();
- uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
+ id = x86_match_cpu(intel_uncore_match);
+ if (!id) {
+ if (!uncore_no_discover && intel_uncore_has_discovery_tables())
+ uncore_init = (struct intel_uncore_init_fun *)&generic_uncore_init;
+ else
+ return -ENODEV;
+ } else
+ uncore_init = (struct intel_uncore_init_fun *)id->driver_data;
+
if (uncore_init->pci_init) {
pret = uncore_init->pci_init();
if (!pret)
@@ -1711,8 +1851,10 @@ static int __init intel_uncore_init(void)
mret = uncore_mmio_init();
}
- if (cret && pret && mret)
- return -ENODEV;
+ if (cret && pret && mret) {
+ ret = -ENODEV;
+ goto free_discovery;
+ }
/* Install hotplug callbacks to setup the targets for each package */
ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
@@ -1727,6 +1869,8 @@ err:
uncore_types_exit(uncore_msr_uncores);
uncore_types_exit(uncore_mmio_uncores);
uncore_pci_exit();
+free_discovery:
+ intel_uncore_clear_discovery_tables();
return ret;
}
module_init(intel_uncore_init);
@@ -1737,5 +1881,6 @@ static void __exit intel_uncore_exit(void)
uncore_types_exit(uncore_msr_uncores);
uncore_types_exit(uncore_mmio_uncores);
uncore_pci_exit();
+ intel_uncore_clear_discovery_tables();
}
module_exit(intel_uncore_exit);
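uncore_get_pmu_name() yields two name families, depending on whether the discovery table supplied a block name. A quick sketch of the resulting strings (the type id, box id and block name below are made-up values):

        #include <stdio.h>

        int main(void)
        {
                char name[32];

                /* Discovery-only block: no name in the table, type_id 6, box 1. */
                snprintf(name, sizeof(name), "uncore_type_%u_%d", 6u, 1);
                printf("%s\n", name);   /* uncore_type_6_1 */

                /* Named block with several boxes, e.g. "cha", pmu_idx 3. */
                snprintf(name, sizeof(name), "uncore_%s_%d", "cha", 3);
                printf("%s\n", name);   /* uncore_cha_3 */
                return 0;
        }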
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index a3c6e1643ad2..187d7287039c 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -42,6 +42,7 @@ struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
+struct intel_uncore_topology;
struct intel_uncore_type {
const char *name;
@@ -50,6 +51,7 @@ struct intel_uncore_type {
int perf_ctr_bits;
int fixed_ctr_bits;
int num_freerunning_types;
+ int type_id;
unsigned perf_ctr;
unsigned event_ctl;
unsigned event_mask;
@@ -57,6 +59,7 @@ struct intel_uncore_type {
unsigned fixed_ctr;
unsigned fixed_ctl;
unsigned box_ctl;
+ u64 *box_ctls; /* Unit ctrl addr of the first box of each die */
union {
unsigned msr_offset;
unsigned mmio_offset;
@@ -65,7 +68,12 @@ struct intel_uncore_type {
unsigned num_shared_regs:8;
unsigned single_fixed:1;
unsigned pair_ctr_ctl:1;
- unsigned *msr_offsets;
+ union {
+ unsigned *msr_offsets;
+ unsigned *pci_offsets;
+ unsigned *mmio_offsets;
+ };
+ unsigned *box_ids;
struct event_constraint unconstrainted;
struct event_constraint *constraints;
struct intel_uncore_pmu *pmus;
@@ -80,10 +88,11 @@ struct intel_uncore_type {
* to identify which platform component each PMON block of that type is
* supposed to monitor.
*/
- u64 *topology;
+ struct intel_uncore_topology *topology;
/*
* Optional callbacks for managing mapping of Uncore units to PMONs
*/
+ int (*get_topology)(struct intel_uncore_type *type);
int (*set_mapping)(struct intel_uncore_type *type);
void (*cleanup_mapping)(struct intel_uncore_type *type);
};
@@ -169,6 +178,11 @@ struct freerunning_counters {
unsigned *box_offsets;
};
+struct intel_uncore_topology {
+ u64 configuration;
+ int segment;
+};
+
struct pci2phy_map {
struct list_head list;
int segment;
@@ -177,6 +191,7 @@ struct pci2phy_map {
struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_dieid(struct pci_bus *bus);
+int uncore_die_to_segment(int die);
ssize_t uncore_event_show(struct device *dev,
struct device_attribute *attr, char *buf);
@@ -547,6 +562,7 @@ uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
+extern struct intel_uncore_type *empty_uncore[];
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
@@ -567,6 +583,7 @@ void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
+void adl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c
new file mode 100644
index 000000000000..aba9bff95413
--- /dev/null
+++ b/arch/x86/events/intel/uncore_discovery.c
@@ -0,0 +1,622 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Support Intel uncore PerfMon discovery mechanism.
+ * Copyright(c) 2021 Intel Corporation.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include "uncore.h"
+#include "uncore_discovery.h"
+
+static struct rb_root discovery_tables = RB_ROOT;
+static int num_discovered_types[UNCORE_ACCESS_MAX];
+
+static bool has_generic_discovery_table(void)
+{
+ struct pci_dev *dev;
+ int dvsec;
+
+ dev = pci_get_device(PCI_VENDOR_ID_INTEL, UNCORE_DISCOVERY_TABLE_DEVICE, NULL);
+ if (!dev)
+ return false;
+
+	/* A discovery table device has a unique capability ID. */
+ dvsec = pci_find_next_ext_capability(dev, 0, UNCORE_EXT_CAP_ID_DISCOVERY);
+ pci_dev_put(dev);
+ if (dvsec)
+ return true;
+
+ return false;
+}
+
+static int logical_die_id;
+
+static int get_device_die_id(struct pci_dev *dev)
+{
+ int cpu, node = pcibus_to_node(dev->bus);
+
+ /*
+ * If the NUMA info is not available, assume that the logical die id is
+ * continuous in the order in which the discovery table devices are
+ * detected.
+ */
+ if (node < 0)
+ return logical_die_id++;
+
+ for_each_cpu(cpu, cpumask_of_node(node)) {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->initialized && cpu_to_node(cpu) == node)
+ return c->logical_die_id;
+ }
+
+ /*
+	 * All CPUs of a node may be offline. In that case, the PCI-
+	 * and MMIO-type uncore blocks enumerated by this device are
+	 * unavailable.
+ */
+ return -1;
+}
+
+#define __node_2_type(cur) \
+ rb_entry((cur), struct intel_uncore_discovery_type, node)
+
+static inline int __type_cmp(const void *key, const struct rb_node *b)
+{
+ struct intel_uncore_discovery_type *type_b = __node_2_type(b);
+ const u16 *type_id = key;
+
+ if (type_b->type > *type_id)
+ return -1;
+ else if (type_b->type < *type_id)
+ return 1;
+
+ return 0;
+}
+
+static inline struct intel_uncore_discovery_type *
+search_uncore_discovery_type(u16 type_id)
+{
+ struct rb_node *node = rb_find(&type_id, &discovery_tables, __type_cmp);
+
+ return (node) ? __node_2_type(node) : NULL;
+}
+
+static inline bool __type_less(struct rb_node *a, const struct rb_node *b)
+{
+ return (__node_2_type(a)->type < __node_2_type(b)->type);
+}
+
+static struct intel_uncore_discovery_type *
+add_uncore_discovery_type(struct uncore_unit_discovery *unit)
+{
+ struct intel_uncore_discovery_type *type;
+
+ if (unit->access_type >= UNCORE_ACCESS_MAX) {
+ pr_warn("Unsupported access type %d\n", unit->access_type);
+ return NULL;
+ }
+
+ type = kzalloc(sizeof(struct intel_uncore_discovery_type), GFP_KERNEL);
+ if (!type)
+ return NULL;
+
+ type->box_ctrl_die = kcalloc(__uncore_max_dies, sizeof(u64), GFP_KERNEL);
+ if (!type->box_ctrl_die)
+ goto free_type;
+
+ type->access_type = unit->access_type;
+ num_discovered_types[type->access_type]++;
+ type->type = unit->box_type;
+
+ rb_add(&type->node, &discovery_tables, __type_less);
+
+ return type;
+
+free_type:
+ kfree(type);
+
+ return NULL;
+
+}
+
+static struct intel_uncore_discovery_type *
+get_uncore_discovery_type(struct uncore_unit_discovery *unit)
+{
+ struct intel_uncore_discovery_type *type;
+
+ type = search_uncore_discovery_type(unit->box_type);
+ if (type)
+ return type;
+
+ return add_uncore_discovery_type(unit);
+}
+
+static void
+uncore_insert_box_info(struct uncore_unit_discovery *unit,
+ int die, bool parsed)
+{
+ struct intel_uncore_discovery_type *type;
+ unsigned int *box_offset, *ids;
+ int i;
+
+ if (WARN_ON_ONCE(!unit->ctl || !unit->ctl_offset || !unit->ctr_offset))
+ return;
+
+ if (parsed) {
+ type = search_uncore_discovery_type(unit->box_type);
+ if (WARN_ON_ONCE(!type))
+ return;
+ /* Store the first box of each die */
+ if (!type->box_ctrl_die[die])
+ type->box_ctrl_die[die] = unit->ctl;
+ return;
+ }
+
+ type = get_uncore_discovery_type(unit);
+ if (!type)
+ return;
+
+ box_offset = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
+ if (!box_offset)
+ return;
+
+ ids = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL);
+ if (!ids)
+ goto free_box_offset;
+
+ /* Store generic information for the first box */
+ if (!type->num_boxes) {
+ type->box_ctrl = unit->ctl;
+ type->box_ctrl_die[die] = unit->ctl;
+ type->num_counters = unit->num_regs;
+ type->counter_width = unit->bit_width;
+ type->ctl_offset = unit->ctl_offset;
+ type->ctr_offset = unit->ctr_offset;
+ *ids = unit->box_id;
+ goto end;
+ }
+
+ for (i = 0; i < type->num_boxes; i++) {
+ ids[i] = type->ids[i];
+ box_offset[i] = type->box_offset[i];
+
+ if (WARN_ON_ONCE(unit->box_id == ids[i]))
+ goto free_ids;
+ }
+ ids[i] = unit->box_id;
+ box_offset[i] = unit->ctl - type->box_ctrl;
+ kfree(type->ids);
+ kfree(type->box_offset);
+end:
+ type->ids = ids;
+ type->box_offset = box_offset;
+ type->num_boxes++;
+ return;
+
+free_ids:
+ kfree(ids);
+
+free_box_offset:
+ kfree(box_offset);
+
+}
+
+static int parse_discovery_table(struct pci_dev *dev, int die,
+ u32 bar_offset, bool *parsed)
+{
+ struct uncore_global_discovery global;
+ struct uncore_unit_discovery unit;
+ void __iomem *io_addr;
+ resource_size_t addr;
+ unsigned long size;
+ u32 val;
+ int i;
+
+ pci_read_config_dword(dev, bar_offset, &val);
+
+ if (val & UNCORE_DISCOVERY_MASK)
+ return -EINVAL;
+
+ addr = (resource_size_t)(val & ~UNCORE_DISCOVERY_MASK);
+ size = UNCORE_DISCOVERY_GLOBAL_MAP_SIZE;
+ io_addr = ioremap(addr, size);
+ if (!io_addr)
+ return -ENOMEM;
+
+ /* Read Global Discovery State */
+ memcpy_fromio(&global, io_addr, sizeof(struct uncore_global_discovery));
+ if (uncore_discovery_invalid_unit(global)) {
+ pr_info("Invalid Global Discovery State: 0x%llx 0x%llx 0x%llx\n",
+ global.table1, global.ctl, global.table3);
+ iounmap(io_addr);
+ return -EINVAL;
+ }
+ iounmap(io_addr);
+
+ size = (1 + global.max_units) * global.stride * 8;
+ io_addr = ioremap(addr, size);
+ if (!io_addr)
+ return -ENOMEM;
+
+	/* Parse the Unit Discovery State entries */
+ for (i = 0; i < global.max_units; i++) {
+ memcpy_fromio(&unit, io_addr + (i + 1) * (global.stride * 8),
+ sizeof(struct uncore_unit_discovery));
+
+ if (uncore_discovery_invalid_unit(unit))
+ continue;
+
+ if (unit.access_type >= UNCORE_ACCESS_MAX)
+ continue;
+
+ uncore_insert_box_info(&unit, die, *parsed);
+ }
+
+ *parsed = true;
+ iounmap(io_addr);
+ return 0;
+}
+
+bool intel_uncore_has_discovery_tables(void)
+{
+ u32 device, val, entry_id, bar_offset;
+ int die, dvsec = 0, ret = true;
+ struct pci_dev *dev = NULL;
+ bool parsed = false;
+
+ if (has_generic_discovery_table())
+ device = UNCORE_DISCOVERY_TABLE_DEVICE;
+ else
+ device = PCI_ANY_ID;
+
+ /*
+	 * Start a new search and iterate through the list of
+	 * discovery table devices.
+ */
+ while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, dev)) != NULL) {
+ while ((dvsec = pci_find_next_ext_capability(dev, dvsec, UNCORE_EXT_CAP_ID_DISCOVERY))) {
+ pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC_OFFSET, &val);
+ entry_id = val & UNCORE_DISCOVERY_DVSEC_ID_MASK;
+ if (entry_id != UNCORE_DISCOVERY_DVSEC_ID_PMON)
+ continue;
+
+ pci_read_config_dword(dev, dvsec + UNCORE_DISCOVERY_DVSEC2_OFFSET, &val);
+
+ if (val & ~UNCORE_DISCOVERY_DVSEC2_BIR_MASK) {
+ ret = false;
+ goto err;
+ }
+ bar_offset = UNCORE_DISCOVERY_BIR_BASE +
+ (val & UNCORE_DISCOVERY_DVSEC2_BIR_MASK) * UNCORE_DISCOVERY_BIR_STEP;
+
+ die = get_device_die_id(dev);
+ if (die < 0)
+ continue;
+
+ parse_discovery_table(dev, die, bar_offset, &parsed);
+ }
+ }
+
+ /* None of the discovery tables are available */
+ if (!parsed)
+ ret = false;
+err:
+ pci_dev_put(dev);
+
+ return ret;
+}
+
+void intel_uncore_clear_discovery_tables(void)
+{
+ struct intel_uncore_discovery_type *type, *next;
+
+ rbtree_postorder_for_each_entry_safe(type, next, &discovery_tables, node) {
+ kfree(type->box_ctrl_die);
+ kfree(type);
+ }
+}
+
+DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
+DEFINE_UNCORE_FORMAT_ATTR(thresh, thresh, "config:24-31");
+
+static struct attribute *generic_uncore_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh.attr,
+ NULL,
+};
+
+static const struct attribute_group generic_uncore_format_group = {
+ .name = "format",
+ .attrs = generic_uncore_formats_attr,
+};
+
+static void intel_generic_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_INT);
+}
+
+static void intel_generic_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+ wrmsrl(uncore_msr_box_ctl(box), GENERIC_PMON_BOX_CTL_FRZ);
+}
+
+static void intel_generic_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+ wrmsrl(uncore_msr_box_ctl(box), 0);
+}
+
+static void intel_generic_uncore_msr_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ wrmsrl(hwc->config_base, hwc->config);
+}
+
+static void intel_generic_uncore_msr_disable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ wrmsrl(hwc->config_base, 0);
+}
+
+static struct intel_uncore_ops generic_uncore_msr_ops = {
+ .init_box = intel_generic_uncore_msr_init_box,
+ .disable_box = intel_generic_uncore_msr_disable_box,
+ .enable_box = intel_generic_uncore_msr_enable_box,
+ .disable_event = intel_generic_uncore_msr_disable_event,
+ .enable_event = intel_generic_uncore_msr_enable_event,
+ .read_counter = uncore_msr_read_counter,
+};
+
+static void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ int box_ctl = uncore_pci_box_ctl(box);
+
+ __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
+ pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_INT);
+}
+
+static void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ int box_ctl = uncore_pci_box_ctl(box);
+
+ pci_write_config_dword(pdev, box_ctl, GENERIC_PMON_BOX_CTL_FRZ);
+}
+
+static void intel_generic_uncore_pci_enable_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ int box_ctl = uncore_pci_box_ctl(box);
+
+ pci_write_config_dword(pdev, box_ctl, 0);
+}
+
+static void intel_generic_uncore_pci_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+
+ pci_write_config_dword(pdev, hwc->config_base, hwc->config);
+}
+
+static void intel_generic_uncore_pci_disable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+
+ pci_write_config_dword(pdev, hwc->config_base, 0);
+}
+
+static u64 intel_generic_uncore_pci_read_counter(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+ u64 count = 0;
+
+ pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
+ pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
+
+ return count;
+}
+
+static struct intel_uncore_ops generic_uncore_pci_ops = {
+ .init_box = intel_generic_uncore_pci_init_box,
+ .disable_box = intel_generic_uncore_pci_disable_box,
+ .enable_box = intel_generic_uncore_pci_enable_box,
+ .disable_event = intel_generic_uncore_pci_disable_event,
+ .enable_event = intel_generic_uncore_pci_enable_event,
+ .read_counter = intel_generic_uncore_pci_read_counter,
+};
+
+#define UNCORE_GENERIC_MMIO_SIZE 0x4000
+
+static unsigned int generic_uncore_mmio_box_ctl(struct intel_uncore_box *box)
+{
+ struct intel_uncore_type *type = box->pmu->type;
+
+ if (!type->box_ctls || !type->box_ctls[box->dieid] || !type->mmio_offsets)
+ return 0;
+
+ return type->box_ctls[box->dieid] + type->mmio_offsets[box->pmu->pmu_idx];
+}
+
+static void intel_generic_uncore_mmio_init_box(struct intel_uncore_box *box)
+{
+ unsigned int box_ctl = generic_uncore_mmio_box_ctl(box);
+ struct intel_uncore_type *type = box->pmu->type;
+ resource_size_t addr;
+
+ if (!box_ctl) {
+ pr_warn("Uncore type %d box %d: Invalid box control address.\n",
+ type->type_id, type->box_ids[box->pmu->pmu_idx]);
+ return;
+ }
+
+ addr = box_ctl;
+ box->io_addr = ioremap(addr, UNCORE_GENERIC_MMIO_SIZE);
+ if (!box->io_addr) {
+ pr_warn("Uncore type %d box %d: ioremap error for 0x%llx.\n",
+ type->type_id, type->box_ids[box->pmu->pmu_idx],
+ (unsigned long long)addr);
+ return;
+ }
+
+ writel(GENERIC_PMON_BOX_CTL_INT, box->io_addr);
+}
+
+static void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box)
+{
+ if (!box->io_addr)
+ return;
+
+ writel(GENERIC_PMON_BOX_CTL_FRZ, box->io_addr);
+}
+
+static void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
+{
+ if (!box->io_addr)
+ return;
+
+ writel(0, box->io_addr);
+}
+
+static void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!box->io_addr)
+ return;
+
+ writel(hwc->config, box->io_addr + hwc->config_base);
+}
+
+static void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!box->io_addr)
+ return;
+
+ writel(0, box->io_addr + hwc->config_base);
+}
+
+static struct intel_uncore_ops generic_uncore_mmio_ops = {
+ .init_box = intel_generic_uncore_mmio_init_box,
+ .exit_box = uncore_mmio_exit_box,
+ .disable_box = intel_generic_uncore_mmio_disable_box,
+ .enable_box = intel_generic_uncore_mmio_enable_box,
+ .disable_event = intel_generic_uncore_mmio_disable_event,
+ .enable_event = intel_generic_uncore_mmio_enable_event,
+ .read_counter = uncore_mmio_read_counter,
+};
+
+static bool uncore_update_uncore_type(enum uncore_access_type type_id,
+ struct intel_uncore_type *uncore,
+ struct intel_uncore_discovery_type *type)
+{
+ uncore->type_id = type->type;
+ uncore->num_boxes = type->num_boxes;
+ uncore->num_counters = type->num_counters;
+ uncore->perf_ctr_bits = type->counter_width;
+ uncore->box_ids = type->ids;
+
+ switch (type_id) {
+ case UNCORE_ACCESS_MSR:
+ uncore->ops = &generic_uncore_msr_ops;
+ uncore->perf_ctr = (unsigned int)type->box_ctrl + type->ctr_offset;
+ uncore->event_ctl = (unsigned int)type->box_ctrl + type->ctl_offset;
+ uncore->box_ctl = (unsigned int)type->box_ctrl;
+ uncore->msr_offsets = type->box_offset;
+ break;
+ case UNCORE_ACCESS_PCI:
+ uncore->ops = &generic_uncore_pci_ops;
+ uncore->perf_ctr = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctr_offset;
+ uncore->event_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl) + type->ctl_offset;
+ uncore->box_ctl = (unsigned int)UNCORE_DISCOVERY_PCI_BOX_CTRL(type->box_ctrl);
+ uncore->box_ctls = type->box_ctrl_die;
+ uncore->pci_offsets = type->box_offset;
+ break;
+ case UNCORE_ACCESS_MMIO:
+ uncore->ops = &generic_uncore_mmio_ops;
+ uncore->perf_ctr = (unsigned int)type->ctr_offset;
+ uncore->event_ctl = (unsigned int)type->ctl_offset;
+ uncore->box_ctl = (unsigned int)type->box_ctrl;
+ uncore->box_ctls = type->box_ctrl_die;
+ uncore->mmio_offsets = type->box_offset;
+ uncore->mmio_map_size = UNCORE_GENERIC_MMIO_SIZE;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static struct intel_uncore_type **
+intel_uncore_generic_init_uncores(enum uncore_access_type type_id)
+{
+ struct intel_uncore_discovery_type *type;
+ struct intel_uncore_type **uncores;
+ struct intel_uncore_type *uncore;
+ struct rb_node *node;
+ int i = 0;
+
+ uncores = kcalloc(num_discovered_types[type_id] + 1,
+ sizeof(struct intel_uncore_type *), GFP_KERNEL);
+ if (!uncores)
+ return empty_uncore;
+
+ for (node = rb_first(&discovery_tables); node; node = rb_next(node)) {
+ type = rb_entry(node, struct intel_uncore_discovery_type, node);
+ if (type->access_type != type_id)
+ continue;
+
+ uncore = kzalloc(sizeof(struct intel_uncore_type), GFP_KERNEL);
+ if (!uncore)
+ break;
+
+ uncore->event_mask = GENERIC_PMON_RAW_EVENT_MASK;
+ uncore->format_group = &generic_uncore_format_group;
+
+ if (!uncore_update_uncore_type(type_id, uncore, type)) {
+ kfree(uncore);
+ continue;
+ }
+ uncores[i++] = uncore;
+ }
+
+ return uncores;
+}
+
+void intel_uncore_generic_uncore_cpu_init(void)
+{
+ uncore_msr_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MSR);
+}
+
+int intel_uncore_generic_uncore_pci_init(void)
+{
+ uncore_pci_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_PCI);
+
+ return 0;
+}
+
+void intel_uncore_generic_uncore_mmio_init(void)
+{
+ uncore_mmio_uncores = intel_uncore_generic_init_uncores(UNCORE_ACCESS_MMIO);
+}
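Two address computations carry most of the parsing above: the BIR field of the second DVSEC selects which BAR holds the table, and unit entry i lives (i + 1) * stride * 8 bytes past the global header. A standalone sketch with example inputs (the constants mirror the #defines in uncore_discovery.h below; the register values are arbitrary):

        #include <stdio.h>

        #define UNCORE_DISCOVERY_BIR_BASE        0x10
        #define UNCORE_DISCOVERY_BIR_STEP        0x4
        #define UNCORE_DISCOVERY_DVSEC2_BIR_MASK 0x7

        int main(void)
        {
                unsigned int dvsec2 = 0x2;      /* example config-space read */
                unsigned int bir = dvsec2 & UNCORE_DISCOVERY_DVSEC2_BIR_MASK;
                unsigned int bar_offset = UNCORE_DISCOVERY_BIR_BASE +
                                          bir * UNCORE_DISCOVERY_BIR_STEP;

                printf("table BAR at config offset 0x%x\n", bar_offset); /* 0x18 */

                /* Unit entries follow the global header, one stride apart. */
                unsigned int stride = 2;        /* example: 2 * 8 = 16 bytes/entry */
                for (int i = 0; i < 3; i++)
                        printf("unit %d at +0x%x\n", i, (i + 1) * stride * 8);
                return 0;
        }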
diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h
new file mode 100644
index 000000000000..1d652939a01c
--- /dev/null
+++ b/arch/x86/events/intel/uncore_discovery.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/* Generic device ID of a discovery table device */
+#define UNCORE_DISCOVERY_TABLE_DEVICE 0x09a7
+/* Capability ID for a discovery table device */
+#define UNCORE_EXT_CAP_ID_DISCOVERY 0x23
+/* First DVSEC offset */
+#define UNCORE_DISCOVERY_DVSEC_OFFSET 0x8
+/* Mask of the supported discovery entry type */
+#define UNCORE_DISCOVERY_DVSEC_ID_MASK 0xffff
+/* PMON discovery entry type ID */
+#define UNCORE_DISCOVERY_DVSEC_ID_PMON 0x1
+/* Second DVSEC offset */
+#define UNCORE_DISCOVERY_DVSEC2_OFFSET 0xc
+/* Mask of the discovery table BAR offset */
+#define UNCORE_DISCOVERY_DVSEC2_BIR_MASK 0x7
+/* Discovery table BAR base offset */
+#define UNCORE_DISCOVERY_BIR_BASE 0x10
+/* Discovery table BAR step */
+#define UNCORE_DISCOVERY_BIR_STEP 0x4
+/* Mask of the discovery table offset */
+#define UNCORE_DISCOVERY_MASK 0xf
+/* Global discovery table size */
+#define UNCORE_DISCOVERY_GLOBAL_MAP_SIZE 0x20
+
+#define UNCORE_DISCOVERY_PCI_DOMAIN(data) ((data >> 28) & 0x7)
+#define UNCORE_DISCOVERY_PCI_BUS(data) ((data >> 20) & 0xff)
+#define UNCORE_DISCOVERY_PCI_DEVFN(data) ((data >> 12) & 0xff)
+#define UNCORE_DISCOVERY_PCI_BOX_CTRL(data) (data & 0xfff)
+
+
+#define uncore_discovery_invalid_unit(unit) \
+ (!unit.table1 || !unit.ctl || !unit.table3 || \
+ unit.table1 == -1ULL || unit.ctl == -1ULL || \
+ unit.table3 == -1ULL)
+
+#define GENERIC_PMON_CTL_EV_SEL_MASK 0x000000ff
+#define GENERIC_PMON_CTL_UMASK_MASK 0x0000ff00
+#define GENERIC_PMON_CTL_EDGE_DET (1 << 18)
+#define GENERIC_PMON_CTL_INVERT (1 << 23)
+#define GENERIC_PMON_CTL_THRESH_MASK	0xff000000
+#define GENERIC_PMON_RAW_EVENT_MASK (GENERIC_PMON_CTL_EV_SEL_MASK | \
+ GENERIC_PMON_CTL_UMASK_MASK | \
+ GENERIC_PMON_CTL_EDGE_DET | \
+ GENERIC_PMON_CTL_INVERT | \
+					 GENERIC_PMON_CTL_THRESH_MASK)
+
+#define GENERIC_PMON_BOX_CTL_FRZ (1 << 0)
+#define GENERIC_PMON_BOX_CTL_RST_CTRL (1 << 8)
+#define GENERIC_PMON_BOX_CTL_RST_CTRS (1 << 9)
+#define GENERIC_PMON_BOX_CTL_INT (GENERIC_PMON_BOX_CTL_RST_CTRL | \
+ GENERIC_PMON_BOX_CTL_RST_CTRS)
+
+enum uncore_access_type {
+ UNCORE_ACCESS_MSR = 0,
+ UNCORE_ACCESS_MMIO,
+ UNCORE_ACCESS_PCI,
+
+ UNCORE_ACCESS_MAX,
+};
+
+struct uncore_global_discovery {
+ union {
+ u64 table1;
+ struct {
+ u64 type : 8,
+ stride : 8,
+ max_units : 10,
+ __reserved_1 : 36,
+ access_type : 2;
+ };
+ };
+
+ u64 ctl; /* Global Control Address */
+
+ union {
+ u64 table3;
+ struct {
+ u64 status_offset : 8,
+ num_status : 16,
+ __reserved_2 : 40;
+ };
+ };
+};
+
+struct uncore_unit_discovery {
+ union {
+ u64 table1;
+ struct {
+ u64 num_regs : 8,
+ ctl_offset : 8,
+ bit_width : 8,
+ ctr_offset : 8,
+ status_offset : 8,
+ __reserved_1 : 22,
+ access_type : 2;
+ };
+ };
+
+ u64 ctl; /* Unit Control Address */
+
+ union {
+ u64 table3;
+ struct {
+ u64 box_type : 16,
+ box_id : 16,
+ __reserved_2 : 32;
+ };
+ };
+};
+
+struct intel_uncore_discovery_type {
+ struct rb_node node;
+ enum uncore_access_type access_type;
+ u64 box_ctrl; /* Unit ctrl addr of the first box */
+ u64 *box_ctrl_die; /* Unit ctrl addr of the first box of each die */
+ u16 type; /* Type ID of the uncore block */
+ u8 num_counters;
+ u8 counter_width;
+ u8 ctl_offset; /* Counter Control 0 offset */
+ u8 ctr_offset; /* Counter 0 offset */
+ u16 num_boxes; /* number of boxes for the uncore block */
+ unsigned int *ids; /* Box IDs */
+ unsigned int *box_offset; /* Box offset */
+};
+
+bool intel_uncore_has_discovery_tables(void);
+void intel_uncore_clear_discovery_tables(void);
+void intel_uncore_generic_uncore_cpu_init(void);
+int intel_uncore_generic_uncore_pci_init(void);
+void intel_uncore_generic_uncore_mmio_init(void);
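For PCI-type units the 64-bit unit control value packs domain, bus, devfn and a register offset into one word; the UNCORE_DISCOVERY_PCI_* macros above pull them back apart. A self-contained decode of a sample value (the sample and the local macro names are illustrative only):

        #include <stdio.h>
        #include <stdint.h>

        #define PCI_DOMAIN(data)   (((data) >> 28) & 0x7)
        #define PCI_BUS(data)      (((data) >> 20) & 0xff)
        #define PCI_DEVFN(data)    (((data) >> 12) & 0xff)
        #define PCI_BOX_CTRL(data) ((data) & 0xfff)

        int main(void)
        {
                uint64_t box_ctl = (0ULL    << 28) |    /* domain 0      */
                                   (0x7eULL << 20) |    /* bus 0x7e      */
                                   (0x08ULL << 12) |    /* devfn 0x08    */
                                   0x200;               /* ctl offset    */

                printf("%04llx:%02llx devfn %02llx ctl +0x%llx\n",
                       (unsigned long long)PCI_DOMAIN(box_ctl),
                       (unsigned long long)PCI_BUS(box_ctl),
                       (unsigned long long)PCI_DEVFN(box_ctl),
                       (unsigned long long)PCI_BOX_CTRL(box_ctl));
                return 0;
        }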
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 51271288499e..0f63706cdadf 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -62,6 +62,8 @@
#define PCI_DEVICE_ID_INTEL_TGL_H_IMC 0x9a36
#define PCI_DEVICE_ID_INTEL_RKL_1_IMC 0x4c43
#define PCI_DEVICE_ID_INTEL_RKL_2_IMC 0x4c53
+#define PCI_DEVICE_ID_INTEL_ADL_1_IMC 0x4660
+#define PCI_DEVICE_ID_INTEL_ADL_2_IMC 0x4641
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
@@ -131,12 +133,33 @@
#define ICL_UNC_ARB_PER_CTR 0x3b1
#define ICL_UNC_ARB_PERFEVTSEL 0x3b3
+/* ADL uncore global control */
+#define ADL_UNC_PERF_GLOBAL_CTL 0x2ff0
+#define ADL_UNC_FIXED_CTR_CTRL 0x2fde
+#define ADL_UNC_FIXED_CTR 0x2fdf
+
+/* ADL Cbo register */
+#define ADL_UNC_CBO_0_PER_CTR0 0x2002
+#define ADL_UNC_CBO_0_PERFEVTSEL0 0x2000
+#define ADL_UNC_CTL_THRESHOLD 0x3f000000
+#define ADL_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
+ SNB_UNC_CTL_UMASK_MASK | \
+ SNB_UNC_CTL_EDGE_DET | \
+ SNB_UNC_CTL_INVERT | \
+ ADL_UNC_CTL_THRESHOLD)
+
+/* ADL ARB register */
+#define ADL_UNC_ARB_PER_CTR0 0x2FD2
+#define ADL_UNC_ARB_PERFEVTSEL0 0x2FD0
+#define ADL_UNC_ARB_MSR_OFFSET 0x8
+
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(threshold, threshold, "config:24-29");
/* Sandy Bridge uncore support */
static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
@@ -422,6 +445,106 @@ void tgl_uncore_cpu_init(void)
skl_uncore_msr_ops.init_box = rkl_uncore_msr_init_box;
}
+static void adl_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->pmu_idx == 0)
+ wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
+}
+
+static void adl_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+ wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, SNB_UNC_GLOBAL_CTL_EN);
+}
+
+static void adl_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->pmu_idx == 0)
+ wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
+}
+
+static void adl_uncore_msr_exit_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->pmu_idx == 0)
+ wrmsrl(ADL_UNC_PERF_GLOBAL_CTL, 0);
+}
+
+static struct intel_uncore_ops adl_uncore_msr_ops = {
+ .init_box = adl_uncore_msr_init_box,
+ .enable_box = adl_uncore_msr_enable_box,
+ .disable_box = adl_uncore_msr_disable_box,
+ .exit_box = adl_uncore_msr_exit_box,
+ .disable_event = snb_uncore_msr_disable_event,
+ .enable_event = snb_uncore_msr_enable_event,
+ .read_counter = uncore_msr_read_counter,
+};
+
+static struct attribute *adl_uncore_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_threshold.attr,
+ NULL,
+};
+
+static const struct attribute_group adl_uncore_format_group = {
+ .name = "format",
+ .attrs = adl_uncore_formats_attr,
+};
+
+static struct intel_uncore_type adl_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 2,
+ .perf_ctr_bits = 44,
+ .perf_ctr = ADL_UNC_CBO_0_PER_CTR0,
+ .event_ctl = ADL_UNC_CBO_0_PERFEVTSEL0,
+ .event_mask = ADL_UNC_RAW_EVENT_MASK,
+ .msr_offset = ICL_UNC_CBO_MSR_OFFSET,
+ .ops = &adl_uncore_msr_ops,
+ .format_group = &adl_uncore_format_group,
+};
+
+static struct intel_uncore_type adl_uncore_arb = {
+ .name = "arb",
+ .num_counters = 2,
+ .num_boxes = 2,
+ .perf_ctr_bits = 44,
+ .perf_ctr = ADL_UNC_ARB_PER_CTR0,
+ .event_ctl = ADL_UNC_ARB_PERFEVTSEL0,
+ .event_mask = SNB_UNC_RAW_EVENT_MASK,
+ .msr_offset = ADL_UNC_ARB_MSR_OFFSET,
+ .constraints = snb_uncore_arb_constraints,
+ .ops = &adl_uncore_msr_ops,
+ .format_group = &snb_uncore_format_group,
+};
+
+static struct intel_uncore_type adl_uncore_clockbox = {
+ .name = "clock",
+ .num_counters = 1,
+ .num_boxes = 1,
+ .fixed_ctr_bits = 48,
+ .fixed_ctr = ADL_UNC_FIXED_CTR,
+ .fixed_ctl = ADL_UNC_FIXED_CTR_CTRL,
+ .single_fixed = 1,
+ .event_mask = SNB_UNC_CTL_EV_SEL_MASK,
+ .format_group = &icl_uncore_clock_format_group,
+ .ops = &adl_uncore_msr_ops,
+ .event_descs = icl_uncore_events,
+};
+
+static struct intel_uncore_type *adl_msr_uncores[] = {
+ &adl_uncore_cbox,
+ &adl_uncore_arb,
+ &adl_uncore_clockbox,
+ NULL,
+};
+
+void adl_uncore_cpu_init(void)
+{
+ adl_uncore_cbox.num_boxes = icl_get_cbox_num();
+ uncore_msr_uncores = adl_msr_uncores;
+}
+
enum {
SNB_PCI_UNCORE_IMC,
};
@@ -1203,6 +1326,14 @@ static const struct pci_device_id tgl_uncore_pci_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGL_H_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_1_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_2_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
{ /* end: all zeroes */ }
};
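The ADL event mask admits event select, umask, edge detect, invert and a 6-bit threshold. Assembling a raw config value by hand, with field positions taken from the format attributes in this file (the event and umask values are arbitrary examples):

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t event = 0x22, umask = 0x01, thresh = 0x02;

                uint64_t config = (event  & 0xff)        |      /* config:0-7   */
                                  ((umask & 0xff) << 8)  |      /* config:8-15  */
                                  (1ULL << 18)           |      /* edge detect  */
                                  ((thresh & 0x3f) << 24);      /* config:24-29 */

                printf("raw config: 0x%llx\n", (unsigned long long)config);
                return 0;
        }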
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index b79951d0707c..bb6eb1e5569c 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -280,17 +280,17 @@
* | [63] | 00h | VALID - When set, indicates the CPU bus
* numbers have been initialized. (RO)
* |[62:48]| --- | Reserved
- * |[47:40]| 00h | BUS_NUM_5 — Return the bus number BIOS assigned
+ * |[47:40]| 00h | BUS_NUM_5 - Return the bus number BIOS assigned
* CPUBUSNO(5). (RO)
- * |[39:32]| 00h | BUS_NUM_4 — Return the bus number BIOS assigned
+ * |[39:32]| 00h | BUS_NUM_4 - Return the bus number BIOS assigned
* CPUBUSNO(4). (RO)
- * |[31:24]| 00h | BUS_NUM_3 — Return the bus number BIOS assigned
+ * |[31:24]| 00h | BUS_NUM_3 - Return the bus number BIOS assigned
* CPUBUSNO(3). (RO)
- * |[23:16]| 00h | BUS_NUM_2 — Return the bus number BIOS assigned
+ * |[23:16]| 00h | BUS_NUM_2 - Return the bus number BIOS assigned
* CPUBUSNO(2). (RO)
- * |[15:8] | 00h | BUS_NUM_1 — Return the bus number BIOS assigned
+ * |[15:8] | 00h | BUS_NUM_1 - Return the bus number BIOS assigned
* CPUBUSNO(1). (RO)
- * | [7:0] | 00h | BUS_NUM_0 — Return the bus number BIOS assigned
+ * | [7:0] | 00h | BUS_NUM_0 - Return the bus number BIOS assigned
* CPUBUSNO(0). (RO)
*/
#define SKX_MSR_CPU_BUS_NUMBER 0x300
@@ -348,6 +348,13 @@
#define SKX_M2M_PCI_PMON_CTR0 0x200
#define SKX_M2M_PCI_PMON_BOX_CTL 0x258
+/* Memory Map registers device ID */
+#define SNR_ICX_MESH2IIO_MMAP_DID 0x9a2
+#define SNR_ICX_SAD_CONTROL_CFG 0x3f4
+
+/* Get the I/O stack id in SAD_CONTROL_CFG notation */
+#define SAD_CONTROL_STACK_ID(data) (((data) >> 4) & 0x7)
+
/* SNR Ubox */
#define SNR_U_MSR_PMON_CTR0 0x1f98
#define SNR_U_MSR_PMON_CTL0 0x1f91
@@ -1159,7 +1166,6 @@ enum {
SNBEP_PCI_QPI_PORT0_FILTER,
SNBEP_PCI_QPI_PORT1_FILTER,
BDX_PCI_QPI_PORT2_FILTER,
- HSWEP_PCI_PCU_3,
};
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
@@ -1407,6 +1413,8 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
die_id = i;
else
die_id = topology_phys_to_logical_pkg(i);
+ if (die_id < 0)
+ die_id = -ENODEV;
map->pbus_to_dieid[bus] = die_id;
break;
}
@@ -1453,14 +1461,14 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
i = -1;
if (reverse) {
for (bus = 255; bus >= 0; bus--) {
- if (map->pbus_to_dieid[bus] >= 0)
+ if (map->pbus_to_dieid[bus] != -1)
i = map->pbus_to_dieid[bus];
else
map->pbus_to_dieid[bus] = i;
}
} else {
for (bus = 0; bus <= 255; bus++) {
- if (map->pbus_to_dieid[bus] >= 0)
+ if (map->pbus_to_dieid[bus] != -1)
i = map->pbus_to_dieid[bus];
else
map->pbus_to_dieid[bus] = i;
@@ -2857,22 +2865,33 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
NULL,
};
-void hswep_uncore_cpu_init(void)
+#define HSWEP_PCU_DID 0x2fc0
+#define HSWEP_PCU_CAPID4_OFFSET	0x94
+#define hswep_get_chop(_cap) (((_cap) >> 6) & 0x3)
+
+static bool hswep_has_limit_sbox(unsigned int device)
{
- int pkg = boot_cpu_data.logical_proc_id;
+ struct pci_dev *dev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
+ u32 capid4;
+
+ if (!dev)
+ return false;
+
+	pci_read_config_dword(dev, HSWEP_PCU_CAPID4_OFFSET, &capid4);
+ if (!hswep_get_chop(capid4))
+ return true;
+
+ return false;
+}
+void hswep_uncore_cpu_init(void)
+{
if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
/* Detect 6-8 core systems with only two SBOXes */
- if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
- u32 capid4;
-
- pci_read_config_dword(uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3],
- 0x94, &capid4);
- if (((capid4 >> 6) & 0x3) == 0)
- hswep_uncore_sbox.num_boxes = 2;
- }
+ if (hswep_has_limit_sbox(HSWEP_PCU_DID))
+ hswep_uncore_sbox.num_boxes = 2;
uncore_msr_uncores = hswep_msr_uncores;
}
@@ -3135,11 +3154,6 @@ static const struct pci_device_id hswep_uncore_pci_ids[] = {
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
SNBEP_PCI_QPI_PORT1_FILTER),
},
- { /* PCU.3 (for Capability registers) */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
- HSWEP_PCI_PCU_3),
- },
{ /* end: all zeroes */ }
};
@@ -3231,27 +3245,18 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
EVENT_CONSTRAINT_END
};
+#define BDX_PCU_DID 0x6fc0
+
void bdx_uncore_cpu_init(void)
{
- int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
-
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;
- /* BDX-DE doesn't have SBOX */
- if (boot_cpu_data.x86_model == 86) {
- uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
/* Detect systems with no SBOXes */
- } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) {
- struct pci_dev *pdev;
- u32 capid4;
-
- pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3];
- pci_read_config_dword(pdev, 0x94, &capid4);
- if (((capid4 >> 6) & 0x3) == 0)
- bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
- }
+ if ((boot_cpu_data.x86_model == 86) || hswep_has_limit_sbox(BDX_PCU_DID))
+ uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL;
+
hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
@@ -3472,11 +3477,6 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = {
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
BDX_PCI_QPI_PORT2_FILTER),
},
- { /* PCU.3 (for Capability registers) */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0),
- .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
- HSWEP_PCI_PCU_3),
- },
{ /* end: all zeroes */ }
};
@@ -3684,32 +3684,35 @@ static struct intel_uncore_ops skx_uncore_iio_ops = {
static inline u8 skx_iio_stack(struct intel_uncore_pmu *pmu, int die)
{
- return pmu->type->topology[die] >> (pmu->pmu_idx * BUS_NUM_STRIDE);
+ return pmu->type->topology[die].configuration >>
+ (pmu->pmu_idx * BUS_NUM_STRIDE);
}
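The per-die "configuration" value is the raw SKX_MSR_CPU_BUS_NUMBER contents: one root-bus byte per IIO PMU, BUS_NUM_STRIDE (8) bits apart, matching the MSR layout documented at the top of this patch. A sketch of the unpacking with an invented MSR value:

u64 cfg = 0x00000000d5b37a3eULL;	/* hypothetical CPUBUSNO(0..4) */
u8 bus0 = cfg >> (0 * BUS_NUM_STRIDE);	/* 0x3e for pmu_idx 0 */
u8 bus2 = cfg >> (2 * BUS_NUM_STRIDE);	/* 0xb3 for pmu_idx 2 */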
static umode_t
-skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
+pmu_iio_mapping_visible(struct kobject *kobj, struct attribute *attr,
+ int die, int zero_bus_pmu)
{
struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(kobj_to_dev(kobj));
- /* Root bus 0x00 is valid only for die 0 AND pmu_idx = 0. */
- return (!skx_iio_stack(pmu, die) && pmu->pmu_idx) ? 0 : attr->mode;
+ return (!skx_iio_stack(pmu, die) && pmu->pmu_idx != zero_bus_pmu) ? 0 : attr->mode;
+}
+
+static umode_t
+skx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
+{
+ /* Root bus 0x00 is valid only for pmu_idx = 0. */
+ return pmu_iio_mapping_visible(kobj, attr, die, 0);
}
static ssize_t skx_iio_mapping_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
- struct pci_bus *bus = pci_find_next_bus(NULL);
- struct intel_uncore_pmu *uncore_pmu = dev_to_uncore_pmu(dev);
+ struct intel_uncore_pmu *pmu = dev_to_uncore_pmu(dev);
struct dev_ext_attribute *ea = to_dev_ext_attribute(attr);
long die = (long)ea->var;
- /*
- * Current implementation is for single segment configuration hence it's
- * safe to take the segment value from the first available root bus.
- */
- return sprintf(buf, "%04x:%02x\n", pci_domain_nr(bus),
- skx_iio_stack(uncore_pmu, die));
+ return sprintf(buf, "%04x:%02x\n", pmu->type->topology[die].segment,
+ skx_iio_stack(pmu, die));
}
static int skx_msr_cpu_bus_read(int cpu, u64 *topology)
@@ -3746,34 +3749,32 @@ static int die_to_cpu(int die)
static int skx_iio_get_topology(struct intel_uncore_type *type)
{
- int i, ret;
- struct pci_bus *bus = NULL;
+ int die, ret = -EPERM;
- /*
- * Verified single-segment environments only; disabled for multiple
- * segment topologies for now except VMD domains.
- * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
- */
- while ((bus = pci_find_next_bus(bus))
- && (!pci_domain_nr(bus) || pci_domain_nr(bus) > 0xffff))
- ;
- if (bus)
- return -EPERM;
-
- type->topology = kcalloc(uncore_max_dies(), sizeof(u64), GFP_KERNEL);
+ type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
+ GFP_KERNEL);
if (!type->topology)
return -ENOMEM;
- for (i = 0; i < uncore_max_dies(); i++) {
- ret = skx_msr_cpu_bus_read(die_to_cpu(i), &type->topology[i]);
- if (ret) {
- kfree(type->topology);
- type->topology = NULL;
- return ret;
- }
+ for (die = 0; die < uncore_max_dies(); die++) {
+ ret = skx_msr_cpu_bus_read(die_to_cpu(die),
+ &type->topology[die].configuration);
+ if (ret)
+ break;
+
+ ret = uncore_die_to_segment(die);
+ if (ret < 0)
+ break;
+
+ type->topology[die].segment = ret;
}
- return 0;
+ if (ret < 0) {
+ kfree(type->topology);
+ type->topology = NULL;
+ }
+
+ return ret;
}
static struct attribute_group skx_iio_mapping_group = {
@@ -3785,7 +3786,8 @@ static const struct attribute_group *skx_iio_attr_update[] = {
NULL,
};
-static int skx_iio_set_mapping(struct intel_uncore_type *type)
+static int
+pmu_iio_set_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
char buf[64];
int ret;
@@ -3793,8 +3795,8 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type)
struct attribute **attrs = NULL;
struct dev_ext_attribute *eas = NULL;
- ret = skx_iio_get_topology(type);
- if (ret)
+ ret = type->get_topology(type);
+ if (ret < 0)
goto clear_attr_update;
ret = -ENOMEM;
@@ -3820,7 +3822,7 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type)
eas[die].var = (void *)die;
attrs[die] = &eas[die].attr.attr;
}
- skx_iio_mapping_group.attrs = attrs;
+ ag->attrs = attrs;
return 0;
err:
@@ -3834,6 +3836,11 @@ clear_attr_update:
return ret;
}
+static int skx_iio_set_mapping(struct intel_uncore_type *type)
+{
+ return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
+}
+
static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
struct attribute **attr = skx_iio_mapping_group.attrs;
@@ -3864,6 +3871,7 @@ static struct intel_uncore_type skx_uncore_iio = {
.ops = &skx_uncore_iio_ops,
.format_group = &skx_uncore_iio_format_group,
.attr_update = skx_iio_attr_update,
+ .get_topology = skx_iio_get_topology,
.set_mapping = skx_iio_set_mapping,
.cleanup_mapping = skx_iio_cleanup_mapping,
};
@@ -4406,6 +4414,91 @@ static const struct attribute_group snr_uncore_iio_format_group = {
.attrs = snr_uncore_iio_formats_attr,
};
+static umode_t
+snr_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
+{
+ /* Root bus 0x00 is valid only for pmu_idx = 1. */
+ return pmu_iio_mapping_visible(kobj, attr, die, 1);
+}
+
+static struct attribute_group snr_iio_mapping_group = {
+ .is_visible = snr_iio_mapping_visible,
+};
+
+static const struct attribute_group *snr_iio_attr_update[] = {
+ &snr_iio_mapping_group,
+ NULL,
+};
+
+static int sad_cfg_iio_topology(struct intel_uncore_type *type, u8 *sad_pmon_mapping)
+{
+ u32 sad_cfg;
+ int die, stack_id, ret = -EPERM;
+ struct pci_dev *dev = NULL;
+
+ type->topology = kcalloc(uncore_max_dies(), sizeof(*type->topology),
+ GFP_KERNEL);
+ if (!type->topology)
+ return -ENOMEM;
+
+ while ((dev = pci_get_device(PCI_VENDOR_ID_INTEL, SNR_ICX_MESH2IIO_MMAP_DID, dev))) {
+ ret = pci_read_config_dword(dev, SNR_ICX_SAD_CONTROL_CFG, &sad_cfg);
+ if (ret) {
+ ret = pcibios_err_to_errno(ret);
+ break;
+ }
+
+ die = uncore_pcibus_to_dieid(dev->bus);
+ stack_id = SAD_CONTROL_STACK_ID(sad_cfg);
+ if (die < 0 || stack_id >= type->num_boxes) {
+ ret = -EPERM;
+ break;
+ }
+
+ /* Convert stack id from SAD_CONTROL to PMON notation. */
+ stack_id = sad_pmon_mapping[stack_id];
+
+ ((u8 *)&(type->topology[die].configuration))[stack_id] = dev->bus->number;
+ type->topology[die].segment = pci_domain_nr(dev->bus);
+ }
+
+ if (ret) {
+ kfree(type->topology);
+ type->topology = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * SNR has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
+ */
+enum {
+ SNR_QAT_PMON_ID,
+ SNR_CBDMA_DMI_PMON_ID,
+ SNR_NIS_PMON_ID,
+ SNR_DLB_PMON_ID,
+ SNR_PCIE_GEN3_PMON_ID
+};
+
+static u8 snr_sad_pmon_mapping[] = {
+ SNR_CBDMA_DMI_PMON_ID,
+ SNR_PCIE_GEN3_PMON_ID,
+ SNR_DLB_PMON_ID,
+ SNR_NIS_PMON_ID,
+ SNR_QAT_PMON_ID
+};
+
+static int snr_iio_get_topology(struct intel_uncore_type *type)
+{
+ return sad_cfg_iio_topology(type, snr_sad_pmon_mapping);
+}
+
+static int snr_iio_set_mapping(struct intel_uncore_type *type)
+{
+ return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
+}
+
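Putting the pieces together: sad_cfg_iio_topology() reads the stack id in SAD_CONTROL_CFG numbering, and the per-platform table renames it into the PMON index used for pmu_idx. A worked example with a made-up config value:

u32 sad_cfg = 0x15;				/* hypothetical, bits [6:4] = 1 */
int sad_id = SAD_CONTROL_STACK_ID(sad_cfg);	/* 1 */
u8 pmon_id = snr_sad_pmon_mapping[sad_id];	/* SNR_PCIE_GEN3_PMON_ID (4) */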
static struct intel_uncore_type snr_uncore_iio = {
.name = "iio",
.num_counters = 4,
@@ -4419,6 +4512,10 @@ static struct intel_uncore_type snr_uncore_iio = {
.msr_offset = SNR_IIO_MSR_OFFSET,
.ops = &ivbep_uncore_msr_ops,
.format_group = &snr_uncore_iio_format_group,
+ .attr_update = snr_iio_attr_update,
+ .get_topology = snr_iio_get_topology,
+ .set_mapping = snr_iio_set_mapping,
+ .cleanup_mapping = skx_iio_cleanup_mapping,
};
static struct intel_uncore_type snr_uncore_irp = {
@@ -4946,6 +5043,53 @@ static struct event_constraint icx_uncore_iio_constraints[] = {
EVENT_CONSTRAINT_END
};
+static umode_t
+icx_iio_mapping_visible(struct kobject *kobj, struct attribute *attr, int die)
+{
+ /* Root bus 0x00 is valid only for pmu_idx = 5. */
+ return pmu_iio_mapping_visible(kobj, attr, die, 5);
+}
+
+static struct attribute_group icx_iio_mapping_group = {
+ .is_visible = icx_iio_mapping_visible,
+};
+
+static const struct attribute_group *icx_iio_attr_update[] = {
+ &icx_iio_mapping_group,
+ NULL,
+};
+
+/*
+ * ICX has a static mapping of stack IDs from SAD_CONTROL_CFG notation to PMON
+ */
+enum {
+ ICX_PCIE1_PMON_ID,
+ ICX_PCIE2_PMON_ID,
+ ICX_PCIE3_PMON_ID,
+ ICX_PCIE4_PMON_ID,
+ ICX_PCIE5_PMON_ID,
+ ICX_CBDMA_DMI_PMON_ID
+};
+
+static u8 icx_sad_pmon_mapping[] = {
+ ICX_CBDMA_DMI_PMON_ID,
+ ICX_PCIE1_PMON_ID,
+ ICX_PCIE2_PMON_ID,
+ ICX_PCIE3_PMON_ID,
+ ICX_PCIE4_PMON_ID,
+ ICX_PCIE5_PMON_ID,
+};
+
+static int icx_iio_get_topology(struct intel_uncore_type *type)
+{
+ return sad_cfg_iio_topology(type, icx_sad_pmon_mapping);
+}
+
+static int icx_iio_set_mapping(struct intel_uncore_type *type)
+{
+ return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
+}
+
static struct intel_uncore_type icx_uncore_iio = {
.name = "iio",
.num_counters = 4,
@@ -4960,6 +5104,10 @@ static struct intel_uncore_type icx_uncore_iio = {
.constraints = icx_uncore_iio_constraints,
.ops = &skx_uncore_iio_ops,
.format_group = &snr_uncore_iio_format_group,
+ .attr_update = icx_iio_attr_update,
+ .get_topology = icx_iio_get_topology,
+ .set_mapping = icx_iio_set_mapping,
+ .cleanup_mapping = skx_iio_cleanup_mapping,
};
static struct intel_uncore_type icx_uncore_irp = {
@@ -5112,9 +5260,10 @@ static struct intel_uncore_type icx_uncore_m2m = {
.perf_ctr = SNR_M2M_PCI_PMON_CTR0,
.event_ctl = SNR_M2M_PCI_PMON_CTL0,
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
+ .event_mask_ext = SNR_M2M_PCI_PMON_UMASK_EXT,
.box_ctl = SNR_M2M_PCI_PMON_BOX_CTL,
.ops = &snr_m2m_uncore_pci_ops,
- .format_group = &skx_uncore_format_group,
+ .format_group = &snr_m2m_uncore_format_group,
};
static struct attribute *icx_upi_uncore_formats_attr[] = {
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 680404c58cb1..c853b28efa33 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -100,6 +100,8 @@ static bool test_intel(int idx, void *data)
case INTEL_FAM6_TIGERLAKE_L:
case INTEL_FAM6_TIGERLAKE:
case INTEL_FAM6_ROCKETLAKE:
+ case INTEL_FAM6_ALDERLAKE:
+ case INTEL_FAM6_ALDERLAKE_L:
if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
return true;
break;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 53b2b5fc23bc..2bf1c7ea2758 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -15,6 +15,7 @@
#include <linux/perf_event.h>
#include <asm/intel_ds.h>
+#include <asm/cpu.h>
/* To enable MSR tracing please use the generic trace points. */
@@ -228,7 +229,7 @@ struct cpu_hw_events {
*/
struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
- unsigned long running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+ unsigned long dirty[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
int enabled;
int n_events; /* the # of events in the below arrays */
@@ -327,6 +328,8 @@ struct cpu_hw_events {
int n_pair; /* Large increment events */
void *kfree_on_online[X86_PERF_KFREE_MAX];
+
+ struct pmu *pmu;
};
#define __EVENT_CONSTRAINT_RANGE(c, e, n, m, w, o, f) { \
@@ -630,6 +633,71 @@ enum {
x86_lbr_exclusive_max,
};
+struct x86_hybrid_pmu {
+ struct pmu pmu;
+ const char *name;
+ u8 cpu_type;
+ cpumask_t supported_cpus;
+ union perf_capabilities intel_cap;
+ u64 intel_ctrl;
+ int max_pebs_events;
+ int num_counters;
+ int num_counters_fixed;
+ struct event_constraint unconstrained;
+
+ u64 hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+ u64 hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+ struct event_constraint *event_constraints;
+ struct event_constraint *pebs_constraints;
+ struct extra_reg *extra_regs;
+};
+
+static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
+{
+ return container_of(pmu, struct x86_hybrid_pmu, pmu);
+}
+
+extern struct static_key_false perf_is_hybrid;
+#define is_hybrid() static_branch_unlikely(&perf_is_hybrid)
+
+#define hybrid(_pmu, _field) \
+(*({ \
+ typeof(&x86_pmu._field) __Fp = &x86_pmu._field; \
+ \
+ if (is_hybrid() && (_pmu)) \
+ __Fp = &hybrid_pmu(_pmu)->_field; \
+ \
+ __Fp; \
+}))
+
+#define hybrid_var(_pmu, _var) \
+(*({ \
+ typeof(&_var) __Fp = &_var; \
+ \
+ if (is_hybrid() && (_pmu)) \
+ __Fp = &hybrid_pmu(_pmu)->_var; \
+ \
+ __Fp; \
+}))
+
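Because both macros expand to a dereferenced pointer inside a statement expression, they yield lvalues, so the same form serves reads and writes. Typical call sites, sketched with event standing for a struct perf_event *:

/* read: per-PMU control mask on hybrid parts, global x86_pmu copy otherwise */
u64 intel_ctrl = hybrid(event->pmu, intel_ctrl);

/* a write works too, since hybrid() is an lvalue */
hybrid(pmu, max_pebs_events) = 8;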
+enum hybrid_pmu_type {
+ hybrid_big = 0x40,
+ hybrid_small = 0x20,
+
+ hybrid_big_small = hybrid_big | hybrid_small,
+};
+
+#define X86_HYBRID_PMU_ATOM_IDX 0
+#define X86_HYBRID_PMU_CORE_IDX 1
+
+#define X86_HYBRID_NUM_PMUS 2
+
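The hybrid_big/hybrid_small values (0x40/0x20) are not arbitrary: they match the core-type byte that CPUID leaf 0x1A reports in EAX[31:24] for Core and Atom parts, which is what get_this_hybrid_cpu_type() returns, so matching a CPU to its PMU reduces to a mask test. A sketch:

static bool pmu_serves_cpu_type(struct x86_hybrid_pmu *pmu, u8 cpu_type)
{
	/* cpu_type is 0x40 (Core) or 0x20 (Atom) from CPUID.1A:EAX[31:24] */
	return pmu->cpu_type & cpu_type;
}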
/*
* struct x86_pmu - generic x86 pmu
*/
@@ -816,6 +884,19 @@ struct x86_pmu {
int (*check_period) (struct perf_event *event, u64 period);
int (*aux_output_match) (struct perf_event *event);
+
+ int (*filter_match)(struct perf_event *event);
+ /*
+ * Hybrid support
+ *
+ * Most PMU capabilities are the same among different hybrid PMUs.
+ * The global x86_pmu saves the architecture capabilities, which
+ * are available for all PMUs. The hybrid_pmu only includes the
+ * unique capabilities.
+ */
+ int num_hybrid_pmus;
+ struct x86_hybrid_pmu *hybrid_pmu;
+ u8 (*get_hybrid_cpu_type) (void);
};
struct x86_perf_task_context_opt {
@@ -905,7 +986,23 @@ static struct perf_pmu_events_ht_attr event_attr_##v = { \
.event_str_ht = ht, \
}
-struct pmu *x86_get_pmu(void);
+#define EVENT_ATTR_STR_HYBRID(_name, v, str, _pmu) \
+static struct perf_pmu_events_hybrid_attr event_attr_##v = { \
+ .attr = __ATTR(_name, 0444, events_hybrid_sysfs_show, NULL),\
+ .id = 0, \
+ .event_str = str, \
+ .pmu_type = _pmu, \
+}
+
+#define FORMAT_HYBRID_PTR(_id) (&format_attr_hybrid_##_id.attr.attr)
+
+#define FORMAT_ATTR_HYBRID(_name, _pmu) \
+static struct perf_pmu_format_hybrid_attr format_attr_hybrid_##_name = {\
+ .attr = __ATTR_RO(_name), \
+ .pmu_type = _pmu, \
+}
+
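A hedged example of how the hybrid attribute macros are meant to be instantiated; the event string and names follow the Alder Lake enabling elsewhere in this series and are quoted from memory:

EVENT_ATTR_STR_HYBRID(slots, slots_adl, "event=0x00,umask=0x4", hybrid_big);
FORMAT_ATTR_HYBRID(in_tx, hybrid_big);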
+struct pmu *x86_get_pmu(unsigned int cpu);
extern struct x86_pmu x86_pmu __read_mostly;
static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx)
@@ -964,6 +1061,9 @@ static inline int x86_pmu_rdpmc_index(int index)
return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
}
+bool check_hw_exists(struct pmu *pmu, int num_counters,
+ int num_counters_fixed);
+
int x86_add_exclusive(unsigned int what);
void x86_del_exclusive(unsigned int what);
@@ -1027,6 +1127,11 @@ void x86_pmu_enable_event(struct perf_event *event);
int x86_pmu_handle_irq(struct pt_regs *regs);
+void x86_pmu_show_pmu_cap(int num_counters, int num_counters_fixed,
+ u64 intel_ctrl);
+
+void x86_pmu_update_cpu_context(struct pmu *pmu, int cpu);
+
extern struct event_constraint emptyconstraint;
extern struct event_constraint unconstrained;
@@ -1067,10 +1172,15 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
char *page);
+ssize_t events_hybrid_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page);
-static inline bool fixed_counter_disabled(int i)
+static inline bool fixed_counter_disabled(int i, struct pmu *pmu)
{
- return !(x86_pmu.intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
+ u64 intel_ctrl = hybrid(pmu, intel_ctrl);
+
+ return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
}
#ifdef CONFIG_CPU_SUP_AMD
@@ -1135,6 +1245,8 @@ void reserve_ds_buffers(void);
void release_lbr_buffers(void);
+void reserve_lbr_buffers(void);
+
extern struct event_constraint bts_constraint;
extern struct event_constraint vlbr_constraint;
@@ -1154,6 +1266,8 @@ extern struct event_constraint intel_glm_pebs_event_constraints[];
extern struct event_constraint intel_glp_pebs_event_constraints[];
+extern struct event_constraint intel_grt_pebs_event_constraints[];
+
extern struct event_constraint intel_nehalem_pebs_event_constraints[];
extern struct event_constraint intel_westmere_pebs_event_constraints[];
@@ -1282,6 +1396,10 @@ static inline void release_lbr_buffers(void)
{
}
+static inline void reserve_lbr_buffers(void)
+{
+}
+
static inline int intel_pmu_init(void)
{
return 0;
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index f42a70496a24..85feafacc445 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -764,13 +764,14 @@ static struct rapl_model model_spr = {
.rapl_msrs = intel_rapl_spr_msrs,
};
-static struct rapl_model model_amd_fam17h = {
+static struct rapl_model model_amd_hygon = {
.events = BIT(PERF_RAPL_PKG),
.msr_power_unit = MSR_AMD_RAPL_POWER_UNIT,
.rapl_msrs = amd_rapl_msrs,
};
static const struct x86_cpu_id rapl_model_match[] __initconst = {
+ X86_MATCH_FEATURE(X86_FEATURE_RAPL, &model_amd_hygon),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &model_snb),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &model_snbep),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &model_snb),
@@ -800,10 +801,9 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &model_hsx),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &model_skl),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &model_skl),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &model_skl),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr),
- X86_MATCH_VENDOR_FAM(AMD, 0x17, &model_amd_fam17h),
- X86_MATCH_VENDOR_FAM(HYGON, 0x18, &model_amd_fam17h),
- X86_MATCH_VENDOR_FAM(AMD, 0x19, &model_amd_fam17h),
{},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c
index e68827e604ad..949d845c922b 100644
--- a/arch/x86/events/zhaoxin/core.c
+++ b/arch/x86/events/zhaoxin/core.c
@@ -494,7 +494,7 @@ static __init void zhaoxin_arch_events_quirk(void)
{
int bit;
- /* disable event that reported as not presend by cpuid */
+ /* disable events that are reported as not present by cpuid */
for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(zx_arch_events_map)) {
zx_pmon_event_map[zx_arch_events_map[bit].id] = 0;
pr_warn("CPUID marked event: \'%s\' unavailable\n",
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 284e73661a18..90e682a92820 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -60,9 +60,11 @@ static u32 hv_apic_read(u32 reg)
switch (reg) {
case APIC_EOI:
rdmsr(HV_X64_MSR_EOI, reg_val, hi);
+ (void)hi;
return reg_val;
case APIC_TASKPRI:
rdmsr(HV_X64_MSR_TPR, reg_val, hi);
+ (void)hi;
return reg_val;
default:
@@ -103,7 +105,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
struct hv_send_ipi_ex *ipi_arg;
unsigned long flags;
int nr_bank = 0;
- int ret = 1;
+ u64 status = HV_STATUS_INVALID_PARAMETER;
if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
return false;
@@ -128,19 +130,19 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
if (!nr_bank)
ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
- ret = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
+ status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
ipi_arg, NULL);
ipi_mask_ex_done:
local_irq_restore(flags);
- return ((ret == 0) ? true : false);
+ return hv_result_success(status);
}
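hv_result() and hv_result_success() are the accessors this series converts the open-coded HV_HYPERCALL_RESULT_MASK checks to. Roughly, treating the exact form as a paraphrase of the Hyper-V headers, they are:

static inline int hv_result(u64 status)
{
	return status & HV_HYPERCALL_RESULT_MASK;
}

static inline bool hv_result_success(u64 status)
{
	return hv_result(status) == HV_STATUS_SUCCESS;
}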
static bool __send_ipi_mask(const struct cpumask *mask, int vector)
{
int cur_cpu, vcpu;
struct hv_send_ipi ipi_arg;
- int ret = 1;
+ u64 status;
trace_hyperv_send_ipi_mask(mask, vector);
@@ -184,9 +186,9 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
__set_bit(vcpu, (unsigned long *)&ipi_arg.cpu_mask);
}
- ret = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
+ status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
ipi_arg.cpu_mask);
- return ((ret == 0) ? true : false);
+ return hv_result_success(status);
do_ex_hypercall:
return __send_ipi_mask_ex(mask, vector);
@@ -195,6 +197,7 @@ do_ex_hypercall:
static bool __send_ipi_one(int cpu, int vector)
{
int vp = hv_cpu_number_to_vp_number(cpu);
+ u64 status;
trace_hyperv_send_ipi_one(cpu, vector);
@@ -207,7 +210,8 @@ static bool __send_ipi_one(int cpu, int vector)
if (vp >= 64)
return __send_ipi_mask_ex(cpumask_of(cpu), vector);
- return !hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
+ status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, vector, BIT_ULL(vp));
+ return hv_result_success(status);
}
static void hv_send_ipi(int cpu, int vector)
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index b81047dec1da..6952e219cba3 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -54,28 +54,6 @@ EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg);
u32 hv_max_vp_index;
EXPORT_SYMBOL_GPL(hv_max_vp_index);
-void *hv_alloc_hyperv_page(void)
-{
- BUILD_BUG_ON(PAGE_SIZE != HV_HYP_PAGE_SIZE);
-
- return (void *)__get_free_page(GFP_KERNEL);
-}
-EXPORT_SYMBOL_GPL(hv_alloc_hyperv_page);
-
-void *hv_alloc_hyperv_zeroed_page(void)
-{
- BUILD_BUG_ON(PAGE_SIZE != HV_HYP_PAGE_SIZE);
-
- return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-}
-EXPORT_SYMBOL_GPL(hv_alloc_hyperv_zeroed_page);
-
-void hv_free_hyperv_page(unsigned long addr)
-{
- free_page(addr);
-}
-EXPORT_SYMBOL_GPL(hv_free_hyperv_page);
-
static int hv_cpu_init(unsigned int cpu)
{
u64 msr_vp_index;
@@ -97,7 +75,7 @@ static int hv_cpu_init(unsigned int cpu)
*output_arg = page_address(pg + 1);
}
- hv_get_vp_index(msr_vp_index);
+ msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX);
hv_vp_index[smp_processor_id()] = msr_vp_index;
@@ -162,7 +140,7 @@ EXPORT_SYMBOL_GPL(hyperv_stop_tsc_emulation);
static inline bool hv_reenlightenment_available(void)
{
/*
- * Check for required features and priviliges to make TSC frequency
+ * Check for required features and privileges to make TSC frequency
* change notifications work.
*/
return ms_hyperv.features & HV_ACCESS_FREQUENCY_MSRS &&
@@ -292,7 +270,7 @@ static int hv_suspend(void)
/*
* Reset the hypercall page as it is going to be invalidated
- * accross hibernation. Setting hv_hypercall_pg to NULL ensures
+ * across hibernation. Setting hv_hypercall_pg to NULL ensures
* that any subsequent hypercall operation fails safely instead of
* crashing due to an access of an invalid page. The hypercall page
* pointer is restored on resume.
@@ -349,7 +327,7 @@ static void __init hv_stimer_setup_percpu_clockev(void)
* Ignore any errors in setting up stimer clockevents
* as we can run with the LAPIC timer as a fallback.
*/
- (void)hv_stimer_alloc();
+ (void)hv_stimer_alloc(false);
/*
* Still register the LAPIC timer, because the direct-mode STIMER is
@@ -369,7 +347,7 @@ static void __init hv_get_partition_id(void)
local_irq_save(flags);
output_page = *this_cpu_ptr(hyperv_pcpu_output_arg);
status = hv_do_hypercall(HVCALL_GET_PARTITION_ID, NULL, output_page);
- if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) {
+ if (!hv_result_success(status)) {
/* No point in proceeding if this failed */
pr_err("Failed to get partition ID: %lld\n", status);
BUG();
@@ -520,6 +498,8 @@ void __init hyperv_init(void)
x86_init.irqs.create_pci_msi_domain = hv_create_pci_msi_domain;
#endif
+ /* Query the VMs extended capability once, so that it can be cached. */
+ hv_query_ext_cap(0);
return;
remove_cpuhp_state:
@@ -593,33 +573,6 @@ void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die)
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);
-/**
- * hyperv_report_panic_msg - report panic message to Hyper-V
- * @pa: physical address of the panic page containing the message
- * @size: size of the message in the page
- */
-void hyperv_report_panic_msg(phys_addr_t pa, size_t size)
-{
- /*
- * P3 to contain the physical address of the panic page & P4 to
- * contain the size of the panic data in that page. Rest of the
- * registers are no-op when the NOTIFY_MSG flag is set.
- */
- wrmsrl(HV_X64_MSR_CRASH_P0, 0);
- wrmsrl(HV_X64_MSR_CRASH_P1, 0);
- wrmsrl(HV_X64_MSR_CRASH_P2, 0);
- wrmsrl(HV_X64_MSR_CRASH_P3, pa);
- wrmsrl(HV_X64_MSR_CRASH_P4, size);
-
- /*
- * Let Hyper-V know there is crash data available along with
- * the panic message.
- */
- wrmsrl(HV_X64_MSR_CRASH_CTL,
- (HV_CRASH_CTL_CRASH_NOTIFY | HV_CRASH_CTL_CRASH_NOTIFY_MSG));
-}
-EXPORT_SYMBOL_GPL(hyperv_report_panic_msg);
-
bool hv_is_hyperv_initialized(void)
{
union hv_x64_msr_hypercall_contents hypercall_msr;
@@ -650,7 +603,7 @@ EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);
enum hv_isolation_type hv_get_isolation_type(void)
{
- if (!(ms_hyperv.features_b & HV_ISOLATION))
+ if (!(ms_hyperv.priv_high & HV_ISOLATION))
return HV_ISOLATION_TYPE_NONE;
return FIELD_GET(HV_ISOLATION_TYPE, ms_hyperv.isolation_config_b);
}
diff --git a/arch/x86/hyperv/hv_proc.c b/arch/x86/hyperv/hv_proc.c
index 60461e598239..68a0843d4750 100644
--- a/arch/x86/hyperv/hv_proc.c
+++ b/arch/x86/hyperv/hv_proc.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>
@@ -93,10 +92,9 @@ int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
status = hv_do_rep_hypercall(HVCALL_DEPOSIT_MEMORY,
page_count, 0, input_page, NULL);
local_irq_restore(flags);
-
- if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS) {
+ if (!hv_result_success(status)) {
pr_err("Failed to deposit pages: %lld\n", status);
- ret = status;
+ ret = hv_result(status);
goto err_free_allocations;
}
@@ -122,7 +120,7 @@ int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
struct hv_add_logical_processor_out *output;
u64 status;
unsigned long flags;
- int ret = 0;
+ int ret = HV_STATUS_SUCCESS;
int pxm = node_to_pxm(node);
/*
@@ -148,13 +146,11 @@ int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
input, output);
local_irq_restore(flags);
- status &= HV_HYPERCALL_RESULT_MASK;
-
- if (status != HV_STATUS_INSUFFICIENT_MEMORY) {
- if (status != HV_STATUS_SUCCESS) {
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (!hv_result_success(status)) {
pr_err("%s: cpu %u apic ID %u, %lld\n", __func__,
lp_index, apic_id, status);
- ret = status;
+ ret = hv_result(status);
}
break;
}
@@ -169,7 +165,7 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
struct hv_create_vp *input;
u64 status;
unsigned long irq_flags;
- int ret = 0;
+ int ret = HV_STATUS_SUCCESS;
int pxm = node_to_pxm(node);
/* Root VPs don't seem to need pages deposited */
@@ -200,13 +196,11 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL);
local_irq_restore(irq_flags);
- status &= HV_HYPERCALL_RESULT_MASK;
-
- if (status != HV_STATUS_INSUFFICIENT_MEMORY) {
- if (status != HV_STATUS_SUCCESS) {
+ if (hv_result(status) != HV_STATUS_INSUFFICIENT_MEMORY) {
+ if (!hv_result_success(status)) {
pr_err("%s: vcpu %u, lp %u, %lld\n", __func__,
vp_index, flags, status);
- ret = status;
+ ret = hv_result(status);
}
break;
}
diff --git a/arch/x86/hyperv/hv_spinlock.c b/arch/x86/hyperv/hv_spinlock.c
index f3270c1fc48c..91cfe698bde0 100644
--- a/arch/x86/hyperv/hv_spinlock.c
+++ b/arch/x86/hyperv/hv_spinlock.c
@@ -25,7 +25,6 @@ static void hv_qlock_kick(int cpu)
static void hv_qlock_wait(u8 *byte, u8 val)
{
- unsigned long msr_val;
unsigned long flags;
if (in_nmi())
@@ -48,8 +47,13 @@ static void hv_qlock_wait(u8 *byte, u8 val)
/*
* Only issue the rdmsrl() when the lock state has not changed.
*/
- if (READ_ONCE(*byte) == val)
+ if (READ_ONCE(*byte) == val) {
+ unsigned long msr_val;
+
rdmsrl(HV_X64_MSR_GUEST_IDLE, msr_val);
+
+ (void)msr_val;
+ }
local_irq_restore(flags);
}
diff --git a/arch/x86/hyperv/irqdomain.c b/arch/x86/hyperv/irqdomain.c
index 4421a8d92e23..514fc64e23d5 100644
--- a/arch/x86/hyperv/irqdomain.c
+++ b/arch/x86/hyperv/irqdomain.c
@@ -63,10 +63,10 @@ static int hv_map_interrupt(union hv_device_id device_id, bool level,
local_irq_restore(flags);
- if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
+ if (!hv_result_success(status))
pr_err("%s: hypercall failed, status %lld\n", __func__, status);
- return status & HV_HYPERCALL_RESULT_MASK;
+ return hv_result(status);
}
static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry)
@@ -88,7 +88,7 @@ static int hv_unmap_interrupt(u64 id, struct hv_interrupt_entry *old_entry)
status = hv_do_hypercall(HVCALL_UNMAP_DEVICE_INTERRUPT, input, NULL);
local_irq_restore(flags);
- return status & HV_HYPERCALL_RESULT_MASK;
+ return hv_result(status);
}
#ifdef CONFIG_PCI_MSI
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index 2c87350c1fb0..bd13736d0c05 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -52,16 +52,16 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
return gva_n - offset;
}
-static void hyperv_flush_tlb_others(const struct cpumask *cpus,
- const struct flush_tlb_info *info)
+static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
+ const struct flush_tlb_info *info)
{
int cpu, vcpu, gva_n, max_gvas;
struct hv_tlb_flush **flush_pcpu;
struct hv_tlb_flush *flush;
- u64 status = U64_MAX;
+ u64 status;
unsigned long flags;
- trace_hyperv_mmu_flush_tlb_others(cpus, info);
+ trace_hyperv_mmu_flush_tlb_multi(cpus, info);
if (!hv_hypercall_pg)
goto do_native;
@@ -161,10 +161,10 @@ do_ex_hypercall:
check_status:
local_irq_restore(flags);
- if (!(status & HV_HYPERCALL_RESULT_MASK))
+ if (hv_result_success(status))
return;
do_native:
- native_flush_tlb_others(cpus, info);
+ native_flush_tlb_multi(cpus, info);
}
static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
@@ -176,7 +176,7 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
u64 status;
if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
- return U64_MAX;
+ return HV_STATUS_INVALID_PARAMETER;
flush_pcpu = (struct hv_tlb_flush_ex **)
this_cpu_ptr(hyperv_pcpu_input_arg);
@@ -201,7 +201,7 @@ static u64 hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
nr_bank = cpumask_to_vpset(&(flush->hv_vp_set), cpus);
if (nr_bank < 0)
- return U64_MAX;
+ return HV_STATUS_INVALID_PARAMETER;
/*
* We can flush not more than max_gvas with one hypercall. Flush the
@@ -239,6 +239,6 @@ void hyperv_setup_mmu_ops(void)
return;
pr_info("Using hypercall for remote TLB flush\n");
- pv_ops.mmu.flush_tlb_others = hyperv_flush_tlb_others;
+ pv_ops.mmu.flush_tlb_multi = hyperv_flush_tlb_multi;
pv_ops.mmu.tlb_remove_table = tlb_remove_table;
}
diff --git a/arch/x86/hyperv/nested.c b/arch/x86/hyperv/nested.c
index dd0a843f766d..5d70968c8538 100644
--- a/arch/x86/hyperv/nested.c
+++ b/arch/x86/hyperv/nested.c
@@ -47,7 +47,7 @@ int hyperv_flush_guest_mapping(u64 as)
flush, NULL);
local_irq_restore(flags);
- if (!(status & HV_HYPERCALL_RESULT_MASK))
+ if (hv_result_success(status))
ret = 0;
fault:
@@ -92,7 +92,7 @@ int hyperv_flush_guest_mapping_range(u64 as,
{
struct hv_guest_mapping_flush_list **flush_pcpu;
struct hv_guest_mapping_flush_list *flush;
- u64 status = 0;
+ u64 status;
unsigned long flags;
int ret = -ENOTSUPP;
int gpa_n = 0;
@@ -125,10 +125,10 @@ int hyperv_flush_guest_mapping_range(u64 as,
local_irq_restore(flags);
- if (!(status & HV_HYPERCALL_RESULT_MASK))
+ if (hv_result_success(status))
ret = 0;
else
- ret = status;
+ ret = hv_result(status);
fault:
trace_hyperv_nested_flush_guest_mapping_range(as, ret);
return ret;
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index a09fc37ead9d..5e5b9fc2747f 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -203,7 +203,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
PROT_READ | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
- MAP_EXECUTABLE | MAP_32BIT,
+ MAP_32BIT,
fd_offset);
if (error != N_TXTADDR(ex))
@@ -212,7 +212,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE |
- MAP_EXECUTABLE | MAP_32BIT,
+ MAP_32BIT,
fd_offset + ex.a_text);
if (error != N_DATADDR(ex))
return error;
diff --git a/arch/x86/include/asm/Kbuild b/arch/x86/include/asm/Kbuild
index b19ec8282d50..1e51650b79d7 100644
--- a/arch/x86/include/asm/Kbuild
+++ b/arch/x86/include/asm/Kbuild
@@ -3,6 +3,7 @@
generated-y += syscalls_32.h
generated-y += syscalls_64.h
+generated-y += syscalls_x32.h
generated-y += unistd_32_ia32.h
generated-y += unistd_64_x32.h
generated-y += xen-hypercalls.h
diff --git a/arch/x86/include/asm/agp.h b/arch/x86/include/asm/agp.h
index 62da760d6d5a..cd7b14322035 100644
--- a/arch/x86/include/asm/agp.h
+++ b/arch/x86/include/asm/agp.h
@@ -9,7 +9,7 @@
* Functions to keep the agpgart mappings coherent with the MMU. The
* GART gives the CPU a physical alias of pages in memory. The alias
* region is mapped uncacheable. Make sure there are no conflicting
- * mappings with different cachability attributes for the same
+ * mappings with different cacheability attributes for the same
* page. This avoids data corruption on some CPUs.
*/
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
deleted file mode 100644
index 464034db299f..000000000000
--- a/arch/x86/include/asm/alternative-asm.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_ALTERNATIVE_ASM_H
-#define _ASM_X86_ALTERNATIVE_ASM_H
-
-#ifdef __ASSEMBLY__
-
-#include <asm/asm.h>
-
-#ifdef CONFIG_SMP
- .macro LOCK_PREFIX
-672: lock
- .pushsection .smp_locks,"a"
- .balign 4
- .long 672b - .
- .popsection
- .endm
-#else
- .macro LOCK_PREFIX
- .endm
-#endif
-
-/*
- * objtool annotation to ignore the alternatives and only consider the original
- * instruction(s).
- */
-.macro ANNOTATE_IGNORE_ALTERNATIVE
- .Lannotate_\@:
- .pushsection .discard.ignore_alts
- .long .Lannotate_\@ - .
- .popsection
-.endm
-
-/*
- * Issue one struct alt_instr descriptor entry (need to put it into
- * the section .altinstructions, see below). This entry contains
- * enough information for the alternatives patching code to patch an
- * instruction. See apply_alternatives().
- */
-.macro altinstruction_entry orig alt feature orig_len alt_len pad_len
- .long \orig - .
- .long \alt - .
- .word \feature
- .byte \orig_len
- .byte \alt_len
- .byte \pad_len
-.endm
-
-/*
- * Define an alternative between two instructions. If @feature is
- * present, early code in apply_alternatives() replaces @oldinstr with
- * @newinstr. ".skip" directive takes care of proper instruction padding
- * in case @newinstr is longer than @oldinstr.
- */
-.macro ALTERNATIVE oldinstr, newinstr, feature
-140:
- \oldinstr
-141:
- .skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)),0x90
-142:
-
- .pushsection .altinstructions,"a"
- altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f,142b-141b
- .popsection
-
- .pushsection .altinstr_replacement,"ax"
-143:
- \newinstr
-144:
- .popsection
-.endm
-
-#define old_len 141b-140b
-#define new_len1 144f-143f
-#define new_len2 145f-144f
-
-/*
- * gas compatible max based on the idea from:
- * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
- *
- * The additional "-" is needed because gas uses a "true" value of -1.
- */
-#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
-
-
-/*
- * Same as ALTERNATIVE macro above but for two alternatives. If CPU
- * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
- * @feature2, it replaces @oldinstr with @feature2.
- */
-.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
-140:
- \oldinstr
-141:
- .skip -((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \
- (alt_max_short(new_len1, new_len2) - (old_len)),0x90
-142:
-
- .pushsection .altinstructions,"a"
- altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f,142b-141b
- altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f,142b-141b
- .popsection
-
- .pushsection .altinstr_replacement,"ax"
-143:
- \newinstr1
-144:
- \newinstr2
-145:
- .popsection
-.endm
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_X86_ALTERNATIVE_ASM_H */
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 13adca37c99a..a3c2315aca12 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -2,13 +2,17 @@
#ifndef _ASM_X86_ALTERNATIVE_H
#define _ASM_X86_ALTERNATIVE_H
-#ifndef __ASSEMBLY__
-
#include <linux/types.h>
-#include <linux/stddef.h>
#include <linux/stringify.h>
#include <asm/asm.h>
+#define ALTINSTR_FLAG_INV (1 << 15)
+#define ALT_NOT(feat) ((feat) | ALTINSTR_FLAG_INV)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stddef.h>
+
/*
* Alternative inline assembly for SMP.
*
@@ -61,7 +65,6 @@ struct alt_instr {
u16 cpuid; /* cpuid bit set for replacement */
u8 instrlen; /* length of original instruction */
u8 replacementlen; /* length of new instruction */
- u8 padlen; /* length of build-time padding */
} __packed;
/*
@@ -100,7 +103,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
#define alt_end_marker "663"
#define alt_slen "662b-661b"
-#define alt_pad_len alt_end_marker"b-662b"
#define alt_total_slen alt_end_marker"b-661b"
#define alt_rlen(num) e_replacement(num)"f-"b_replacement(num)"f"
@@ -147,10 +149,9 @@ static inline int alternatives_text_reserved(void *start, void *end)
" .long " b_replacement(num)"f - .\n" /* new instruction */ \
" .word " __stringify(feature) "\n" /* feature bit */ \
" .byte " alt_total_slen "\n" /* source len */ \
- " .byte " alt_rlen(num) "\n" /* replacement len */ \
- " .byte " alt_pad_len "\n" /* pad len */
+ " .byte " alt_rlen(num) "\n" /* replacement len */
-#define ALTINSTR_REPLACEMENT(newinstr, feature, num) /* replacement */ \
+#define ALTINSTR_REPLACEMENT(newinstr, num) /* replacement */ \
"# ALT: replacement " #num "\n" \
b_replacement(num)":\n\t" newinstr "\n" e_replacement(num) ":\n"
@@ -161,7 +162,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
ALTINSTR_ENTRY(feature, 1) \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
- ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
+ ALTINSTR_REPLACEMENT(newinstr, 1) \
".popsection\n"
#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
@@ -171,10 +172,15 @@ static inline int alternatives_text_reserved(void *start, void *end)
ALTINSTR_ENTRY(feature2, 2) \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
- ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
- ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
+ ALTINSTR_REPLACEMENT(newinstr1, 1) \
+ ALTINSTR_REPLACEMENT(newinstr2, 2) \
".popsection\n"
+/* If @feature is set, patch in @newinstr_yes, otherwise @newinstr_no. */
+#define ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) \
+ ALTERNATIVE_2(oldinstr, newinstr_no, X86_FEATURE_ALWAYS, \
+ newinstr_yes, feature)
+
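Since X86_FEATURE_ALWAYS is set on every CPU, newinstr_no is patched in unconditionally and then overridden by newinstr_yes when @feature is present. An illustrative call, where the feature/instruction pairing is only an example:

/* serialize with LFENCE where it is known to be dispatch-serializing,
 * fall back to MFENCE otherwise */
alternative_ternary("mfence", X86_FEATURE_LFENCE_RDTSC,
		    "lfence", "mfence");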
#define ALTERNATIVE_3(oldinsn, newinsn1, feat1, newinsn2, feat2, newinsn3, feat3) \
OLDINSTR_3(oldinsn, 1, 2, 3) \
".pushsection .altinstructions,\"a\"\n" \
@@ -183,9 +189,9 @@ static inline int alternatives_text_reserved(void *start, void *end)
ALTINSTR_ENTRY(feat3, 3) \
".popsection\n" \
".pushsection .altinstr_replacement, \"ax\"\n" \
- ALTINSTR_REPLACEMENT(newinsn1, feat1, 1) \
- ALTINSTR_REPLACEMENT(newinsn2, feat2, 2) \
- ALTINSTR_REPLACEMENT(newinsn3, feat3, 3) \
+ ALTINSTR_REPLACEMENT(newinsn1, 1) \
+ ALTINSTR_REPLACEMENT(newinsn2, 2) \
+ ALTINSTR_REPLACEMENT(newinsn3, 3) \
".popsection\n"
/*
@@ -206,15 +212,15 @@ static inline int alternatives_text_reserved(void *start, void *end)
#define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
asm_inline volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2) ::: "memory")
+#define alternative_ternary(oldinstr, feature, newinstr_yes, newinstr_no) \
+ asm_inline volatile(ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) ::: "memory")
+
/*
* Alternative inline assembly with input.
*
* Peculiarities:
* No memory clobber here.
* Argument numbers start with 1.
- * Best is to use constraints that are fixed size (like (%1) ... "r")
- * If you use variable sized constraints like "m" or "g" in the
- * replacement make sure to pad to the worst case length.
* Leaving an unused argument 0 to keep API compatibility.
*/
#define alternative_input(oldinstr, newinstr, feature, input...) \
@@ -271,6 +277,115 @@ static inline int alternatives_text_reserved(void *start, void *end)
*/
#define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_SMP
+ .macro LOCK_PREFIX
+672: lock
+ .pushsection .smp_locks,"a"
+ .balign 4
+ .long 672b - .
+ .popsection
+ .endm
+#else
+ .macro LOCK_PREFIX
+ .endm
+#endif
+
+/*
+ * objtool annotation to ignore the alternatives and only consider the original
+ * instruction(s).
+ */
+.macro ANNOTATE_IGNORE_ALTERNATIVE
+ .Lannotate_\@:
+ .pushsection .discard.ignore_alts
+ .long .Lannotate_\@ - .
+ .popsection
+.endm
+
+/*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+ * enough information for the alternatives patching code to patch an
+ * instruction. See apply_alternatives().
+ */
+.macro altinstruction_entry orig alt feature orig_len alt_len
+ .long \orig - .
+ .long \alt - .
+ .word \feature
+ .byte \orig_len
+ .byte \alt_len
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr. ".skip" directive takes care of proper instruction padding
+ * in case @newinstr is longer than @oldinstr.
+ */
+.macro ALTERNATIVE oldinstr, newinstr, feature
+140:
+ \oldinstr
+141:
+ .skip -(((144f-143f)-(141b-140b)) > 0) * ((144f-143f)-(141b-140b)),0x90
+142:
+
+ .pushsection .altinstructions,"a"
+ altinstruction_entry 140b,143f,\feature,142b-140b,144f-143f
+ .popsection
+
+ .pushsection .altinstr_replacement,"ax"
+143:
+ \newinstr
+144:
+ .popsection
+.endm
+
+#define old_len 141b-140b
+#define new_len1 144f-143f
+#define new_len2 145f-144f
+
+/*
+ * gas compatible max based on the idea from:
+ * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
+ *
+ * The additional "-" is needed because gas uses a "true" value of -1.
+ */
+#define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
+
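A worked instance with gas's -1 truth value: for a = 3, b = 5, (a < b) evaluates to -1, the doubled minus leaves the all-ones mask -(-(-1)) = -1, and 3 ^ ((3 ^ 5) & -1) = 3 ^ 3 ^ 5 = 5; with the operands swapped the mask is 0 and the expression collapses to a = 5.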
+
+/*
+ * Same as ALTERNATIVE macro above but for two alternatives. If CPU
+ * has @feature1, it replaces @oldinstr with @newinstr1. If CPU has
+ * @feature2, it replaces @oldinstr with @newinstr2.
+ */
+.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
+140:
+ \oldinstr
+141:
+ .skip -((alt_max_short(new_len1, new_len2) - (old_len)) > 0) * \
+ (alt_max_short(new_len1, new_len2) - (old_len)),0x90
+142:
+
+ .pushsection .altinstructions,"a"
+ altinstruction_entry 140b,143f,\feature1,142b-140b,144f-143f
+ altinstruction_entry 140b,144f,\feature2,142b-140b,145f-144f
+ .popsection
+
+ .pushsection .altinstr_replacement,"ax"
+143:
+ \newinstr1
+144:
+ \newinstr2
+145:
+ .popsection
+.endm
+
+/* If @feature is set, patch in @newinstr_yes, otherwise @newinstr_no. */
+#define ALTERNATIVE_TERNARY(oldinstr, feature, newinstr_yes, newinstr_no) \
+ ALTERNATIVE_2 oldinstr, newinstr_no, X86_FEATURE_ALWAYS, \
+ newinstr_yes, feature
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_ALTERNATIVE_H */
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 412b51e059c8..48067af94678 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void)
extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
extern void lapic_assign_system_vectors(void);
extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
+extern void lapic_update_legacy_vectors(void);
extern void lapic_online(void);
extern void lapic_offline(void);
extern bool apic_needs_pit(void);
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index 51e2bf27cc9b..4cb726c71ed8 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -19,18 +19,19 @@ extern void cmpxchg8b_emu(void);
#ifdef CONFIG_RETPOLINE
-#define DECL_INDIRECT_THUNK(reg) \
+#undef GEN
+#define GEN(reg) \
extern asmlinkage void __x86_indirect_thunk_ ## reg (void);
-
-#define DECL_RETPOLINE(reg) \
- extern asmlinkage void __x86_retpoline_ ## reg (void);
+#include <asm/GEN-for-each-reg.h>
#undef GEN
-#define GEN(reg) DECL_INDIRECT_THUNK(reg)
+#define GEN(reg) \
+ extern asmlinkage void __x86_indirect_alt_call_ ## reg (void);
#include <asm/GEN-for-each-reg.h>
#undef GEN
-#define GEN(reg) DECL_RETPOLINE(reg)
+#define GEN(reg) \
+ extern asmlinkage void __x86_indirect_alt_jmp_ ## reg (void);
#include <asm/GEN-for-each-reg.h>
#endif /* CONFIG_RETPOLINE */
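For reference, <asm/GEN-for-each-reg.h> simply invokes GEN() once per general-purpose register, so after preprocessing the first block above amounts to roughly the following (abridged sketch):

extern asmlinkage void __x86_indirect_thunk_rax(void);
extern asmlinkage void __x86_indirect_thunk_rbx(void);
extern asmlinkage void __x86_indirect_thunk_rcx(void);
/* ...one declaration per remaining GPR... */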
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 0603c7423aca..3ad3da9a7d97 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -3,25 +3,26 @@
#define _ASM_X86_ASM_H
#ifdef __ASSEMBLY__
-# define __ASM_FORM(x) x
-# define __ASM_FORM_RAW(x) x
-# define __ASM_FORM_COMMA(x) x,
+# define __ASM_FORM(x, ...) x,## __VA_ARGS__
+# define __ASM_FORM_RAW(x, ...) x,## __VA_ARGS__
+# define __ASM_FORM_COMMA(x, ...) x,## __VA_ARGS__,
#else
#include <linux/stringify.h>
-
-# define __ASM_FORM(x) " " __stringify(x) " "
-# define __ASM_FORM_RAW(x) __stringify(x)
-# define __ASM_FORM_COMMA(x) " " __stringify(x) ","
+# define __ASM_FORM(x, ...) " " __stringify(x,##__VA_ARGS__) " "
+# define __ASM_FORM_RAW(x, ...) __stringify(x,##__VA_ARGS__)
+# define __ASM_FORM_COMMA(x, ...) " " __stringify(x,##__VA_ARGS__) ","
#endif
+#define _ASM_BYTES(x, ...) __ASM_FORM(.byte x,##__VA_ARGS__ ;)
+
#ifndef __x86_64__
/* 32 bit */
-# define __ASM_SEL(a,b) __ASM_FORM(a)
-# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
+# define __ASM_SEL(a,b) __ASM_FORM(a)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
#else
/* 64 bit */
-# define __ASM_SEL(a,b) __ASM_FORM(b)
-# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
+# define __ASM_SEL(a,b) __ASM_FORM(b)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
#endif
#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \
@@ -119,6 +120,8 @@
# define CC_OUT(c) [_cc_ ## c] "=qm"
#endif
+#ifdef __KERNEL__
+
/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE_HANDLE(from, to, handler) \
@@ -185,4 +188,6 @@ register unsigned long current_stack_pointer asm(_ASM_SP);
#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
#endif /* __ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
#endif /* _ASM_X86_ASM_H */
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index f732741ad7c7..5e754e895767 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -269,6 +269,4 @@ static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
# include <asm/atomic64_64.h>
#endif
-#define ARCH_ATOMIC
-
#endif /* _ASM_X86_ATOMIC_H */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 4819d5e5a335..3ba772a69cc8 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -54,11 +54,8 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
#define dma_rmb() barrier()
#define dma_wmb() barrier()
-#ifdef CONFIG_X86_32
-#define __smp_mb() asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
-#else
-#define __smp_mb() asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
-#endif
+#define __smp_mb() asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")
+
#define __smp_rmb() dma_rmb()
#define __smp_wmb() barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 297fa12e7e27..84b87538a15d 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -7,18 +7,9 @@
/*
* Despite that some emulators terminate on UD2, we use it for WARN().
- *
- * Since various instruction decoders/specs disagree on the encoding of
- * UD0/UD1.
*/
-
-#define ASM_UD0 ".byte 0x0f, 0xff" /* + ModRM (for Intel) */
-#define ASM_UD1 ".byte 0x0f, 0xb9" /* + ModRM */
#define ASM_UD2 ".byte 0x0f, 0x0b"
-
-#define INSN_UD0 0xff0f
#define INSN_UD2 0x0b0f
-
#define LEN_UD2 2
#ifdef CONFIG_GENERIC_BUG
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 4d4ec5cbdc51..94fbe6ae7431 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -22,7 +22,7 @@ extern void __add_wrong_size(void)
/*
* Constants for operation sizes. On 32-bit, the 64-bit size it set to
* -1 because sizeof will never return -1, thereby making those switch
- * case statements guaranteeed dead code which the compiler will
+ * case statements guaranteed dead code which the compiler will
* eliminate, and allowing the "missing symbol in the default case" to
* indicate a usage error.
*/
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index da78ccbd493b..33d41e350c79 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -41,12 +41,14 @@ unsigned int x86_family(unsigned int sig);
unsigned int x86_model(unsigned int sig);
unsigned int x86_stepping(unsigned int sig);
#ifdef CONFIG_CPU_SUP_INTEL
-extern void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c);
+extern void __init sld_setup(struct cpuinfo_x86 *c);
extern void switch_to_sld(unsigned long tifn);
extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
extern bool handle_guest_split_lock(unsigned long ip);
+extern void handle_bus_lock(struct pt_regs *regs);
+u8 get_this_hybrid_cpu_type(void);
#else
-static inline void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c) {}
+static inline void __init sld_setup(struct cpuinfo_x86 *c) {}
static inline void switch_to_sld(unsigned long tifn) {}
static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
@@ -57,6 +59,13 @@ static inline bool handle_guest_split_lock(unsigned long ip)
{
return false;
}
+
+static inline void handle_bus_lock(struct pt_regs *regs) {}
+
+static inline u8 get_this_hybrid_cpu_type(void)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_IA32_FEAT_CTL
void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 1728d4ce5730..16a51e7288d5 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -8,6 +8,7 @@
#include <asm/asm.h>
#include <linux/bitops.h>
+#include <asm/alternative.h>
enum cpuid_leafs
{
@@ -175,39 +176,15 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
*/
static __always_inline bool _static_cpu_has(u16 bit)
{
- asm_volatile_goto("1: jmp 6f\n"
- "2:\n"
- ".skip -(((5f-4f) - (2b-1b)) > 0) * "
- "((5f-4f) - (2b-1b)),0x90\n"
- "3:\n"
- ".section .altinstructions,\"a\"\n"
- " .long 1b - .\n" /* src offset */
- " .long 4f - .\n" /* repl offset */
- " .word %P[always]\n" /* always replace */
- " .byte 3b - 1b\n" /* src len */
- " .byte 5f - 4f\n" /* repl len */
- " .byte 3b - 2b\n" /* pad len */
- ".previous\n"
- ".section .altinstr_replacement,\"ax\"\n"
- "4: jmp %l[t_no]\n"
- "5:\n"
- ".previous\n"
- ".section .altinstructions,\"a\"\n"
- " .long 1b - .\n" /* src offset */
- " .long 0\n" /* no replacement */
- " .word %P[feature]\n" /* feature bit */
- " .byte 3b - 1b\n" /* src len */
- " .byte 0\n" /* repl len */
- " .byte 0\n" /* pad len */
- ".previous\n"
- ".section .altinstr_aux,\"ax\"\n"
- "6:\n"
- " testb %[bitnum],%[cap_byte]\n"
- " jnz %l[t_yes]\n"
- " jmp %l[t_no]\n"
- ".previous\n"
+ asm_volatile_goto(
+ ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]")
+ ".section .altinstr_aux,\"ax\"\n"
+ "6:\n"
+ " testb %[bitnum],%[cap_byte]\n"
+ " jnz %l[t_yes]\n"
+ " jmp %l[t_no]\n"
+ ".previous\n"
: : [feature] "i" (bit),
- [always] "i" (X86_FEATURE_ALWAYS),
[bitnum] "i" (1 << (bit & 7)),
[cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
: : t_yes, t_no);
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index cc96e26d69f7..d0ce5cfd3ac1 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -84,7 +84,7 @@
/* CPU types for specific tunings: */
#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
-#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
+/* FREE, was #define X86_FEATURE_K7 ( 3*32+ 5) "" Athlon */
#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
@@ -108,7 +108,7 @@
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
-/* free ( 3*32+29) */
+#define X86_FEATURE_RAPL ( 3*32+29) /* AMD/Hygon RAPL interface */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
@@ -236,6 +236,8 @@
#define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
#define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
#define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
+#define X86_FEATURE_PVUNLOCK ( 8*32+20) /* "" PV unlock function */
+#define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* "" PV vcpu_is_preempted function */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
@@ -290,6 +292,8 @@
#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
#define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */
#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
+#define X86_FEATURE_SGX1 (11*32+ 8) /* "" Basic SGX */
+#define X86_FEATURE_SGX2 (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
@@ -336,6 +340,7 @@
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
+#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* Virtual SPEC_CTRL */
#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
@@ -354,6 +359,7 @@
#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
+#define X86_FEATURE_BUS_LOCK_DETECT (16*32+24) /* Bus Lock detect */
#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */
#define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */
#define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */
@@ -372,8 +378,10 @@
#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
+#define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* "" RTM transaction always aborts */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */
+#define X86_FEATURE_HYBRID_CPU (18*32+15) /* "" This part has CPUs of more than one type */
#define X86_FEATURE_TSXLDTRK (18*32+16) /* TSX Suspend Load Address Tracking */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */
diff --git a/arch/x86/include/asm/crash.h b/arch/x86/include/asm/crash.h
index f58de66091e5..8b6bd63530dc 100644
--- a/arch/x86/include/asm/crash.h
+++ b/arch/x86/include/asm/crash.h
@@ -9,10 +9,4 @@ int crash_setup_memmap_entries(struct kimage *image,
struct boot_params *params);
void crash_smp_send_stop(void);
-#ifdef CONFIG_KEXEC_CORE
-void __init crash_reserve_low_1M(void);
-#else
-static inline void __init crash_reserve_low_1M(void) { }
-#endif
-
#endif /* _ASM_X86_CRASH_H */
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 476082a83d1c..ab97b22ac04a 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -9,6 +9,7 @@
#include <asm/irq_vectors.h>
#include <asm/cpu_entry_area.h>
+#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/percpu.h>
@@ -224,6 +225,26 @@ static inline void store_idt(struct desc_ptr *dtr)
asm volatile("sidt %0":"=m" (*dtr));
}
+static inline void native_gdt_invalidate(void)
+{
+ const struct desc_ptr invalid_gdt = {
+ .address = 0,
+ .size = 0
+ };
+
+ native_load_gdt(&invalid_gdt);
+}
+
+static inline void native_idt_invalidate(void)
+{
+ const struct desc_ptr invalid_idt = {
+ .address = 0,
+ .size = 0
+ };
+
+ native_load_idt(&invalid_idt);
+}
+
/*
* The LTR instruction marks the TSS GDT entry as busy. On 64-bit, the GDT is
* a read-only remapping. To prevent a page fault, the GDT is switched to the
@@ -421,12 +442,10 @@ extern bool idt_is_f00f_address(unsigned long address);
#ifdef CONFIG_X86_64
extern void idt_setup_early_pf(void);
-extern void idt_setup_ist_traps(void);
#else
static inline void idt_setup_early_pf(void) { }
-static inline void idt_setup_ist_traps(void) { }
#endif
-extern void idt_invalidate(void *addr);
+extern void idt_invalidate(void);
#endif /* _ASM_X86_DESC_H */
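
[ The two helpers added above load a zero-limit descriptor table, so any subsequent interrupt or exception escalates to a triple fault that resets the CPU. A minimal usage sketch for a reboot/kexec-style teardown path; example_cpu_teardown is an invented name, not kernel API: ]

static void example_cpu_teardown(void)
{
	/* After this, any vector delivery faults on the zero-limit IDT
	 * and the CPU shuts down via triple fault. */
	native_idt_invalidate();
	native_gdt_invalidate();
}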
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index b7dd944dc867..8f28fafa98b3 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -56,11 +56,8 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif
-#ifdef CONFIG_IOMMU_SUPPORT
-# define DISABLE_ENQCMD 0
-#else
-# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
-#endif
+/* Force disable because it's broken beyond repair */
+#define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
#ifdef CONFIG_X86_SGX
# define DISABLE_SGX 0
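
[ With ENQCMD force-disabled, its bit lands in the word-16 disabled mask, so feature tests fold away at compile time. A sketch of the consumer side, assuming the real cpu_feature_enabled() interface; setup_enqcmd is a hypothetical helper: ]

static void example_init(void)
{
	/* X86_FEATURE_ENQCMD is word 16, so DISABLE_ENQCMD feeds
	 * DISABLED_MASK16 and this test is a compile-time 0. */
	if (cpu_feature_enabled(X86_FEATURE_ENQCMD))
		setup_enqcmd();		/* dead code while force-disabled */
}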
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 9224d40cdefe..7d7500806af8 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -283,12 +283,12 @@ extern u32 elf_hwcap2;
*
* The decision process for determining the results are:
*
- *              CPU: | lacks NX*  | has NX, ia32     | has NX, x86_64 |
- * ELF:              |            |                  |                |
+ *              CPU: | lacks NX*  | has NX, ia32     | has NX, x86_64 |
+ * ELF:              |            |                  |                |
* ---------------------|------------|------------------|----------------|
- * missing PT_GNU_STACK | exec-all   | exec-all         | exec-none      |
- * PT_GNU_STACK == RWX  | exec-stack | exec-stack       | exec-stack     |
- * PT_GNU_STACK == RW   | exec-none  | exec-none        | exec-none      |
+ * missing PT_GNU_STACK | exec-all   | exec-all         | exec-none      |
+ * PT_GNU_STACK == RWX  | exec-stack | exec-stack       | exec-stack     |
+ * PT_GNU_STACK == RW   | exec-none  | exec-none        | exec-none      |
*
* exec-all : all PROT_READ user mappings are executable, except when
* backed by files on a noexec-filesystem.
diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
index 2b87b191b3b8..14ebd2196569 100644
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -2,6 +2,7 @@
#ifndef _ASM_X86_ENTRY_COMMON_H
#define _ASM_X86_ENTRY_COMMON_H
+#include <linux/randomize_kstack.h>
#include <linux/user-return-notifier.h>
#include <asm/nospec-branch.h>
@@ -70,6 +71,21 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
*/
current_thread_info()->status &= ~(TS_COMPAT | TS_I386_REGS_POKED);
#endif
+
+ /*
+ * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+ * but not enough for x86 stack utilization comfort. To keep
+ * reasonable stack head room, reduce the maximum offset to 8 bits.
+ *
+ * The actual entropy will be further reduced by the compiler when
+ * applying stack alignment constraints (see cc_stack_align4/8 in
+ * arch/x86/Makefile), which will remove the 3 (x86_64) or 2 (ia32)
+ * low bits from any entropy chosen here.
+ *
+ * Therefore, final stack offset entropy will be 5 (x86_64) or
+ * 6 (ia32) bits.
+ */
+ choose_random_kstack_offset(rdtsc() & 0xFF);
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare
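
[ The entropy arithmetic in the comment above can be restated in a few lines. This standalone sketch only restates the bookkeeping; the 0xFF mask and the 3/2 alignment bits come from the hunk, everything else is illustration: ]

#include <stdio.h>

int main(void)
{
	unsigned int raw_bits   = 8;	/* rdtsc() & 0xFF */
	unsigned int align_bits = 3;	/* x86_64 stack alignment; 2 on ia32 */

	/* The compiler zeroes the low align_bits of any chosen offset,
	 * so only the upper bits contribute randomness. */
	printf("effective entropy: %u bits\n", raw_bits - align_bits);
	return 0;
}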
diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
index d43717b423cb..6ec3fc969ad5 100644
--- a/arch/x86/include/asm/floppy.h
+++ b/arch/x86/include/asm/floppy.h
@@ -74,7 +74,6 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id)
int lcount;
char *lptr;
- st = 1;
for (lcount = virtual_dma_count, lptr = virtual_dma_addr;
lcount; lcount--, lptr++) {
st = inb(virtual_dma_port + FD_STATUS);
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index ed33a14188f6..23bef08a8388 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -106,10 +106,6 @@ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
*/
#define PASID_DISABLED 0
-#ifdef CONFIG_IOMMU_SUPPORT
-/* Update current's PASID MSR/state by mm's PASID. */
-void update_pasid(void);
-#else
static inline void update_pasid(void) { }
-#endif
+
#endif /* _ASM_X86_FPU_API_H */
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 8d33ad80704f..16bf4d4a8159 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -204,6 +204,14 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
}
+static inline void fxsave(struct fxregs_state *fx)
+{
+ if (IS_ENABLED(CONFIG_X86_32))
+ asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
+ else
+ asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
+}
+
/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
@@ -272,28 +280,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
* This function is called only during boot time when x86 caps are not set
* up and alternative can not be used yet.
*/
-static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
-{
- u64 mask = xfeatures_mask_all;
- u32 lmask = mask;
- u32 hmask = mask >> 32;
- int err;
-
- WARN_ON(system_state != SYSTEM_BOOTING);
-
- if (boot_cpu_has(X86_FEATURE_XSAVES))
- XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
- else
- XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
-
- /* We should never fault when copying to a kernel buffer: */
- WARN_ON_FPU(err);
-}
-
-/*
- * This function is called only during boot time when x86 caps are not set
- * up and alternative can not be used yet.
- */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
u64 mask = -1;
@@ -578,19 +564,19 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
* PKRU state is switched eagerly because it needs to be valid before we
* return to userland e.g. for a copy_to_user() operation.
*/
- if (current->mm) {
+ if (!(current->flags & PF_KTHREAD)) {
+ /*
+ * If the PKRU bit in xsave.header.xfeatures is not set,
+ * then the PKRU component was in init state, which means
+ * XRSTOR will set PKRU to 0. If the bit is not set then
+ * get_xsave_addr() will return NULL because the PKRU value
+ * in memory is not valid. This means pkru_val has to be
+ * set to 0 and not to init_pkru_value.
+ */
pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
- if (pk)
- pkru_val = pk->pkru;
+ pkru_val = pk ? pk->pkru : 0;
}
__write_pkru(pkru_val);
-
- /*
- * Expensive PASID MSR write will be avoided in update_pasid() because
- * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated
- * unless it's different from mm->pasid to reduce overhead.
- */
- update_pasid();
}
#endif /* _ASM_X86_FPU_INTERNAL_H */
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index e6cd3fee562b..f1366ce609e3 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -52,7 +52,7 @@
* Support for passing hypercall input parameter block via XMM
* registers is available
*/
-#define HV_X64_HYPERCALL_PARAMS_XMM_AVAILABLE BIT(4)
+#define HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE BIT(4)
/* Support for a virtual guest idle state is available */
#define HV_X64_GUEST_IDLE_STATE_AVAILABLE BIT(5)
/* Frequency MSRs available */
@@ -61,6 +61,11 @@
#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE BIT(10)
/* Support for debug MSRs available */
#define HV_FEATURE_DEBUG_MSRS_AVAILABLE BIT(11)
+/*
+ * Support for returning hypercall output block via XMM
+ * registers is available
+ */
+#define HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE BIT(15)
/* stimer Direct Mode is available */
#define HV_STIMER_DIRECT_MODE_AVAILABLE BIT(19)
@@ -133,6 +138,15 @@
#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18)
#define HV_X64_NESTED_MSR_BITMAP BIT(19)
+/*
+ * This is specific to AMD and specifies that enlightened TLB flush is
+ * supported. If guest opts in to this feature, ASID invalidations only
+ * flush gva -> hpa mapping entries. To flush the TLB entries derived
+ * from NPT, hypercalls should be used (HvFlushGuestPhysicalAddressSpace
+ * or HvFlushGuestPhysicalAddressList).
+ */
+#define HV_X64_NESTED_ENLIGHTENED_TLB BIT(22)
+
/* HYPERV_CPUID_ISOLATION_CONFIG.EAX bits. */
#define HV_PARAVISOR_PRESENT BIT(0)
@@ -156,7 +170,7 @@ enum hv_isolation_type {
#define HV_X64_MSR_HYPERCALL 0x40000001
/* MSR used to provide vcpu index */
-#define HV_X64_MSR_VP_INDEX 0x40000002
+#define HV_REGISTER_VP_INDEX 0x40000002
/* MSR used to reset the guest OS. */
#define HV_X64_MSR_RESET 0x40000003
@@ -165,10 +179,10 @@ enum hv_isolation_type {
#define HV_X64_MSR_VP_RUNTIME 0x40000010
/* MSR used to read the per-partition time reference counter */
-#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
+#define HV_REGISTER_TIME_REF_COUNT 0x40000020
/* A partition's reference time stamp counter (TSC) page */
-#define HV_X64_MSR_REFERENCE_TSC 0x40000021
+#define HV_REGISTER_REFERENCE_TSC 0x40000021
/* MSR used to retrieve the TSC frequency */
#define HV_X64_MSR_TSC_FREQUENCY 0x40000022
@@ -183,50 +197,50 @@ enum hv_isolation_type {
#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073
/* Define synthetic interrupt controller model specific registers. */
-#define HV_X64_MSR_SCONTROL 0x40000080
-#define HV_X64_MSR_SVERSION 0x40000081
-#define HV_X64_MSR_SIEFP 0x40000082
-#define HV_X64_MSR_SIMP 0x40000083
-#define HV_X64_MSR_EOM 0x40000084
-#define HV_X64_MSR_SINT0 0x40000090
-#define HV_X64_MSR_SINT1 0x40000091
-#define HV_X64_MSR_SINT2 0x40000092
-#define HV_X64_MSR_SINT3 0x40000093
-#define HV_X64_MSR_SINT4 0x40000094
-#define HV_X64_MSR_SINT5 0x40000095
-#define HV_X64_MSR_SINT6 0x40000096
-#define HV_X64_MSR_SINT7 0x40000097
-#define HV_X64_MSR_SINT8 0x40000098
-#define HV_X64_MSR_SINT9 0x40000099
-#define HV_X64_MSR_SINT10 0x4000009A
-#define HV_X64_MSR_SINT11 0x4000009B
-#define HV_X64_MSR_SINT12 0x4000009C
-#define HV_X64_MSR_SINT13 0x4000009D
-#define HV_X64_MSR_SINT14 0x4000009E
-#define HV_X64_MSR_SINT15 0x4000009F
+#define HV_REGISTER_SCONTROL 0x40000080
+#define HV_REGISTER_SVERSION 0x40000081
+#define HV_REGISTER_SIEFP 0x40000082
+#define HV_REGISTER_SIMP 0x40000083
+#define HV_REGISTER_EOM 0x40000084
+#define HV_REGISTER_SINT0 0x40000090
+#define HV_REGISTER_SINT1 0x40000091
+#define HV_REGISTER_SINT2 0x40000092
+#define HV_REGISTER_SINT3 0x40000093
+#define HV_REGISTER_SINT4 0x40000094
+#define HV_REGISTER_SINT5 0x40000095
+#define HV_REGISTER_SINT6 0x40000096
+#define HV_REGISTER_SINT7 0x40000097
+#define HV_REGISTER_SINT8 0x40000098
+#define HV_REGISTER_SINT9 0x40000099
+#define HV_REGISTER_SINT10 0x4000009A
+#define HV_REGISTER_SINT11 0x4000009B
+#define HV_REGISTER_SINT12 0x4000009C
+#define HV_REGISTER_SINT13 0x4000009D
+#define HV_REGISTER_SINT14 0x4000009E
+#define HV_REGISTER_SINT15 0x4000009F
/*
* Synthetic Timer MSRs. Four timers per vcpu.
*/
-#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0
-#define HV_X64_MSR_STIMER0_COUNT 0x400000B1
-#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2
-#define HV_X64_MSR_STIMER1_COUNT 0x400000B3
-#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4
-#define HV_X64_MSR_STIMER2_COUNT 0x400000B5
-#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6
-#define HV_X64_MSR_STIMER3_COUNT 0x400000B7
+#define HV_REGISTER_STIMER0_CONFIG 0x400000B0
+#define HV_REGISTER_STIMER0_COUNT 0x400000B1
+#define HV_REGISTER_STIMER1_CONFIG 0x400000B2
+#define HV_REGISTER_STIMER1_COUNT 0x400000B3
+#define HV_REGISTER_STIMER2_CONFIG 0x400000B4
+#define HV_REGISTER_STIMER2_COUNT 0x400000B5
+#define HV_REGISTER_STIMER3_CONFIG 0x400000B6
+#define HV_REGISTER_STIMER3_COUNT 0x400000B7
/* Hyper-V guest idle MSR */
#define HV_X64_MSR_GUEST_IDLE 0x400000F0
/* Hyper-V guest crash notification MSR's */
-#define HV_X64_MSR_CRASH_P0 0x40000100
-#define HV_X64_MSR_CRASH_P1 0x40000101
-#define HV_X64_MSR_CRASH_P2 0x40000102
-#define HV_X64_MSR_CRASH_P3 0x40000103
-#define HV_X64_MSR_CRASH_P4 0x40000104
-#define HV_X64_MSR_CRASH_CTL 0x40000105
+#define HV_REGISTER_CRASH_P0 0x40000100
+#define HV_REGISTER_CRASH_P1 0x40000101
+#define HV_REGISTER_CRASH_P2 0x40000102
+#define HV_REGISTER_CRASH_P3 0x40000103
+#define HV_REGISTER_CRASH_P4 0x40000104
+#define HV_REGISTER_CRASH_CTL 0x40000105
/* TSC emulation after migration */
#define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
@@ -236,6 +250,32 @@ enum hv_isolation_type {
/* TSC invariant control */
#define HV_X64_MSR_TSC_INVARIANT_CONTROL 0x40000118
+/* Register name aliases for temporary compatibility */
+#define HV_X64_MSR_STIMER0_COUNT HV_REGISTER_STIMER0_COUNT
+#define HV_X64_MSR_STIMER0_CONFIG HV_REGISTER_STIMER0_CONFIG
+#define HV_X64_MSR_STIMER1_COUNT HV_REGISTER_STIMER1_COUNT
+#define HV_X64_MSR_STIMER1_CONFIG HV_REGISTER_STIMER1_CONFIG
+#define HV_X64_MSR_STIMER2_COUNT HV_REGISTER_STIMER2_COUNT
+#define HV_X64_MSR_STIMER2_CONFIG HV_REGISTER_STIMER2_CONFIG
+#define HV_X64_MSR_STIMER3_COUNT HV_REGISTER_STIMER3_COUNT
+#define HV_X64_MSR_STIMER3_CONFIG HV_REGISTER_STIMER3_CONFIG
+#define HV_X64_MSR_SCONTROL HV_REGISTER_SCONTROL
+#define HV_X64_MSR_SVERSION HV_REGISTER_SVERSION
+#define HV_X64_MSR_SIMP HV_REGISTER_SIMP
+#define HV_X64_MSR_SIEFP HV_REGISTER_SIEFP
+#define HV_X64_MSR_VP_INDEX HV_REGISTER_VP_INDEX
+#define HV_X64_MSR_EOM HV_REGISTER_EOM
+#define HV_X64_MSR_SINT0 HV_REGISTER_SINT0
+#define HV_X64_MSR_SINT15 HV_REGISTER_SINT15
+#define HV_X64_MSR_CRASH_P0 HV_REGISTER_CRASH_P0
+#define HV_X64_MSR_CRASH_P1 HV_REGISTER_CRASH_P1
+#define HV_X64_MSR_CRASH_P2 HV_REGISTER_CRASH_P2
+#define HV_X64_MSR_CRASH_P3 HV_REGISTER_CRASH_P3
+#define HV_X64_MSR_CRASH_P4 HV_REGISTER_CRASH_P4
+#define HV_X64_MSR_CRASH_CTL HV_REGISTER_CRASH_CTL
+#define HV_X64_MSR_TIME_REF_COUNT HV_REGISTER_TIME_REF_COUNT
+#define HV_X64_MSR_REFERENCE_TSC HV_REGISTER_REFERENCE_TSC
+
/*
* Declare the MSR used to setup pages used to communicate with the hypervisor.
*/
@@ -288,34 +328,8 @@ struct hv_tsc_emulation_status {
#define HV_X64_MSR_TSC_REFERENCE_ENABLE 0x00000001
#define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12
-
-/* Define hypervisor message types. */
-enum hv_message_type {
- HVMSG_NONE = 0x00000000,
-
- /* Memory access messages. */
- HVMSG_UNMAPPED_GPA = 0x80000000,
- HVMSG_GPA_INTERCEPT = 0x80000001,
-
- /* Timer notification messages. */
- HVMSG_TIMER_EXPIRED = 0x80000010,
-
- /* Error messages. */
- HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
- HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021,
- HVMSG_UNSUPPORTED_FEATURE = 0x80000022,
-
- /* Trace buffer complete messages. */
- HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040,
-
- /* Platform-specific processor intercept messages. */
- HVMSG_X64_IOPORT_INTERCEPT = 0x80010000,
- HVMSG_X64_MSR_INTERCEPT = 0x80010001,
- HVMSG_X64_CPUID_INTERCEPT = 0x80010002,
- HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003,
- HVMSG_X64_APIC_EOI = 0x80010004,
- HVMSG_X64_LEGACY_FP_ERROR = 0x80010005
-};
+/* Number of XMM registers used in hypercall input/output */
+#define HV_HYPERCALL_MAX_XMM_REGISTERS 6
struct hv_nested_enlightenments_control {
struct {
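
[ A guest would gate XMM fast hypercall usage on both the input and the new output capability bit. A sketch assuming these CPUID bits are cached in the kernel's ms_hyperv.features word; use_xmm_fast_hypercalls is a made-up name: ]

	if ((ms_hyperv.features & HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE) &&
	    (ms_hyperv.features & HV_X64_HYPERCALL_XMM_OUTPUT_AVAILABLE))
		use_xmm_fast_hypercalls();	/* up to 6 XMM regs each way */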
diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
index 5eb3bdf36a41..1345088e9902 100644
--- a/arch/x86/include/asm/idtentry.h
+++ b/arch/x86/include/asm/idtentry.h
@@ -312,8 +312,8 @@ static __always_inline void __##func(struct pt_regs *regs)
*/
#define DECLARE_IDTENTRY_VC(vector, func) \
DECLARE_IDTENTRY_RAW_ERRORCODE(vector, func); \
- __visible noinstr void ist_##func(struct pt_regs *regs, unsigned long error_code); \
- __visible noinstr void safe_stack_##func(struct pt_regs *regs, unsigned long error_code)
+ __visible noinstr void kernel_##func(struct pt_regs *regs, unsigned long error_code); \
+ __visible noinstr void user_##func(struct pt_regs *regs, unsigned long error_code)
/**
* DEFINE_IDTENTRY_IST - Emit code for IST entry points
@@ -355,33 +355,24 @@ static __always_inline void __##func(struct pt_regs *regs)
DEFINE_IDTENTRY_RAW_ERRORCODE(func)
/**
- * DEFINE_IDTENTRY_VC_SAFE_STACK - Emit code for VMM communication handler
- which runs on a safe stack.
+ * DEFINE_IDTENTRY_VC_KERNEL - Emit code for VMM communication handler
+ when raised from kernel mode
* @func: Function name of the entry point
*
* Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
*/
-#define DEFINE_IDTENTRY_VC_SAFE_STACK(func) \
- DEFINE_IDTENTRY_RAW_ERRORCODE(safe_stack_##func)
+#define DEFINE_IDTENTRY_VC_KERNEL(func) \
+ DEFINE_IDTENTRY_RAW_ERRORCODE(kernel_##func)
/**
- * DEFINE_IDTENTRY_VC_IST - Emit code for VMM communication handler
- which runs on the VC fall-back stack
+ * DEFINE_IDTENTRY_VC_USER - Emit code for VMM communication handler
+ when raised from user mode
* @func: Function name of the entry point
*
* Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
*/
-#define DEFINE_IDTENTRY_VC_IST(func) \
- DEFINE_IDTENTRY_RAW_ERRORCODE(ist_##func)
-
-/**
- * DEFINE_IDTENTRY_VC - Emit code for VMM communication handler
- * @func: Function name of the entry point
- *
- * Maps to DEFINE_IDTENTRY_RAW_ERRORCODE
- */
-#define DEFINE_IDTENTRY_VC(func) \
- DEFINE_IDTENTRY_RAW_ERRORCODE(func)
+#define DEFINE_IDTENTRY_VC_USER(func) \
+ DEFINE_IDTENTRY_RAW_ERRORCODE(user_##func)
#else /* CONFIG_X86_64 */
@@ -504,7 +495,7 @@ __visible noinstr void func(struct pt_regs *regs, \
.align 8
SYM_CODE_START(irq_entries_start)
vector=FIRST_EXTERNAL_VECTOR
- .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+ .rept NR_EXTERNAL_VECTORS
UNWIND_HINT_IRET_REGS
0 :
.byte 0x6a, vector
@@ -520,7 +511,7 @@ SYM_CODE_END(irq_entries_start)
.align 8
SYM_CODE_START(spurious_entries_start)
vector=FIRST_SYSTEM_VECTOR
- .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+ .rept NR_SYSTEM_VECTORS
UNWIND_HINT_IRET_REGS
0 :
.byte 0x6a, vector
@@ -547,7 +538,7 @@ SYM_CODE_END(spurious_entries_start)
/*
* Dummy trap number so the low level ASM macro vector number checks do not
* match which results in emitting plain IDTENTRY stubs without bells and
- * whistels.
+ * whistles.
*/
#define X86_TRAP_OTHER 0xFFFF
@@ -588,6 +579,21 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC, xenpv_exc_machine_check);
#endif
/* NMI */
+
+#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
+/*
+ * Special NOIST entry point for VMX which invokes this on the kernel
+ * stack. asm_exc_nmi() requires an IST to work correctly vs. the NMI
+ * 'executing' marker.
+ *
+ * On 32bit this just uses the regular NMI entry point because 32-bit does
+ * not have ISTs.
+ */
+DECLARE_IDTENTRY(X86_TRAP_NMI, exc_nmi_noist);
+#else
+#define asm_exc_nmi_noist asm_exc_nmi
+#endif
+
DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi);
#ifdef CONFIG_XEN_PV
DECLARE_IDTENTRY_RAW(X86_TRAP_NMI, xenpv_exc_nmi);
diff --git a/arch/x86/include/asm/inat.h b/arch/x86/include/asm/inat.h
index 4cf2ad521f65..b56c5741581a 100644
--- a/arch/x86/include/asm/inat.h
+++ b/arch/x86/include/asm/inat.h
@@ -6,7 +6,7 @@
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
*/
-#include <asm/inat_types.h>
+#include <asm/inat_types.h> /* __ignore_sync_check__ */
/*
* Internal bits. Don't use bitmasks directly, because these bits are
diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h
index a0f839aa144d..91d7182ad2d6 100644
--- a/arch/x86/include/asm/insn-eval.h
+++ b/arch/x86/include/asm/insn-eval.h
@@ -23,7 +23,9 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx);
int insn_get_code_seg_params(struct pt_regs *regs);
int insn_fetch_from_user(struct pt_regs *regs,
unsigned char buf[MAX_INSN_SIZE]);
-bool insn_decode(struct insn *insn, struct pt_regs *regs,
- unsigned char buf[MAX_INSN_SIZE], int buf_size);
+int insn_fetch_from_user_inatomic(struct pt_regs *regs,
+ unsigned char buf[MAX_INSN_SIZE]);
+bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs,
+ unsigned char buf[MAX_INSN_SIZE], int buf_size);
#endif /* _ASM_X86_INSN_EVAL_H */
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 95a448fbb44c..05a6ab940f45 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -9,7 +9,7 @@
#include <asm/byteorder.h>
/* insn_attr_t is defined in inat.h */
-#include <asm/inat.h>
+#include <asm/inat.h> /* __ignore_sync_check__ */
#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
@@ -132,13 +132,25 @@ struct insn {
#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */
extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
-extern void insn_get_prefixes(struct insn *insn);
-extern void insn_get_opcode(struct insn *insn);
-extern void insn_get_modrm(struct insn *insn);
-extern void insn_get_sib(struct insn *insn);
-extern void insn_get_displacement(struct insn *insn);
-extern void insn_get_immediate(struct insn *insn);
-extern void insn_get_length(struct insn *insn);
+extern int insn_get_prefixes(struct insn *insn);
+extern int insn_get_opcode(struct insn *insn);
+extern int insn_get_modrm(struct insn *insn);
+extern int insn_get_sib(struct insn *insn);
+extern int insn_get_displacement(struct insn *insn);
+extern int insn_get_immediate(struct insn *insn);
+extern int insn_get_length(struct insn *insn);
+
+enum insn_mode {
+ INSN_MODE_32,
+ INSN_MODE_64,
+ /* Mode is determined by the current kernel build. */
+ INSN_MODE_KERN,
+ INSN_NUM_MODES,
+};
+
+extern int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m);
+
+#define insn_decode_kernel(_insn, _ptr) insn_decode((_insn), (_ptr), MAX_INSN_SIZE, INSN_MODE_KERN)
/* Attribute will be determined after getting ModRM (for opcode groups) */
static inline void insn_get_attribute(struct insn *insn)
@@ -149,17 +161,6 @@ static inline void insn_get_attribute(struct insn *insn)
/* Instruction uses RIP-relative addressing */
extern int insn_rip_relative(struct insn *insn);
-/* Init insn for kernel text */
-static inline void kernel_insn_init(struct insn *insn,
- const void *kaddr, int buf_len)
-{
-#ifdef CONFIG_X86_64
- insn_init(insn, kaddr, buf_len, 1);
-#else /* CONFIG_X86_32 */
- insn_init(insn, kaddr, buf_len, 0);
-#endif
-}
-
static inline int insn_is_avx(struct insn *insn)
{
if (!insn->prefixes.got)
@@ -179,13 +180,6 @@ static inline int insn_has_emulate_prefix(struct insn *insn)
return !!insn->emulate_prefix_size;
}
-/* Ensure this instruction is decoded completely */
-static inline int insn_complete(struct insn *insn)
-{
- return insn->opcode.got && insn->modrm.got && insn->sib.got &&
- insn->displacement.got && insn->immediate.got;
-}
-
static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
{
if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
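
[ The decode entry points now return an error code instead of relying on callers to check insn_complete(). A plausible caller under the new interface; example_decode is illustrative, not kernel code: ]

static int example_decode(const void *kaddr)
{
	struct insn insn;
	int ret;

	/* INSN_MODE_KERN picks 32/64-bit decode from the kernel build;
	 * this is exactly what insn_decode_kernel() expands to. */
	ret = insn_decode(&insn, kaddr, MAX_INSN_SIZE, INSN_MODE_KERN);
	if (ret < 0)
		return ret;	/* replaces the old insn_complete() check */

	return insn.length;
}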
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 9abe842dbd84..27158436f322 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -32,7 +32,9 @@
* _EP - 2 socket server parts
* _EX - 4+ socket server parts
*
- * The #define line may optionally include a comment including platform names.
+ * The #define line may optionally include a comment including platform or core
+ * names. An exception is made for skylake/kabylake where steppings seem to have gotten
+ * their own names :-(
*/
/* Wildcard match for FAM6 so X86_MATCH_INTEL_FAM6_MODEL(ANY) works */
@@ -69,35 +71,42 @@
#define INTEL_FAM6_BROADWELL_X 0x4F
#define INTEL_FAM6_BROADWELL_D 0x56
-#define INTEL_FAM6_SKYLAKE_L 0x4E
-#define INTEL_FAM6_SKYLAKE 0x5E
-#define INTEL_FAM6_SKYLAKE_X 0x55
-#define INTEL_FAM6_KABYLAKE_L 0x8E
-#define INTEL_FAM6_KABYLAKE 0x9E
+#define INTEL_FAM6_SKYLAKE_L 0x4E /* Sky Lake */
+#define INTEL_FAM6_SKYLAKE 0x5E /* Sky Lake */
+#define INTEL_FAM6_SKYLAKE_X 0x55 /* Sky Lake */
+/* CASCADELAKE_X 0x55 Sky Lake -- s: 7 */
+/* COOPERLAKE_X 0x55 Sky Lake -- s: 11 */
-#define INTEL_FAM6_CANNONLAKE_L 0x66
+#define INTEL_FAM6_KABYLAKE_L 0x8E /* Sky Lake */
+/* AMBERLAKE_L 0x8E Sky Lake -- s: 9 */
+/* COFFEELAKE_L 0x8E Sky Lake -- s: 10 */
+/* WHISKEYLAKE_L 0x8E Sky Lake -- s: 11,12 */
-#define INTEL_FAM6_ICELAKE_X 0x6A
-#define INTEL_FAM6_ICELAKE_D 0x6C
-#define INTEL_FAM6_ICELAKE 0x7D
-#define INTEL_FAM6_ICELAKE_L 0x7E
-#define INTEL_FAM6_ICELAKE_NNPI 0x9D
+#define INTEL_FAM6_KABYLAKE 0x9E /* Sky Lake */
+/* COFFEELAKE 0x9E Sky Lake -- s: 10-13 */
-#define INTEL_FAM6_TIGERLAKE_L 0x8C
-#define INTEL_FAM6_TIGERLAKE 0x8D
+#define INTEL_FAM6_COMETLAKE 0xA5 /* Sky Lake */
+#define INTEL_FAM6_COMETLAKE_L 0xA6 /* Sky Lake */
-#define INTEL_FAM6_COMETLAKE 0xA5
-#define INTEL_FAM6_COMETLAKE_L 0xA6
+#define INTEL_FAM6_CANNONLAKE_L 0x66 /* Palm Cove */
-#define INTEL_FAM6_ROCKETLAKE 0xA7
+#define INTEL_FAM6_ICELAKE_X 0x6A /* Sunny Cove */
+#define INTEL_FAM6_ICELAKE_D 0x6C /* Sunny Cove */
+#define INTEL_FAM6_ICELAKE 0x7D /* Sunny Cove */
+#define INTEL_FAM6_ICELAKE_L 0x7E /* Sunny Cove */
+#define INTEL_FAM6_ICELAKE_NNPI 0x9D /* Sunny Cove */
-#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F
+#define INTEL_FAM6_LAKEFIELD 0x8A /* Sunny Cove / Tremont */
-/* Hybrid Core/Atom Processors */
+#define INTEL_FAM6_ROCKETLAKE 0xA7 /* Cypress Cove */
-#define INTEL_FAM6_LAKEFIELD 0x8A
-#define INTEL_FAM6_ALDERLAKE 0x97
-#define INTEL_FAM6_ALDERLAKE_L 0x9A
+#define INTEL_FAM6_TIGERLAKE_L 0x8C /* Willow Cove */
+#define INTEL_FAM6_TIGERLAKE 0x8D /* Willow Cove */
+
+#define INTEL_FAM6_SAPPHIRERAPIDS_X 0x8F /* Golden Cove */
+
+#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
+#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
/* "Small Core" Processors (Atom) */
diff --git a/arch/x86/include/asm/intel_pconfig.h b/arch/x86/include/asm/intel_pconfig.h
index 3cb002b1d0f9..994638ef171b 100644
--- a/arch/x86/include/asm/intel_pconfig.h
+++ b/arch/x86/include/asm/intel_pconfig.h
@@ -38,7 +38,7 @@ enum pconfig_leaf {
#define MKTME_INVALID_ENC_ALG 4
#define MKTME_DEVICE_BUSY 5
-/* Hardware requires the structure to be 256 byte alinged. Otherwise #GP(0). */
+/* Hardware requires the structure to be 256 byte aligned. Otherwise #GP(0). */
struct mktme_key_program {
u16 keyid;
u32 keyid_ctrl;
diff --git a/arch/x86/include/asm/intel_pt.h b/arch/x86/include/asm/intel_pt.h
index 423b788f495e..ebe8d2ea44fe 100644
--- a/arch/x86/include/asm/intel_pt.h
+++ b/arch/x86/include/asm/intel_pt.h
@@ -3,7 +3,7 @@
#define _ASM_X86_INTEL_PT_H
#define PT_CPUID_LEAVES 2
-#define PT_CPUID_REGS_NUM 4 /* number of regsters (eax, ebx, ecx, edx) */
+#define PT_CPUID_REGS_NUM 4 /* number of registers (eax, ebx, ecx, edx) */
enum pt_capabilities {
PT_CAP_max_subleaf = 0,
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index d726459d08e5..841a5d104afa 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -159,7 +159,7 @@ static inline void *phys_to_virt(phys_addr_t address)
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
* However, we truncate the address to unsigned int to avoid undesirable
- * promitions in legacy drivers.
+ * promotions in legacy drivers.
*/
static inline unsigned int isa_virt_to_bus(volatile void *address)
{
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index 9b2a0ff76c73..562854c60808 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -190,7 +190,7 @@
/*
* Macro to invoke __do_softirq on the irq stack. This is only called from
- * task context when bottom halfs are about to be reenabled and soft
+ * task context when bottom halves are about to be reenabled and soft
* interrupts are pending to be processed. The interrupt stack cannot be in
* use here.
*/
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 889f8b1b5b7f..43dcb9284208 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -26,8 +26,8 @@
* This file enumerates the exact layout of them:
*/
+/* This is used as an interrupt vector when programming the APIC. */
#define NMI_VECTOR 0x02
-#define MCE_VECTOR 0x12
/*
* IDT vectors usable for external interrupt sources start at 0x20.
@@ -84,7 +84,7 @@
*/
#define IRQ_WORK_VECTOR 0xf6
-#define UV_BAU_MESSAGE 0xf5
+/* 0xf5 - unused, was UV_BAU_MESSAGE */
#define DEFERRED_ERROR_VECTOR 0xf4
/* Vector on which hypervisor callbacks will be delivered */
@@ -114,6 +114,9 @@
#define FIRST_SYSTEM_VECTOR NR_VECTORS
#endif
+#define NR_EXTERNAL_VECTORS (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+#define NR_SYSTEM_VECTORS (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+
/*
* Size the maximum number of interrupts.
*
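
[ The two new macros just name the halves of the IDT above the legacy range. A standalone sanity check of the arithmetic; the FIRST_SYSTEM_VECTOR value is assumed for illustration and depends on configuration: ]

enum {
	NR_VECTORS            = 256,
	FIRST_EXTERNAL_VECTOR = 0x20,
	FIRST_SYSTEM_VECTOR   = 0xec,	/* assumed; config-dependent */
	NR_EXTERNAL_VECTORS   = FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR,
	NR_SYSTEM_VECTORS     = NR_VECTORS - FIRST_SYSTEM_VECTOR,
};

_Static_assert(NR_EXTERNAL_VECTORS + NR_SYSTEM_VECTORS ==
	       NR_VECTORS - FIRST_EXTERNAL_VECTOR,
	       "external + system vectors partition the IDT tail");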
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 144d70ea4393..c5ce9845c999 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -109,18 +109,13 @@ static __always_inline unsigned long arch_local_irq_save(void)
}
#else
-#define ENABLE_INTERRUPTS(x) sti
-#define DISABLE_INTERRUPTS(x) cli
-
#ifdef CONFIG_X86_64
#ifdef CONFIG_DEBUG_ENTRY
-#define SAVE_FLAGS(x) pushfq; popq %rax
+#define SAVE_FLAGS pushfq; popq %rax
#endif
#define INTERRUPT_RETURN jmp native_iret
-#else
-#define INTERRUPT_RETURN iret
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 06c3cc22a058..0449b125d27f 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -4,14 +4,6 @@
#define HAVE_JUMP_LABEL_BATCH
-#define JUMP_LABEL_NOP_SIZE 5
-
-#ifdef CONFIG_X86_64
-# define STATIC_KEY_INIT_NOP P6_NOP5_ATOMIC
-#else
-# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
-#endif
-
#include <asm/asm.h>
#include <asm/nops.h>
@@ -20,15 +12,35 @@
#include <linux/stringify.h>
#include <linux/types.h>
+#define JUMP_TABLE_ENTRY \
+ ".pushsection __jump_table, \"aw\" \n\t" \
+ _ASM_ALIGN "\n\t" \
+ ".long 1b - . \n\t" \
+ ".long %l[l_yes] - . \n\t" \
+ _ASM_PTR "%c0 + %c1 - .\n\t" \
+ ".popsection \n\t"
+
+#ifdef CONFIG_STACK_VALIDATION
+
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm_volatile_goto("1:"
- ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
- ".pushsection __jump_table, \"aw\" \n\t"
- _ASM_ALIGN "\n\t"
- ".long 1b - ., %l[l_yes] - . \n\t"
- _ASM_PTR "%c0 + %c1 - .\n\t"
- ".popsection \n\t"
+ "jmp %l[l_yes] # objtool NOPs this \n\t"
+ JUMP_TABLE_ENTRY
+ : : "i" (key), "i" (2 | branch) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+#else
+
+static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
+{
+ asm_volatile_goto("1:"
+ ".byte " __stringify(BYTES_NOP5) "\n\t"
+ JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
return false;
@@ -36,16 +48,13 @@ l_yes:
return true;
}
-static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+#endif /* STACK_VALIDATION */
+
+static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{
asm_volatile_goto("1:"
- ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
- "2:\n\t"
- ".pushsection __jump_table, \"aw\" \n\t"
- _ASM_ALIGN "\n\t"
- ".long 1b - ., %l[l_yes] - . \n\t"
- _ASM_PTR "%c0 + %c1 - .\n\t"
- ".popsection \n\t"
+ "jmp %l[l_yes]\n\t"
+ JUMP_TABLE_ENTRY
: : "i" (key), "i" (branch) : : l_yes);
return false;
@@ -53,41 +62,7 @@ l_yes:
return true;
}
-#else /* __ASSEMBLY__ */
-
-.macro STATIC_JUMP_IF_TRUE target, key, def
-.Lstatic_jump_\@:
- .if \def
- /* Equivalent to "jmp.d32 \target" */
- .byte 0xe9
- .long \target - .Lstatic_jump_after_\@
-.Lstatic_jump_after_\@:
- .else
- .byte STATIC_KEY_INIT_NOP
- .endif
- .pushsection __jump_table, "aw"
- _ASM_ALIGN
- .long .Lstatic_jump_\@ - ., \target - .
- _ASM_PTR \key - .
- .popsection
-.endm
-
-.macro STATIC_JUMP_IF_FALSE target, key, def
-.Lstatic_jump_\@:
- .if \def
- .byte STATIC_KEY_INIT_NOP
- .else
- /* Equivalent to "jmp.d32 \target" */
- .byte 0xe9
- .long \target - .Lstatic_jump_after_\@
-.Lstatic_jump_after_\@:
- .endif
- .pushsection __jump_table, "aw"
- _ASM_ALIGN
- .long .Lstatic_jump_\@ - ., \target - .
- _ASM_PTR \key + 1 - .
- .popsection
-.endm
+extern int arch_jump_entry_size(struct jump_entry *entry);
#endif /* __ASSEMBLY__ */
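
[ For context, the consumer-side pattern these macros serve. DEFINE_STATIC_KEY_FALSE and static_branch_unlikely are the established jump-label API; example_feature and do_extra_work are invented for the sketch: ]

DEFINE_STATIC_KEY_FALSE(example_feature);

void hot_path(void)
{
	/* Compiles to a single 5-byte NOP (or JMP once the key is
	 * enabled): no load, no test, no conditional branch. */
	if (static_branch_unlikely(&example_feature))
		do_extra_work();
}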
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 6802c59e8252..0a6e34b07017 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -150,11 +150,6 @@ struct kimage_arch {
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
-
- /* Core ELF header buffer */
- void *elf_headers;
- unsigned long elf_headers_sz;
- unsigned long elf_load_addr;
};
#endif /* CONFIG_X86_32 */
diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h
index 97bbb4a9083a..05b48b33baf0 100644
--- a/arch/x86/include/asm/kfence.h
+++ b/arch/x86/include/asm/kfence.h
@@ -56,8 +56,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
else
set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
- /* Flush this CPU's TLB. */
+ /*
+ * Flush this CPU's TLB, assuming whoever did the allocation/free is
+ * likely to continue running on this CPU.
+ */
+ preempt_disable();
flush_tlb_one_kernel(addr);
+ preempt_enable();
return true;
}
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index d20a3d6be36e..bd7f5886a789 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -65,10 +65,22 @@ struct arch_specific_insn {
* a post_handler).
*/
unsigned boostable:1;
- unsigned if_modifier:1;
- unsigned is_call:1;
- unsigned is_pushf:1;
- unsigned is_abs_ip:1;
+ unsigned char size; /* The size of insn */
+ union {
+ unsigned char opcode;
+ struct {
+ unsigned char type;
+ } jcc;
+ struct {
+ unsigned char type;
+ unsigned char asize;
+ } loop;
+ struct {
+ unsigned char reg;
+ } indirect;
+ };
+ s32 rel32; /* relative offset must be s32, s16, or s8 */
+ void (*emulate_op)(struct kprobe *p, struct pt_regs *regs);
/* Number of bytes of text poked */
int tp_len;
};
@@ -107,7 +119,6 @@ extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
extern int kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data);
extern int kprobe_int3_handler(struct pt_regs *regs);
-extern int kprobe_debug_handler(struct pt_regs *regs);
#else
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 323641097f63..a12a4987154e 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -87,7 +87,10 @@ KVM_X86_OP(set_identity_map_addr)
KVM_X86_OP(get_mt_mask)
KVM_X86_OP(load_mmu_pgd)
KVM_X86_OP_NULL(has_wbinvd_exit)
-KVM_X86_OP(write_l1_tsc_offset)
+KVM_X86_OP(get_l2_tsc_offset)
+KVM_X86_OP(get_l2_tsc_multiplier)
+KVM_X86_OP(write_tsc_offset)
+KVM_X86_OP(write_tsc_multiplier)
KVM_X86_OP(get_exit_info)
KVM_X86_OP(check_intercept)
KVM_X86_OP(handle_exit_irqoff)
@@ -99,14 +102,15 @@ KVM_X86_OP_NULL(post_block)
KVM_X86_OP_NULL(vcpu_blocking)
KVM_X86_OP_NULL(vcpu_unblocking)
KVM_X86_OP_NULL(update_pi_irte)
+KVM_X86_OP_NULL(start_assignment)
KVM_X86_OP_NULL(apicv_post_state_restore)
KVM_X86_OP_NULL(dy_apicv_has_pending_interrupt)
KVM_X86_OP_NULL(set_hv_timer)
KVM_X86_OP_NULL(cancel_hv_timer)
KVM_X86_OP(setup_mce)
KVM_X86_OP(smi_allowed)
-KVM_X86_OP(pre_enter_smm)
-KVM_X86_OP(pre_leave_smm)
+KVM_X86_OP(enter_smm)
+KVM_X86_OP(leave_smm)
KVM_X86_OP(enable_smi_window)
KVM_X86_OP_NULL(mem_enc_op)
KVM_X86_OP_NULL(mem_enc_reg_region)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 877a4025d8da..974cbfb1eefe 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -85,7 +85,7 @@
#define KVM_REQ_APICV_UPDATE \
KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_TLB_FLUSH_CURRENT KVM_ARCH_REQ(26)
-#define KVM_REQ_HV_TLB_FLUSH \
+#define KVM_REQ_TLB_FLUSH_GUEST \
KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_APF_READY KVM_ARCH_REQ(28)
#define KVM_REQ_MSR_FILTER_CHANGED KVM_ARCH_REQ(29)
@@ -113,6 +113,7 @@
#define VALID_PAGE(x) ((x) != INVALID_PAGE)
#define UNMAPPED_GVA (~(gpa_t)0)
+#define INVALID_GPA (~(gpa_t)0)
/* KVM Hugepage definitions for x86 */
#define KVM_MAX_HUGEPAGE_LEVEL PG_LEVEL_1G
@@ -199,6 +200,7 @@ enum x86_intercept_stage;
#define KVM_NR_DB_REGS 4
+#define DR6_BUS_LOCK (1 << 11)
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
#define DR6_BT (1 << 15)
@@ -212,7 +214,7 @@ enum x86_intercept_stage;
* DR6_ACTIVE_LOW is also used as the init/reset value for DR6.
*/
#define DR6_ACTIVE_LOW 0xffff0ff0
-#define DR6_VOLATILE 0x0001e00f
+#define DR6_VOLATILE 0x0001e80f
#define DR6_FIXED_1 (DR6_ACTIVE_LOW & ~DR6_VOLATILE)
#define DR7_BP_EN_MASK 0x000000ff
@@ -221,12 +223,22 @@ enum x86_intercept_stage;
#define DR7_FIXED_1 0x00000400
#define DR7_VOLATILE 0xffff2bff
+#define KVM_GUESTDBG_VALID_MASK \
+ (KVM_GUESTDBG_ENABLE | \
+ KVM_GUESTDBG_SINGLESTEP | \
+ KVM_GUESTDBG_USE_HW_BP | \
+ KVM_GUESTDBG_USE_SW_BP | \
+ KVM_GUESTDBG_INJECT_BP | \
+ KVM_GUESTDBG_INJECT_DB)
+
+
#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
+#define PFERR_SGX_BIT 15
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33
@@ -236,6 +248,7 @@ enum x86_intercept_stage;
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
+#define PFERR_SGX_MASK (1U << PFERR_SGX_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
@@ -256,12 +269,36 @@ enum x86_intercept_stage;
struct kvm_kernel_irq_routing_entry;
/*
- * the pages used as guest page table on soft mmu are tracked by
- * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used
- * by indirect shadow page can not be more than 15 bits.
+ * kvm_mmu_page_role tracks the properties of a shadow page (where shadow page
+ * also includes TDP pages) to determine whether or not a page can be used in
+ * the given MMU context. This is a subset of the overall kvm_mmu_role to
+ * minimize the size of kvm_memory_slot.arch.gfn_track, i.e. allows allocating
+ * 2 bytes per gfn instead of 4 bytes per gfn.
+ *
+ * Indirect upper-level shadow pages are tracked for write-protection via
+ * gfn_track. As above, gfn_track is a 16 bit counter, so KVM must not create
+ * more than 2^16-1 upper-level shadow pages at a single gfn, otherwise
+ * gfn_track will overflow and explosions will ensue.
+ *
+ * A unique shadow page (SP) for a gfn is created if and only if an existing SP
+ * cannot be reused. The ability to reuse a SP is tracked by its role, which
+ * incorporates various mode bits and properties of the SP. Roughly speaking,
+ * the number of unique SPs that can theoretically be created is 2^n, where n
+ * is the number of bits that are used to compute the role.
+ *
+ * But, even though there are 18 bits in the mask below, not all combinations
+ * of modes and flags are possible. The maximum number of possible upper-level
+ * shadow pages for a single gfn is in the neighborhood of 2^13.
*
- * Currently, we used 14 bits that are @level, @gpte_is_8_bytes, @quadrant, @access,
- * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
+ * - invalid shadow pages are not accounted.
+ * - level is effectively limited to four combinations, not 16 as the number of
+ * bits would imply, as 4k SPs are not tracked (allowed to go unsync).
+ * - level is effectively unused for non-PAE paging because there is exactly
+ * one upper level (see 4k SP exception above).
+ * - quadrant is used only for non-PAE paging and is exclusive with
+ * gpte_is_8_bytes.
+ * - execonly and ad_disabled are used only for nested EPT, which makes it
+ * exclusive with quadrant.
*/
union kvm_mmu_page_role {
u32 word;
@@ -272,7 +309,7 @@ union kvm_mmu_page_role {
unsigned direct:1;
unsigned access:3;
unsigned invalid:1;
- unsigned nxe:1;
+ unsigned efer_nx:1;
unsigned cr0_wp:1;
unsigned smep_andnot_wp:1;
unsigned smap_andnot_wp:1;
@@ -290,13 +327,26 @@ union kvm_mmu_page_role {
};
};
-union kvm_mmu_extended_role {
/*
- * This structure complements kvm_mmu_page_role caching everything needed for
- * MMU configuration. If nothing in both these structures changed, MMU
- * re-configuration can be skipped. @valid bit is set on first usage so we don't
- * treat all-zero structure as valid data.
+ * kvm_mmu_extended_role complements kvm_mmu_page_role, tracking properties
+ * relevant to the current MMU configuration. When loading CR0, CR4, or EFER,
+ * including on nested transitions, if nothing in the full role changes then
+ * MMU re-configuration can be skipped. @valid bit is set on first usage so we
+ * don't treat all-zero structure as valid data.
+ *
+ * The properties that are tracked in the extended role but not the page role
+ * are for things that either (a) do not affect the validity of the shadow page
+ * or (b) are indirectly reflected in the shadow page's role. For example,
+ * CR4.PKE only affects permission checks for software walks of the guest page
+ * tables (because KVM doesn't support Protection Keys with shadow paging), and
+ * CR0.PG, CR4.PAE, and CR4.PSE are indirectly reflected in role.level.
+ *
+ * Note, SMEP and SMAP are not redundant with sm*p_andnot_wp in the page role.
+ * If CR0.WP=1, KVM can reuse shadow pages for the guest regardless of SMEP and
+ * SMAP, but the MMU's permission checks for software walks need to be SMEP and
+ * SMAP aware regardless of CR0.WP.
*/
+union kvm_mmu_extended_role {
u32 word;
struct {
unsigned int valid:1;
@@ -307,7 +357,7 @@ union kvm_mmu_extended_role {
unsigned int cr4_pke:1;
unsigned int cr4_smap:1;
unsigned int cr4_smep:1;
- unsigned int maxphyaddr:6;
+ unsigned int cr4_la57:1;
};
};
@@ -396,7 +446,7 @@ struct kvm_mmu {
u32 pkru_mask;
u64 *pae_root;
- u64 *lm_root;
+ u64 *pml4_root;
/*
* check zero bits on shadow page table entries, these
@@ -407,11 +457,6 @@ struct kvm_mmu {
struct rsvd_bits_validate guest_rsvd_check;
- /* Can have large pages at levels 2..last_nonleaf_level-1. */
- u8 last_nonleaf_level;
-
- bool nx;
-
u64 pdptrs[4]; /* pae */
};
@@ -530,6 +575,15 @@ struct kvm_vcpu_hv {
struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
cpumask_t tlb_flush;
+ bool enforce_cpuid;
+ struct {
+ u32 features_eax; /* HYPERV_CPUID_FEATURES.EAX */
+ u32 features_ebx; /* HYPERV_CPUID_FEATURES.EBX */
+ u32 features_edx; /* HYPERV_CPUID_FEATURES.EDX */
+ u32 enlightenments_eax; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
+ u32 enlightenments_ebx; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX */
+ u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
+ } cpuid_cache;
};
/* Xen HVM per vcpu emulation context */
@@ -694,7 +748,7 @@ struct kvm_vcpu_arch {
} st;
u64 l1_tsc_offset;
- u64 tsc_offset;
+ u64 tsc_offset; /* current tsc offset */
u64 last_guest_tsc;
u64 last_host_tsc;
u64 tsc_offset_adjustment;
@@ -708,7 +762,8 @@ struct kvm_vcpu_arch {
u32 virtual_tsc_khz;
s64 ia32_tsc_adjust_msr;
u64 msr_ia32_power_ctl;
- u64 tsc_scaling_ratio;
+ u64 l1_tsc_scaling_ratio;
+ u64 tsc_scaling_ratio; /* current scaling ratio */
atomic_t nmi_queued; /* unprocessed asynchronous NMIs */
unsigned nmi_pending; /* NMI queued after currently running handler */
@@ -816,7 +871,7 @@ struct kvm_vcpu_arch {
bool l1tf_flush_l1d;
/* Host CPU on which VM-entry was most recently attempted */
- unsigned int last_vmentry_cpu;
+ int last_vmentry_cpu;
/* AMD MSRC001_0015 Hardware Configuration */
u64 msr_hwcr;
@@ -838,6 +893,16 @@ struct kvm_vcpu_arch {
/* Protected Guests */
bool guest_state_protected;
+
+ /*
+ * Set when PDPTS were loaded directly by the userspace without
+ * reading the guest memory
+ */
+ bool pdptrs_from_userspace;
+
+#if IS_ENABLED(CONFIG_HYPERV)
+ hpa_t hv_root_tdp;
+#endif
};
struct kvm_lpage_info {
@@ -884,12 +949,29 @@ struct kvm_hv_syndbg {
u64 options;
};
+/* Current state of Hyper-V TSC page clocksource */
+enum hv_tsc_page_status {
+ /* TSC page was not set up or disabled */
+ HV_TSC_PAGE_UNSET = 0,
+ /* TSC page MSR was written by the guest, update pending */
+ HV_TSC_PAGE_GUEST_CHANGED,
+ /* TSC page MSR was written by KVM userspace, update pending */
+ HV_TSC_PAGE_HOST_CHANGED,
+ /* TSC page was properly set up and is currently active */
+ HV_TSC_PAGE_SET,
+ /* TSC page is currently being updated and therefore is inactive */
+ HV_TSC_PAGE_UPDATING,
+ /* TSC page was set up with an inaccessible GPA */
+ HV_TSC_PAGE_BROKEN,
+};
+
/* Hyper-V emulation context */
struct kvm_hv {
struct mutex hv_lock;
u64 hv_guest_os_id;
u64 hv_hypercall;
u64 hv_tsc_page;
+ enum hv_tsc_page_status hv_tsc_page_status;
/* Hyper-v based guest crash (NT kernel bugcheck) parameters */
u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
@@ -931,6 +1013,12 @@ enum kvm_irqchip_mode {
KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */
};
+struct kvm_x86_msr_filter {
+ u8 count;
+ bool default_allow:1;
+ struct msr_bitmap_range ranges[16];
+};
+
#define APICV_INHIBIT_REASON_DISABLE 0
#define APICV_INHIBIT_REASON_HYPERV 1
#define APICV_INHIBIT_REASON_NESTED 2
@@ -963,10 +1051,10 @@ struct kvm_arch {
struct kvm_pit *vpit;
atomic_t vapics_in_nmi_mode;
struct mutex apic_map_lock;
- struct kvm_apic_map *apic_map;
+ struct kvm_apic_map __rcu *apic_map;
atomic_t apic_map_dirty;
- bool apic_access_page_done;
+ bool apic_access_memslot_enabled;
unsigned long apicv_inhibit_reasons;
gpa_t wall_clock;
@@ -1025,18 +1113,24 @@ struct kvm_arch {
bool guest_can_read_msr_platform_info;
bool exception_payload_enabled;
+ bool bus_lock_detection_enabled;
+ /*
+ * If exit_on_emulation_error is set, and the in-kernel instruction
+ * emulator fails to emulate an instruction, allow userspace
+ * the opportunity to look at it.
+ */
+ bool exit_on_emulation_error;
+
/* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
u32 user_space_msr_mask;
+ struct kvm_x86_msr_filter __rcu *msr_filter;
- struct {
- u8 count;
- bool default_allow:1;
- struct msr_bitmap_range ranges[16];
- } msr_filter;
+ u32 hypercall_exit_enabled;
- bool bus_lock_detection_enabled;
+ /* Guest can access the SGX PROVISIONKEY. */
+ bool sgx_provisioning_allowed;
- struct kvm_pmu_event_filter *pmu_event_filter;
+ struct kvm_pmu_event_filter __rcu *pmu_event_filter;
struct task_struct *nx_lpage_recovery_thread;
#ifdef CONFIG_X86_64
@@ -1050,25 +1144,36 @@ struct kvm_arch {
bool tdp_mmu_enabled;
/*
- * List of struct kvmp_mmu_pages being used as roots.
+ * List of struct kvm_mmu_pages being used as roots.
* All struct kvm_mmu_pages in the list should have
* tdp_mmu_page set.
- * All struct kvm_mmu_pages in the list should have a positive
- * root_count except when a thread holds the MMU lock and is removing
- * an entry from the list.
+ *
+ * For reads, this list is protected by:
+ * the MMU lock in read mode + RCU or
+ * the MMU lock in write mode
+ *
+ * For writes, this list is protected by:
+ * the MMU lock in read mode + the tdp_mmu_pages_lock or
+ * the MMU lock in write mode
+ *
+ * Roots will remain in the list until their tdp_mmu_root_count
+ * drops to zero, at which point the thread that decremented the
+ * count to zero should removed the root from the list and clean
+ * it up, freeing the root after an RCU grace period.
*/
struct list_head tdp_mmu_roots;
/*
* List of struct kvmp_mmu_pages not being used as roots.
* All struct kvm_mmu_pages in the list should have
- * tdp_mmu_page set and a root_count of 0.
+ * tdp_mmu_page set and a tdp_mmu_root_count of 0.
*/
struct list_head tdp_mmu_pages;
/*
* Protects accesses to the following fields when the MMU lock
* is held in read mode:
+ * - tdp_mmu_roots (above)
* - tdp_mmu_pages (above)
* - the link field of struct kvm_mmu_pages used by the TDP MMU
* - lpage_disallowed_mmu_pages
@@ -1079,23 +1184,35 @@ struct kvm_arch {
*/
spinlock_t tdp_mmu_pages_lock;
#endif /* CONFIG_X86_64 */
+
+ /*
+ * If set, rmaps have been allocated for all memslots and should be
+ * allocated for any newly created or modified memslots.
+ */
+ bool memslots_have_rmaps;
+
+#if IS_ENABLED(CONFIG_HYPERV)
+ hpa_t hv_root_tdp;
+ spinlock_t hv_root_tdp_lock;
+#endif
};
struct kvm_vm_stat {
- ulong mmu_shadow_zapped;
- ulong mmu_pte_write;
- ulong mmu_pde_zapped;
- ulong mmu_flooded;
- ulong mmu_recycled;
- ulong mmu_cache_miss;
- ulong mmu_unsync;
- ulong remote_tlb_flush;
- ulong lpages;
- ulong nx_lpage_splits;
- ulong max_mmu_page_hash_collisions;
+ struct kvm_vm_stat_generic generic;
+ u64 mmu_shadow_zapped;
+ u64 mmu_pte_write;
+ u64 mmu_pde_zapped;
+ u64 mmu_flooded;
+ u64 mmu_recycled;
+ u64 mmu_cache_miss;
+ u64 mmu_unsync;
+ u64 lpages;
+ u64 nx_lpage_splits;
+ u64 max_mmu_page_hash_collisions;
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
u64 pf_fixed;
u64 pf_guest;
u64 tlb_flush;
@@ -1109,10 +1226,6 @@ struct kvm_vcpu_stat {
u64 nmi_window_exits;
u64 l1d_flush;
u64 halt_exits;
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
u64 request_irq_exits;
u64 irq_exits;
u64 host_state_reload;
@@ -1123,8 +1236,10 @@ struct kvm_vcpu_stat {
u64 irq_injections;
u64 nmi_injections;
u64 req_event;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
+ u64 nested_run;
+ u64 directed_yield_attempted;
+ u64 directed_yield_successful;
+ u64 guest_mode;
};
struct x86_instruction_info;
@@ -1251,13 +1366,15 @@ struct kvm_x86_ops {
int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
- void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, unsigned long pgd,
- int pgd_level);
+ void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
+ int root_level);
bool (*has_wbinvd_exit)(void);
- /* Returns actual tsc_offset set in active VMCS */
- u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+ u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
+ u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
+ void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+ void (*write_tsc_multiplier)(struct kvm_vcpu *vcpu, u64 multiplier);
/*
* Retrieve somewhat arbitrary exit information. Intended to be used
@@ -1304,6 +1421,7 @@ struct kvm_x86_ops {
int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
+ void (*start_assignment)(struct kvm *kvm);
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
@@ -1314,13 +1432,14 @@ struct kvm_x86_ops {
void (*setup_mce)(struct kvm_vcpu *vcpu);
int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
- int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
- int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
+ int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
+ int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
void (*enable_smi_window)(struct kvm_vcpu *vcpu);
int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
+ int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
int (*get_msr_feature)(struct kvm_msr_entry *entry);
@@ -1339,6 +1458,7 @@ struct kvm_x86_ops {
struct kvm_x86_nested_ops {
int (*check_events)(struct kvm_vcpu *vcpu);
bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
+ void (*triple_fault)(struct kvm_vcpu *vcpu);
int (*get_state)(struct kvm_vcpu *vcpu,
struct kvm_nested_state __user *user_kvm_nested_state,
unsigned user_data_size);
@@ -1369,8 +1489,10 @@ struct kvm_arch_async_pf {
bool direct_map;
};
+extern u32 __read_mostly kvm_nr_uret_msrs;
extern u64 __read_mostly host_efer;
extern bool __read_mostly allow_smaller_maxphyaddr;
+extern bool __read_mostly enable_apicv;
extern struct kvm_x86_ops kvm_x86_ops;
#define KVM_X86_OP(func) \
@@ -1410,10 +1532,8 @@ void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
-void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
- u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
- u64 acc_track_mask, u64 me_mask);
+void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu);
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot,
@@ -1422,15 +1542,12 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
const struct kvm_memory_slot *memslot);
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot);
-void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
- struct kvm_memory_slot *memslot);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
-bool pdptrs_changed(struct kvm_vcpu *vcpu);
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes);
@@ -1470,7 +1587,7 @@ extern u64 kvm_mce_cap_supported;
/*
* EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
* userspace I/O) to indicate that the emulation context
- * should be resued as is, i.e. skip initialization of
+ * should be reused as is, i.e. skip initialization of
* emulation context, instruction fetch and decode.
*
* EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
@@ -1495,7 +1612,7 @@ extern u64 kvm_mce_cap_supported;
*
* EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
* backdoor emulation, which is opt in via module param.
- * VMware backoor emulation handles select instructions
+ * VMware backdoor emulation handles select instructions
* and reinjects the #GP for all other cases.
*
* EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
@@ -1520,6 +1637,11 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
+int kvm_emulate_as_nop(struct kvm_vcpu *vcpu);
+int kvm_emulate_invd(struct kvm_vcpu *vcpu);
+int kvm_emulate_mwait(struct kvm_vcpu *vcpu);
+int kvm_handle_invalid_op(struct kvm_vcpu *vcpu);
+int kvm_emulate_monitor(struct kvm_vcpu *vcpu);
int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
@@ -1548,14 +1670,14 @@ void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
-int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
+int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
-bool kvm_rdpmc(struct kvm_vcpu *vcpu);
+int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu);
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
@@ -1596,11 +1718,9 @@ void kvm_update_dr7(struct kvm_vcpu *vcpu);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
-int kvm_mmu_load(struct kvm_vcpu *vcpu);
-void kvm_mmu_unload(struct kvm_vcpu *vcpu);
-void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
ulong roots_to_free);
+void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
@@ -1613,7 +1733,6 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception);
bool kvm_apicv_activated(struct kvm *kvm);
-void kvm_apicv_init(struct kvm *kvm, bool enable);
void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
void kvm_request_apicv_update(struct kvm *kvm, bool activate,
unsigned long bit);
@@ -1626,8 +1745,7 @@ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gva_t gva, hpa_t root_hpa);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
-void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
- bool skip_mmu_sync);
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd);
void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
int tdp_huge_page_level);
@@ -1717,11 +1835,7 @@ asmlinkage void kvm_spurious_fault(void);
_ASM_EXTABLE(666b, 667b)
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
- unsigned flags);
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
+
int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_cpu_has_extint(struct kvm_vcpu *v);
@@ -1734,11 +1848,19 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
unsigned long ipi_bitmap_high, u32 min,
unsigned long icr, int op_64_bit);
-void kvm_define_user_return_msr(unsigned index, u32 msr);
+int kvm_add_user_return_msr(u32 msr);
+int kvm_find_user_return_msr(u32 msr);
int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
-u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
+static inline bool kvm_is_supported_user_return_msr(u32 msr)
+{
+ return kvm_find_user_return_msr(msr) >= 0;
+}
+
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio);
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
+u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier);
+u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier);
unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
@@ -1812,4 +1934,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
int kvm_cpu_dirty_log_size(void);
+int alloc_all_memslots_rmaps(struct kvm *kvm);
+
#endif /* _ASM_X86_KVM_HOST_H */
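Note: the kvm_host.h hunks above replace the fixed-slot kvm_define_user_return_msr() with a registry-style kvm_add_user_return_msr()/kvm_find_user_return_msr() pair, with kvm_is_supported_user_return_msr() reducing to a lookup test. A minimal user-space sketch of that registry pattern follows; every name in it is illustrative, not KVM's actual implementation.

/* Sketch of a user-return MSR registry mirroring the API shape above. */
#include <stdio.h>

#define MAX_URET_MSRS 16

static unsigned int uret_msrs[MAX_URET_MSRS];
static int nr_uret_msrs;

/* Register an MSR; returns its slot index, or -1 if the table is full. */
static int add_user_return_msr(unsigned int msr)
{
	if (nr_uret_msrs >= MAX_URET_MSRS)
		return -1;
	uret_msrs[nr_uret_msrs] = msr;
	return nr_uret_msrs++;
}

/* Look up a previously registered MSR; returns its slot or -1. */
static int find_user_return_msr(unsigned int msr)
{
	for (int i = 0; i < nr_uret_msrs; i++)
		if (uret_msrs[i] == msr)
			return i;
	return -1;
}

/* The inline helper in the hunk is just a ">= 0" test on the lookup. */
static int is_supported_user_return_msr(unsigned int msr)
{
	return find_user_return_msr(msr) >= 0;
}

int main(void)
{
	int slot = add_user_return_msr(0xc0000080); /* MSR_EFER, for example */

	printf("slot=%d supported=%d\n", slot,
	       is_supported_user_return_msr(0xc0000080));
	return 0;
}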
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 338119852512..69299878b200 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -7,8 +7,6 @@
#include <linux/interrupt.h>
#include <uapi/asm/kvm_para.h>
-extern void kvmclock_init(void);
-
#ifdef CONFIG_KVM_GUEST
bool kvm_check_and_clear_guest_paused(void);
#else
@@ -86,13 +84,14 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
}
#ifdef CONFIG_KVM_GUEST
+void kvmclock_init(void);
+void kvmclock_disable(void);
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
unsigned int kvm_arch_para_hints(void);
void kvm_async_pf_task_wait_schedule(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_apf_flags(void);
-void kvm_disable_steal_time(void);
bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);
DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
@@ -137,11 +136,6 @@ static inline u32 kvm_read_and_reset_apf_flags(void)
return 0;
}
-static inline void kvm_disable_steal_time(void)
-{
- return;
-}
-
static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
return false;
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index ddfb3cad8dff..0607ec4f5091 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -305,7 +305,7 @@ extern void apei_mce_report_mem_error(int corrected,
/* These may be used by multiple smca_hwid_mcatypes */
enum smca_bank_types {
SMCA_LS = 0, /* Load Store */
- SMCA_LS_V2, /* Load Store */
+ SMCA_LS_V2,
SMCA_IF, /* Instruction Fetch */
SMCA_L2_CACHE, /* L2 Cache */
SMCA_DE, /* Decoder Unit */
@@ -314,17 +314,22 @@ enum smca_bank_types {
SMCA_FP, /* Floating Point */
SMCA_L3_CACHE, /* L3 Cache */
SMCA_CS, /* Coherent Slave */
- SMCA_CS_V2, /* Coherent Slave */
+ SMCA_CS_V2,
SMCA_PIE, /* Power, Interrupts, etc. */
SMCA_UMC, /* Unified Memory Controller */
+ SMCA_UMC_V2,
SMCA_PB, /* Parameter Block */
SMCA_PSP, /* Platform Security Processor */
- SMCA_PSP_V2, /* Platform Security Processor */
+ SMCA_PSP_V2,
SMCA_SMU, /* System Management Unit */
- SMCA_SMU_V2, /* System Management Unit */
+ SMCA_SMU_V2,
SMCA_MP5, /* Microprocessor 5 Unit */
SMCA_NBIO, /* Northbridge IO Unit */
SMCA_PCIE, /* PCI Express Unit */
+ SMCA_PCIE_V2,
+ SMCA_XGMI_PCS, /* xGMI PCS Unit */
+ SMCA_XGMI_PHY, /* xGMI PHY Unit */
+ SMCA_WAFL_PHY, /* WAFL PHY Unit */
N_SMCA_BANK_TYPES
};
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 31c4df123aa0..9c80c68d75b5 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -20,7 +20,6 @@
extern u64 sme_me_mask;
extern u64 sev_status;
-extern bool sev_enabled;
void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr,
unsigned long decrypted_kernel_vaddr,
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index ccf60a809a17..67ff0d637e55 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -9,70 +9,29 @@
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
+#include <asm/mshyperv.h>
typedef int (*hyperv_fill_flush_list_func)(
struct hv_guest_mapping_flush_list *flush,
void *data);
-#define hv_init_timer(timer, tick) \
- wrmsrl(HV_X64_MSR_STIMER0_COUNT + (2*timer), tick)
-#define hv_init_timer_config(timer, val) \
- wrmsrl(HV_X64_MSR_STIMER0_CONFIG + (2*timer), val)
-
-#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
-#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)
-
-#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
-#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)
-
-#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
-#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)
-
-#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)
-
-#define hv_signal_eom() wrmsrl(HV_X64_MSR_EOM, 0)
-
-#define hv_get_synint_state(int_num, val) \
- rdmsrl(HV_X64_MSR_SINT0 + int_num, val)
-#define hv_set_synint_state(int_num, val) \
- wrmsrl(HV_X64_MSR_SINT0 + int_num, val)
-#define hv_recommend_using_aeoi() \
- (!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED))
+static inline void hv_set_register(unsigned int reg, u64 value)
+{
+ wrmsrl(reg, value);
+}
-#define hv_get_crash_ctl(val) \
- rdmsrl(HV_X64_MSR_CRASH_CTL, val)
+static inline u64 hv_get_register(unsigned int reg)
+{
+ u64 value;
-#define hv_get_time_ref_count(val) \
- rdmsrl(HV_X64_MSR_TIME_REF_COUNT, val)
+ rdmsrl(reg, value);
+ return value;
+}
-#define hv_get_reference_tsc(val) \
- rdmsrl(HV_X64_MSR_REFERENCE_TSC, val)
-#define hv_set_reference_tsc(val) \
- wrmsrl(HV_X64_MSR_REFERENCE_TSC, val)
-#define hv_set_clocksource_vdso(val) \
- ((val).vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK)
-#define hv_enable_vdso_clocksource() \
- vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
#define hv_get_raw_timer() rdtsc_ordered()
-#define hv_get_vector() HYPERVISOR_CALLBACK_VECTOR
-
-/*
- * Reference to pv_ops must be inline so objtool
- * detection of noinstr violations can work correctly.
- */
-static __always_inline void hv_setup_sched_clock(void *sched_clock)
-{
-#ifdef CONFIG_PARAVIRT
- pv_ops.time.sched_clock = sched_clock;
-#endif
-}
void hyperv_vector_handler(struct pt_regs *regs);
-static inline void hv_enable_stimer0_percpu_irq(int irq) {}
-static inline void hv_disable_stimer0_percpu_irq(int irq) {}
-
-
#if IS_ENABLED(CONFIG_HYPERV)
extern int hyperv_init_cpuhp;
@@ -189,38 +148,6 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
return hv_status;
}
-/*
- * Rep hypercalls. Callers of this functions are supposed to ensure that
- * rep_count and varhead_size comply with Hyper-V hypercall definition.
- */
-static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
- void *input, void *output)
-{
- u64 control = code;
- u64 status;
- u16 rep_comp;
-
- control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
- control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
-
- do {
- status = hv_do_hypercall(control, input, output);
- if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
- return status;
-
- /* Bits 32-43 of status have 'Reps completed' data. */
- rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >>
- HV_HYPERCALL_REP_COMP_OFFSET;
-
- control &= ~HV_HYPERCALL_REP_START_MASK;
- control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;
-
- touch_nmi_watchdog();
- } while (rep_comp < rep_count);
-
- return status;
-}
-
extern struct hv_vp_assist_page **hv_vp_assist_page;
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
@@ -233,9 +160,6 @@ static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
-void *hv_alloc_hyperv_page(void);
-void *hv_alloc_hyperv_zeroed_page(void);
-void hv_free_hyperv_page(unsigned long addr);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
@@ -272,8 +196,6 @@ int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
-static inline void *hv_alloc_hyperv_page(void) { return NULL; }
-static inline void hv_free_hyperv_page(unsigned long addr) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {};
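Note: the mshyperv.h hunk collapses roughly twenty per-MSR macros into the two generic hv_set_register()/hv_get_register() accessors, so old call sites translate mechanically. A standalone sketch of that translation follows; the fake_msr array stands in for real wrmsrl()/rdmsrl() accesses, and the HV_X64_MSR_STIMER0_COUNT value is quoted from the Hyper-V TLFS as an assumption.

#include <stdio.h>
#include <stdint.h>

#define HV_X64_MSR_STIMER0_COUNT 0x400000B1u	/* per the Hyper-V TLFS */

static uint64_t fake_msr[4096];			/* stand-in MSR file */

static void hv_set_register(unsigned int reg, uint64_t value)
{
	fake_msr[reg & 0xfff] = value;	/* stand-in for wrmsrl(reg, value) */
}

static uint64_t hv_get_register(unsigned int reg)
{
	return fake_msr[reg & 0xfff];	/* stand-in for rdmsrl(reg, value) */
}

/* The old hv_init_timer(timer, tick) macro folds into one generic call: */
static void hv_init_timer(int timer, uint64_t tick)
{
	hv_set_register(HV_X64_MSR_STIMER0_COUNT + 2 * timer, tick);
}

int main(void)
{
	hv_init_timer(0, 12345);
	printf("STIMER0_COUNT=%llu\n",
	       (unsigned long long)hv_get_register(HV_X64_MSR_STIMER0_COUNT));
	return 0;
}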
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 546d6ecf0a35..a7c413432b33 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -185,6 +185,9 @@
#define MSR_PEBS_DATA_CFG 0x000003f2
#define MSR_IA32_DS_AREA 0x00000600
#define MSR_IA32_PERF_CAPABILITIES 0x00000345
+#define PERF_CAP_METRICS_IDX 15
+#define PERF_CAP_PT_IDX 16
+
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
#define MSR_IA32_RTIT_CTL 0x00000570
@@ -265,6 +268,7 @@
#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
#define DEBUGCTLMSR_BTF_SHIFT 1
#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
+#define DEBUGCTLMSR_BUS_LOCK_DETECT (1UL << 2)
#define DEBUGCTLMSR_TR (1UL << 6)
#define DEBUGCTLMSR_BTS (1UL << 7)
#define DEBUGCTLMSR_BTINT (1UL << 8)
@@ -533,9 +537,9 @@
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
#define MSR_K8_TOP_MEM2 0xc001001d
-#define MSR_K8_SYSCFG 0xc0010010
-#define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT 23
-#define MSR_K8_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
+#define MSR_AMD64_SYSCFG 0xc0010010
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT 23
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT)
#define MSR_K8_INT_PENDING_MSG 0xc0010055
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
@@ -628,8 +632,6 @@
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-#define MSR_IA32_TSCDEADLINE 0x000006e0
-
#define MSR_IA32_UCODE_WRITE 0x00000079
#define MSR_IA32_UCODE_REV 0x0000008b
@@ -770,6 +772,10 @@
#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
+#define MSR_TFA_TSX_CPUID_CLEAR_BIT 1
+#define MSR_TFA_TSX_CPUID_CLEAR BIT_ULL(MSR_TFA_TSX_CPUID_CLEAR_BIT)
+#define MSR_TFA_SDV_ENABLE_RTM_BIT 2
+#define MSR_TFA_SDV_ENABLE_RTM BIT_ULL(MSR_TFA_SDV_ENABLE_RTM_BIT)
/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index e16cccdd0420..a3f87f1015d3 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -324,10 +324,6 @@ static inline int wrmsrl_safe(u32 msr, u64 val)
return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}
-#define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
-
-#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)
-
struct msr *msrs_alloc(void);
void msrs_free(struct msr *msrs);
int msr_set_bit(u32 msr, u8 bit);
diff --git a/arch/x86/include/asm/nops.h b/arch/x86/include/asm/nops.h
index 12f12b5cf2ca..c5573eaa5bb9 100644
--- a/arch/x86/include/asm/nops.h
+++ b/arch/x86/include/asm/nops.h
@@ -2,146 +2,76 @@
#ifndef _ASM_X86_NOPS_H
#define _ASM_X86_NOPS_H
+#include <asm/asm.h>
+
/*
* Define nops for use with alternative() and for tracing.
- *
- * *_NOP5_ATOMIC must be a single instruction.
*/
-#define NOP_DS_PREFIX 0x3e
+#ifndef CONFIG_64BIT
-/* generic versions from gas
- 1: nop
- the following instructions are NOT nops in 64-bit mode,
- for 64-bit mode use K8 or P6 nops instead
- 2: movl %esi,%esi
- 3: leal 0x00(%esi),%esi
- 4: leal 0x00(,%esi,1),%esi
- 6: leal 0x00000000(%esi),%esi
- 7: leal 0x00000000(,%esi,1),%esi
-*/
-#define GENERIC_NOP1 0x90
-#define GENERIC_NOP2 0x89,0xf6
-#define GENERIC_NOP3 0x8d,0x76,0x00
-#define GENERIC_NOP4 0x8d,0x74,0x26,0x00
-#define GENERIC_NOP5 GENERIC_NOP1,GENERIC_NOP4
-#define GENERIC_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00
-#define GENERIC_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00
-#define GENERIC_NOP8 GENERIC_NOP1,GENERIC_NOP7
-#define GENERIC_NOP5_ATOMIC NOP_DS_PREFIX,GENERIC_NOP4
-
-/* Opteron 64bit nops
- 1: nop
- 2: osp nop
- 3: osp osp nop
- 4: osp osp osp nop
-*/
-#define K8_NOP1 GENERIC_NOP1
-#define K8_NOP2 0x66,K8_NOP1
-#define K8_NOP3 0x66,K8_NOP2
-#define K8_NOP4 0x66,K8_NOP3
-#define K8_NOP5 K8_NOP3,K8_NOP2
-#define K8_NOP6 K8_NOP3,K8_NOP3
-#define K8_NOP7 K8_NOP4,K8_NOP3
-#define K8_NOP8 K8_NOP4,K8_NOP4
-#define K8_NOP5_ATOMIC 0x66,K8_NOP4
+/*
+ * Generic 32bit nops from GAS:
+ *
+ * 1: nop
+ * 2: movl %esi,%esi
+ * 3: leal 0x0(%esi),%esi
+ * 4: leal 0x0(%esi,%eiz,1),%esi
+ * 5: leal %ds:0x0(%esi,%eiz,1),%esi
+ * 6: leal 0x0(%esi),%esi
+ * 7: leal 0x0(%esi,%eiz,1),%esi
+ * 8: leal %ds:0x0(%esi,%eiz,1),%esi
+ *
+ * Except 5 and 8, which are DS prefixed 4 and 7 resp, where GAS would emit 2
+ * nop instructions.
+ */
+#define BYTES_NOP1 0x90
+#define BYTES_NOP2 0x89,0xf6
+#define BYTES_NOP3 0x8d,0x76,0x00
+#define BYTES_NOP4 0x8d,0x74,0x26,0x00
+#define BYTES_NOP5 0x3e,BYTES_NOP4
+#define BYTES_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00
+#define BYTES_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00
+#define BYTES_NOP8 0x3e,BYTES_NOP7
-/* K7 nops
- uses eax dependencies (arbitrary choice)
- 1: nop
- 2: movl %eax,%eax
- 3: leal (,%eax,1),%eax
- 4: leal 0x00(,%eax,1),%eax
- 6: leal 0x00000000(%eax),%eax
- 7: leal 0x00000000(,%eax,1),%eax
-*/
-#define K7_NOP1 GENERIC_NOP1
-#define K7_NOP2 0x8b,0xc0
-#define K7_NOP3 0x8d,0x04,0x20
-#define K7_NOP4 0x8d,0x44,0x20,0x00
-#define K7_NOP5 K7_NOP4,K7_NOP1
-#define K7_NOP6 0x8d,0x80,0,0,0,0
-#define K7_NOP7 0x8D,0x04,0x05,0,0,0,0
-#define K7_NOP8 K7_NOP7,K7_NOP1
-#define K7_NOP5_ATOMIC NOP_DS_PREFIX,K7_NOP4
+#else
-/* P6 nops
- uses eax dependencies (Intel-recommended choice)
- 1: nop
- 2: osp nop
- 3: nopl (%eax)
- 4: nopl 0x00(%eax)
- 5: nopl 0x00(%eax,%eax,1)
- 6: osp nopl 0x00(%eax,%eax,1)
- 7: nopl 0x00000000(%eax)
- 8: nopl 0x00000000(%eax,%eax,1)
- Note: All the above are assumed to be a single instruction.
- There is kernel code that depends on this.
-*/
-#define P6_NOP1 GENERIC_NOP1
-#define P6_NOP2 0x66,0x90
-#define P6_NOP3 0x0f,0x1f,0x00
-#define P6_NOP4 0x0f,0x1f,0x40,0
-#define P6_NOP5 0x0f,0x1f,0x44,0x00,0
-#define P6_NOP6 0x66,0x0f,0x1f,0x44,0x00,0
-#define P6_NOP7 0x0f,0x1f,0x80,0,0,0,0
-#define P6_NOP8 0x0f,0x1f,0x84,0x00,0,0,0,0
-#define P6_NOP5_ATOMIC P6_NOP5
+/*
+ * Generic 64bit nops from GAS:
+ *
+ * 1: nop
+ * 2: osp nop
+ * 3: nopl (%eax)
+ * 4: nopl 0x00(%eax)
+ * 5: nopl 0x00(%eax,%eax,1)
+ * 6: osp nopl 0x00(%eax,%eax,1)
+ * 7: nopl 0x00000000(%eax)
+ * 8: nopl 0x00000000(%eax,%eax,1)
+ */
+#define BYTES_NOP1 0x90
+#define BYTES_NOP2 0x66,BYTES_NOP1
+#define BYTES_NOP3 0x0f,0x1f,0x00
+#define BYTES_NOP4 0x0f,0x1f,0x40,0x00
+#define BYTES_NOP5 0x0f,0x1f,0x44,0x00,0x00
+#define BYTES_NOP6 0x66,BYTES_NOP5
+#define BYTES_NOP7 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00
+#define BYTES_NOP8 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
-#ifdef __ASSEMBLY__
-#define _ASM_MK_NOP(x) .byte x
-#else
-#define _ASM_MK_NOP(x) ".byte " __stringify(x) "\n"
-#endif
+#endif /* CONFIG_64BIT */
-#if defined(CONFIG_MK7)
-#define ASM_NOP1 _ASM_MK_NOP(K7_NOP1)
-#define ASM_NOP2 _ASM_MK_NOP(K7_NOP2)
-#define ASM_NOP3 _ASM_MK_NOP(K7_NOP3)
-#define ASM_NOP4 _ASM_MK_NOP(K7_NOP4)
-#define ASM_NOP5 _ASM_MK_NOP(K7_NOP5)
-#define ASM_NOP6 _ASM_MK_NOP(K7_NOP6)
-#define ASM_NOP7 _ASM_MK_NOP(K7_NOP7)
-#define ASM_NOP8 _ASM_MK_NOP(K7_NOP8)
-#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K7_NOP5_ATOMIC)
-#elif defined(CONFIG_X86_P6_NOP)
-#define ASM_NOP1 _ASM_MK_NOP(P6_NOP1)
-#define ASM_NOP2 _ASM_MK_NOP(P6_NOP2)
-#define ASM_NOP3 _ASM_MK_NOP(P6_NOP3)
-#define ASM_NOP4 _ASM_MK_NOP(P6_NOP4)
-#define ASM_NOP5 _ASM_MK_NOP(P6_NOP5)
-#define ASM_NOP6 _ASM_MK_NOP(P6_NOP6)
-#define ASM_NOP7 _ASM_MK_NOP(P6_NOP7)
-#define ASM_NOP8 _ASM_MK_NOP(P6_NOP8)
-#define ASM_NOP5_ATOMIC _ASM_MK_NOP(P6_NOP5_ATOMIC)
-#elif defined(CONFIG_X86_64)
-#define ASM_NOP1 _ASM_MK_NOP(K8_NOP1)
-#define ASM_NOP2 _ASM_MK_NOP(K8_NOP2)
-#define ASM_NOP3 _ASM_MK_NOP(K8_NOP3)
-#define ASM_NOP4 _ASM_MK_NOP(K8_NOP4)
-#define ASM_NOP5 _ASM_MK_NOP(K8_NOP5)
-#define ASM_NOP6 _ASM_MK_NOP(K8_NOP6)
-#define ASM_NOP7 _ASM_MK_NOP(K8_NOP7)
-#define ASM_NOP8 _ASM_MK_NOP(K8_NOP8)
-#define ASM_NOP5_ATOMIC _ASM_MK_NOP(K8_NOP5_ATOMIC)
-#else
-#define ASM_NOP1 _ASM_MK_NOP(GENERIC_NOP1)
-#define ASM_NOP2 _ASM_MK_NOP(GENERIC_NOP2)
-#define ASM_NOP3 _ASM_MK_NOP(GENERIC_NOP3)
-#define ASM_NOP4 _ASM_MK_NOP(GENERIC_NOP4)
-#define ASM_NOP5 _ASM_MK_NOP(GENERIC_NOP5)
-#define ASM_NOP6 _ASM_MK_NOP(GENERIC_NOP6)
-#define ASM_NOP7 _ASM_MK_NOP(GENERIC_NOP7)
-#define ASM_NOP8 _ASM_MK_NOP(GENERIC_NOP8)
-#define ASM_NOP5_ATOMIC _ASM_MK_NOP(GENERIC_NOP5_ATOMIC)
-#endif
+#define ASM_NOP1 _ASM_BYTES(BYTES_NOP1)
+#define ASM_NOP2 _ASM_BYTES(BYTES_NOP2)
+#define ASM_NOP3 _ASM_BYTES(BYTES_NOP3)
+#define ASM_NOP4 _ASM_BYTES(BYTES_NOP4)
+#define ASM_NOP5 _ASM_BYTES(BYTES_NOP5)
+#define ASM_NOP6 _ASM_BYTES(BYTES_NOP6)
+#define ASM_NOP7 _ASM_BYTES(BYTES_NOP7)
+#define ASM_NOP8 _ASM_BYTES(BYTES_NOP8)
#define ASM_NOP_MAX 8
-#define NOP_ATOMIC5 (ASM_NOP_MAX+1) /* Entry for the 5-byte atomic NOP */
#ifndef __ASSEMBLY__
-extern const unsigned char * const *ideal_nops;
-extern void arch_init_ideal_nops(void);
+extern const unsigned char * const x86_nops[];
#endif
#endif /* _ASM_X86_NOPS_H */
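Note: the rewritten nops.h keeps exactly one canonical NOP encoding per length instead of the per-vendor K7/K8/P6 families. The quick dump below lists the 64-bit BYTES_NOP1..BYTES_NOP8 sequences verbatim from the hunk, one row per length (trailing zero padding in short rows is unused).

#include <stdio.h>

static const unsigned char nops[8][8] = {
	{ 0x90 },						/* nop */
	{ 0x66, 0x90 },						/* osp nop */
	{ 0x0f, 0x1f, 0x00 },					/* nopl (%eax) */
	{ 0x0f, 0x1f, 0x40, 0x00 },				/* nopl 0x00(%eax) */
	{ 0x0f, 0x1f, 0x44, 0x00, 0x00 },			/* nopl 0x00(%eax,%eax,1) */
	{ 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 },			/* osp nopl 0x00(%eax,%eax,1) */
	{ 0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00 },		/* nopl 0x00000000(%eax) */
	{ 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00 },	/* nopl 0x00000000(%eax,%eax,1) */
};

int main(void)
{
	for (int len = 1; len <= 8; len++) {
		printf("NOP%d:", len);
		for (int i = 0; i < len; i++)
			printf(" %02x", nops[len - 1][i]);
		printf("\n");
	}
	return 0;
}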
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index cb9ad6b73973..3ad8c6d3cbb3 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -7,7 +7,6 @@
#include <linux/objtool.h>
#include <asm/alternative.h>
-#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
@@ -33,7 +32,7 @@
/*
* Google experimented with loop-unrolling and this turned out to be
- * the optimal version — two calls, each with their own speculation
+ * the optimal version - two calls, each with their own speculation
* trap should their return address end up getting used, in a loop.
*/
#define __FILL_RETURN_BUFFER(reg, nr, sp) \
@@ -81,7 +80,7 @@
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
- __stringify(jmp __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \
+ __stringify(jmp __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD
#else
jmp *%\reg
@@ -91,7 +90,7 @@
.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *%\reg), \
- __stringify(call __x86_retpoline_\reg), X86_FEATURE_RETPOLINE, \
+ __stringify(call __x86_indirect_thunk_\reg), X86_FEATURE_RETPOLINE, \
__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *%\reg), X86_FEATURE_RETPOLINE_AMD
#else
call *%\reg
@@ -129,7 +128,7 @@
ALTERNATIVE_2( \
ANNOTATE_RETPOLINE_SAFE \
"call *%[thunk_target]\n", \
- "call __x86_retpoline_%V[thunk_target]\n", \
+ "call __x86_indirect_thunk_%V[thunk_target]\n", \
X86_FEATURE_RETPOLINE, \
"lfence;\n" \
ANNOTATE_RETPOLINE_SAFE \
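Note: the rename above folds the __x86_retpoline_<reg> symbols into the common __x86_indirect_thunk_<reg> namespace. For orientation, a sketch of the speculation-trap body such a thunk carries is shown below as top-level GCC asm (compiles standalone with "cc -c" on x86-64); "demo_indirect_thunk_rax" is a made-up symbol, and the kernel's real thunks live in arch/x86/lib/retpoline.S.

asm(
"	.text\n"
"	.globl	demo_indirect_thunk_rax\n"
"demo_indirect_thunk_rax:\n"
"	call	1f\n"			/* push a benign return address */
"2:	pause\n"			/* speculative return lands here */
"	lfence\n"
"	jmp	2b\n"			/* ... and spins harmlessly */
"1:	mov	%rax, (%rsp)\n"		/* retarget the return to *%rax */
"	ret\n"
);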
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 7555b48803a8..4d5810c8fab7 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -34,9 +34,9 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
copy_page(to, from);
}
-#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
- alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
-#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+ alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
#ifndef __pa
#define __pa(x) __phys_addr((unsigned long)(x))
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 939b1cff4a7b..4bde0dc66100 100644
--- a/arch/x86/include/asm/page_64.h
+++ b/arch/x86/include/asm/page_64.h
@@ -56,6 +56,39 @@ static inline void clear_page(void *page)
void copy_page(void *to, void *from);
+#ifdef CONFIG_X86_5LEVEL
+/*
+ * User space process size. This is the first address outside the user range.
+ * There are a few constraints that determine this:
+ *
+ * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
+ * address, then that syscall will enter the kernel with a
+ * non-canonical return address, and SYSRET will explode dangerously.
+ * We avoid this particular problem by preventing anything
+ * from being mapped at the maximum canonical address.
+ *
+ * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
+ * CPUs malfunction if they execute code from the highest canonical page.
+ * They'll speculate right off the end of the canonical space, and
+ * bad things happen. This is worked around in the same way as the
+ * Intel problem.
+ *
+ * With page table isolation enabled, we map the LDT in ... [stay tuned]
+ */
+static __always_inline unsigned long task_size_max(void)
+{
+ unsigned long ret;
+
+ alternative_io("movq %[small],%0","movq %[large],%0",
+ X86_FEATURE_LA57,
+ "=r" (ret),
+ [small] "i" ((1ul << 47)-PAGE_SIZE),
+ [large] "i" ((1ul << 56)-PAGE_SIZE));
+
+ return ret;
+}
+#endif /* CONFIG_X86_5LEVEL */
+
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_X86_VSYSCALL_EMULATION
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 64297eabad63..a8d4ad856568 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -55,30 +55,13 @@
#ifdef CONFIG_X86_5LEVEL
#define __VIRTUAL_MASK_SHIFT (pgtable_l5_enabled() ? 56 : 47)
+/* See task_size_max() in <asm/page_64.h> */
#else
#define __VIRTUAL_MASK_SHIFT 47
+#define task_size_max() ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)
#endif
-/*
- * User space process size. This is the first address outside the user range.
- * There are a few constraints that determine this:
- *
- * On Intel CPUs, if a SYSCALL instruction is at the highest canonical
- * address, then that syscall will enter the kernel with a
- * non-canonical return address, and SYSRET will explode dangerously.
- * We avoid this particular problem by preventing anything
- * from being mapped at the maximum canonical address.
- *
- * On AMD CPUs in the Ryzen family, there's a nasty bug in which the
- * CPUs malfunction if they execute code from the highest canonical page.
- * They'll speculate right off the end of the canonical space, and
- * bad things happen. This is worked around in the same way as the
- * Intel problem.
- *
- * With page table isolation enabled, we map the LDT in ... [stay tuned]
- */
-#define TASK_SIZE_MAX ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - PAGE_SIZE)
-
+#define TASK_SIZE_MAX task_size_max()
#define DEFAULT_MAP_WINDOW ((1UL << 47) - PAGE_SIZE)
/* This decides where the kernel will search for a free chunk of vm
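Note: the two hunks above turn TASK_SIZE_MAX from a compile-time constant into a runtime choice between the 4-level and 5-level (LA57) limits, patched in via alternative_io(). The two values the alternative selects between are easy to sanity-check in user space:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* 4-level paging: 47 user VA bits; 5-level (LA57): 56 bits. */
	unsigned long small = (1UL << 47) - PAGE_SIZE;
	unsigned long large = (1UL << 56) - PAGE_SIZE;

	printf("4-level TASK_SIZE_MAX: %#lx\n", small); /* 0x7ffffffff000 */
	printf("5-level TASK_SIZE_MAX: %#lx\n", large); /* 0xfffffffffff000 */
	return 0;
}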
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 4abf110e2243..da3a1ac82be5 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -15,11 +15,20 @@
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
+#include <linux/static_call_types.h>
#include <asm/frame.h>
-static inline unsigned long long paravirt_sched_clock(void)
+u64 dummy_steal_clock(int cpu);
+u64 dummy_sched_clock(void);
+
+DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
+DECLARE_STATIC_CALL(pv_sched_clock, dummy_sched_clock);
+
+void paravirt_set_sched_clock(u64 (*func)(void));
+
+static inline u64 paravirt_sched_clock(void)
{
- return PVOP_CALL0(unsigned long long, time.sched_clock);
+ return static_call(pv_sched_clock)();
}
struct static_key;
@@ -33,9 +42,13 @@ bool pv_is_native_vcpu_is_preempted(void);
static inline u64 paravirt_steal_clock(int cpu)
{
- return PVOP_CALL1(u64, time.steal_clock, cpu);
+ return static_call(pv_steal_clock)(cpu);
}
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init paravirt_set_cap(void);
+#endif
+
/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
@@ -50,7 +63,7 @@ static inline void slow_down_io(void)
void native_flush_tlb_local(void);
void native_flush_tlb_global(void);
void native_flush_tlb_one_user(unsigned long addr);
-void native_flush_tlb_others(const struct cpumask *cpumask,
+void native_flush_tlb_multi(const struct cpumask *cpumask,
const struct flush_tlb_info *info);
static inline void __flush_tlb_local(void)
@@ -68,10 +81,10 @@ static inline void __flush_tlb_one_user(unsigned long addr)
PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
}
-static inline void __flush_tlb_others(const struct cpumask *cpumask,
+static inline void __flush_tlb_multi(const struct cpumask *cpumask,
const struct flush_tlb_info *info)
{
- PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
+ PVOP_VCALL2(mmu.flush_tlb_multi, cpumask, info);
}
static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
@@ -122,7 +135,9 @@ static inline void write_cr0(unsigned long x)
static inline unsigned long read_cr2(void)
{
- return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
+ return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2,
+ "mov %%cr2, %%rax;",
+ ALT_NOT(X86_FEATURE_XENPV));
}
static inline void write_cr2(unsigned long x)
@@ -132,12 +147,14 @@ static inline void write_cr2(unsigned long x)
static inline unsigned long __read_cr3(void)
{
- return PVOP_CALL0(unsigned long, mmu.read_cr3);
+ return PVOP_ALT_CALL0(unsigned long, mmu.read_cr3,
+ "mov %%cr3, %%rax;", ALT_NOT(X86_FEATURE_XENPV));
}
static inline void write_cr3(unsigned long x)
{
- PVOP_VCALL1(mmu.write_cr3, x);
+ PVOP_ALT_VCALL1(mmu.write_cr3, x,
+ "mov %%rdi, %%cr3", ALT_NOT(X86_FEATURE_XENPV));
}
static inline void __write_cr4(unsigned long x)
@@ -157,7 +174,7 @@ static inline void halt(void)
static inline void wbinvd(void)
{
- PVOP_VCALL0(cpu.wbinvd);
+ PVOP_ALT_VCALL0(cpu.wbinvd, "wbinvd", ALT_NOT(X86_FEATURE_XENPV));
}
static inline u64 paravirt_read_msr(unsigned msr)
@@ -371,22 +388,28 @@ static inline void paravirt_release_p4d(unsigned long pfn)
static inline pte_t __pte(pteval_t val)
{
- return (pte_t) { PVOP_CALLEE1(pteval_t, mmu.make_pte, val) };
+ return (pte_t) { PVOP_ALT_CALLEE1(pteval_t, mmu.make_pte, val,
+ "mov %%rdi, %%rax",
+ ALT_NOT(X86_FEATURE_XENPV)) };
}
static inline pteval_t pte_val(pte_t pte)
{
- return PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
+ return PVOP_ALT_CALLEE1(pteval_t, mmu.pte_val, pte.pte,
+ "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}
static inline pgd_t __pgd(pgdval_t val)
{
- return (pgd_t) { PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val) };
+ return (pgd_t) { PVOP_ALT_CALLEE1(pgdval_t, mmu.make_pgd, val,
+ "mov %%rdi, %%rax",
+ ALT_NOT(X86_FEATURE_XENPV)) };
}
static inline pgdval_t pgd_val(pgd_t pgd)
{
- return PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
+ return PVOP_ALT_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd,
+ "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
@@ -419,12 +442,15 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
static inline pmd_t __pmd(pmdval_t val)
{
- return (pmd_t) { PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val) };
+ return (pmd_t) { PVOP_ALT_CALLEE1(pmdval_t, mmu.make_pmd, val,
+ "mov %%rdi, %%rax",
+ ALT_NOT(X86_FEATURE_XENPV)) };
}
static inline pmdval_t pmd_val(pmd_t pmd)
{
- return PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
+ return PVOP_ALT_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd,
+ "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}
static inline void set_pud(pud_t *pudp, pud_t pud)
@@ -436,14 +462,16 @@ static inline pud_t __pud(pudval_t val)
{
pudval_t ret;
- ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);
+ ret = PVOP_ALT_CALLEE1(pudval_t, mmu.make_pud, val,
+ "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
return (pud_t) { ret };
}
static inline pudval_t pud_val(pud_t pud)
{
- return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
+ return PVOP_ALT_CALLEE1(pudval_t, mmu.pud_val, pud.pud,
+ "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}
static inline void pud_clear(pud_t *pudp)
@@ -462,14 +490,17 @@ static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
static inline p4d_t __p4d(p4dval_t val)
{
- p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);
+ p4dval_t ret = PVOP_ALT_CALLEE1(p4dval_t, mmu.make_p4d, val,
+ "mov %%rdi, %%rax",
+ ALT_NOT(X86_FEATURE_XENPV));
return (p4d_t) { ret };
}
static inline p4dval_t p4d_val(p4d_t p4d)
{
- return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
+ return PVOP_ALT_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d,
+ "mov %%rdi, %%rax", ALT_NOT(X86_FEATURE_XENPV));
}
static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
@@ -556,7 +587,9 @@ static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
- PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
+ PVOP_ALT_VCALLEE1(lock.queued_spin_unlock, lock,
+ "movb $0, (%%" _ASM_ARG1 ");",
+ ALT_NOT(X86_FEATURE_PVUNLOCK));
}
static __always_inline void pv_wait(u8 *ptr, u8 val)
@@ -571,7 +604,9 @@ static __always_inline void pv_kick(int cpu)
static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
- return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
+ return PVOP_ALT_CALLEE1(bool, lock.vcpu_is_preempted, cpu,
+ "xor %%" _ASM_AX ", %%" _ASM_AX ";",
+ ALT_NOT(X86_FEATURE_VCPUPREEMPT));
}
void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
@@ -645,17 +680,18 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
#ifdef CONFIG_PARAVIRT_XXL
static inline notrace unsigned long arch_local_save_flags(void)
{
- return PVOP_CALLEE0(unsigned long, irq.save_fl);
+ return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;",
+ ALT_NOT(X86_FEATURE_XENPV));
}
static inline notrace void arch_local_irq_disable(void)
{
- PVOP_VCALLEE0(irq.irq_disable);
+ PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT(X86_FEATURE_XENPV));
}
static inline notrace void arch_local_irq_enable(void)
{
- PVOP_VCALLEE0(irq.irq_enable);
+ PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT(X86_FEATURE_XENPV));
}
static inline notrace unsigned long arch_local_irq_save(void)
@@ -700,84 +736,27 @@ extern void default_banner(void);
.popsection
-#define COND_PUSH(set, mask, reg) \
- .if ((~(set)) & mask); push %reg; .endif
-#define COND_POP(set, mask, reg) \
- .if ((~(set)) & mask); pop %reg; .endif
-
#ifdef CONFIG_X86_64
-
-#define PV_SAVE_REGS(set) \
- COND_PUSH(set, CLBR_RAX, rax); \
- COND_PUSH(set, CLBR_RCX, rcx); \
- COND_PUSH(set, CLBR_RDX, rdx); \
- COND_PUSH(set, CLBR_RSI, rsi); \
- COND_PUSH(set, CLBR_RDI, rdi); \
- COND_PUSH(set, CLBR_R8, r8); \
- COND_PUSH(set, CLBR_R9, r9); \
- COND_PUSH(set, CLBR_R10, r10); \
- COND_PUSH(set, CLBR_R11, r11)
-#define PV_RESTORE_REGS(set) \
- COND_POP(set, CLBR_R11, r11); \
- COND_POP(set, CLBR_R10, r10); \
- COND_POP(set, CLBR_R9, r9); \
- COND_POP(set, CLBR_R8, r8); \
- COND_POP(set, CLBR_RDI, rdi); \
- COND_POP(set, CLBR_RSI, rsi); \
- COND_POP(set, CLBR_RDX, rdx); \
- COND_POP(set, CLBR_RCX, rcx); \
- COND_POP(set, CLBR_RAX, rax)
+#ifdef CONFIG_PARAVIRT_XXL
#define PARA_PATCH(off) ((off) / 8)
#define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .quad, 8)
#define PARA_INDIRECT(addr) *addr(%rip)
-#else
-#define PV_SAVE_REGS(set) \
- COND_PUSH(set, CLBR_EAX, eax); \
- COND_PUSH(set, CLBR_EDI, edi); \
- COND_PUSH(set, CLBR_ECX, ecx); \
- COND_PUSH(set, CLBR_EDX, edx)
-#define PV_RESTORE_REGS(set) \
- COND_POP(set, CLBR_EDX, edx); \
- COND_POP(set, CLBR_ECX, ecx); \
- COND_POP(set, CLBR_EDI, edi); \
- COND_POP(set, CLBR_EAX, eax)
-
-#define PARA_PATCH(off) ((off) / 4)
-#define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .long, 4)
-#define PARA_INDIRECT(addr) *%cs:addr
-#endif
-#ifdef CONFIG_PARAVIRT_XXL
#define INTERRUPT_RETURN \
- PARA_SITE(PARA_PATCH(PV_CPU_iret), \
- ANNOTATE_RETPOLINE_SAFE; \
- jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)
-
-#define DISABLE_INTERRUPTS(clobbers) \
- PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable), \
- PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
- ANNOTATE_RETPOLINE_SAFE; \
- call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable); \
- PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
-
-#define ENABLE_INTERRUPTS(clobbers) \
- PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable), \
- PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
- ANNOTATE_RETPOLINE_SAFE; \
- call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable); \
- PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
-#endif
+ ANNOTATE_RETPOLINE_SAFE; \
+ ALTERNATIVE_TERNARY("jmp *paravirt_iret(%rip);", \
+ X86_FEATURE_XENPV, "jmp xen_iret;", "jmp native_iret;")
-#ifdef CONFIG_X86_64
-#ifdef CONFIG_PARAVIRT_XXL
#ifdef CONFIG_DEBUG_ENTRY
-#define SAVE_FLAGS(clobbers) \
- PARA_SITE(PARA_PATCH(PV_IRQ_save_fl), \
- PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
- ANNOTATE_RETPOLINE_SAFE; \
- call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl); \
- PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+.macro PARA_IRQ_save_fl
+ PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),
+ ANNOTATE_RETPOLINE_SAFE;
+ call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);)
+.endm
+
+#define SAVE_FLAGS ALTERNATIVE "PARA_IRQ_save_fl;", "pushf; pop %rax;", \
+ ALT_NOT(X86_FEATURE_XENPV)
#endif
#endif /* CONFIG_PARAVIRT_XXL */
#endif /* CONFIG_X86_64 */
@@ -800,5 +779,11 @@ static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif
+
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+static inline void paravirt_set_cap(void)
+{
+}
+#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PARAVIRT_H */
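Note: the sched/steal clock hunks replace the pv_ops indirect call with a static call, which the DECLARE_STATIC_CALL()/paravirt_set_sched_clock() pair above implies is updated at boot. A kernel-context sketch of the definition side, consistent with those declarations (the real code lives in arch/x86/kernel/paravirt.c):

#include <linux/types.h>
#include <linux/static_call.h>

extern u64 native_sched_clock(void);	/* the TSC-based default */

DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);

void paravirt_set_sched_clock(u64 (*func)(void))
{
	/* Rewrites every static_call(pv_sched_clock)() site into a direct
	 * call to func: no pv_ops load, no retpoline. */
	static_call_update(pv_sched_clock, func);
}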
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index de87087d3bde..d9d6b0203ec4 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -3,7 +3,6 @@
#define _ASM_X86_PARAVIRT_TYPES_H
/* Bitmask of what can be clobbered: usually at least eax. */
-#define CLBR_NONE 0
#define CLBR_EAX (1 << 0)
#define CLBR_ECX (1 << 1)
#define CLBR_EDX (1 << 2)
@@ -15,7 +14,6 @@
#define CLBR_ARG_REGS (CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG (CLBR_EAX | CLBR_EDX)
-#define CLBR_SCRATCH (0)
#else
#define CLBR_RAX CLBR_EAX
#define CLBR_RCX CLBR_ECX
@@ -32,12 +30,9 @@
#define CLBR_ARG_REGS (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG (CLBR_RAX)
-#define CLBR_SCRATCH (CLBR_R10 | CLBR_R11)
#endif /* X86_64 */
-#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
-
#ifndef __ASSEMBLY__
#include <asm/desc_defs.h>
@@ -73,19 +68,6 @@ struct pv_info {
const char *name;
};
-struct pv_init_ops {
- /*
- * Patch may replace one of the defined code sequences with
- * arbitrary code, subject to the same register constraints.
- * This generally means the code is not free to clobber any
- * registers other than EAX. The patch function should return
- * the number of bytes of code generated, as we nop pad the
- * rest in generic code.
- */
- unsigned (*patch)(u8 type, void *insn_buff,
- unsigned long addr, unsigned len);
-} __no_randomize_layout;
-
#ifdef CONFIG_PARAVIRT_XXL
struct pv_lazy_ops {
/* Set deferred update mode, used for batching operations. */
@@ -95,11 +77,6 @@ struct pv_lazy_ops {
} __no_randomize_layout;
#endif
-struct pv_time_ops {
- unsigned long long (*sched_clock)(void);
- unsigned long long (*steal_clock)(int cpu);
-} __no_randomize_layout;
-
struct pv_cpu_ops {
/* hooks for various privileged instructions */
void (*io_delay)(void);
@@ -156,10 +133,6 @@ struct pv_cpu_ops {
u64 (*read_pmc)(int counter);
- /* Normal iret. Jump to this with the standard iret stack
- frame set up. */
- void (*iret)(void);
-
void (*start_context_switch)(struct task_struct *prev);
void (*end_context_switch)(struct task_struct *next);
#endif
@@ -188,8 +161,8 @@ struct pv_mmu_ops {
void (*flush_tlb_user)(void);
void (*flush_tlb_kernel)(void);
void (*flush_tlb_one_user)(unsigned long addr);
- void (*flush_tlb_others)(const struct cpumask *cpus,
- const struct flush_tlb_info *info);
+ void (*flush_tlb_multi)(const struct cpumask *cpus,
+ const struct flush_tlb_info *info);
void (*tlb_remove_table)(struct mmu_gather *tlb, void *table);
@@ -290,8 +263,6 @@ struct pv_lock_ops {
* number for each function using the offset which we use to indicate
* what to patch. */
struct paravirt_patch_template {
- struct pv_init_ops init;
- struct pv_time_ops time;
struct pv_cpu_ops cpu;
struct pv_irq_ops irq;
struct pv_mmu_ops mmu;
@@ -300,6 +271,7 @@ struct paravirt_patch_template {
extern struct pv_info pv_info;
extern struct paravirt_patch_template pv_ops;
+extern void (*paravirt_iret)(void);
#define PARAVIRT_PATCH(x) \
(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
@@ -331,11 +303,7 @@ extern struct paravirt_patch_template pv_ops;
/* Simple instruction patching code. */
#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
-unsigned paravirt_patch_ident_64(void *insn_buff, unsigned len);
-unsigned paravirt_patch_default(u8 type, void *insn_buff, unsigned long addr, unsigned len);
-unsigned paravirt_patch_insns(void *insn_buff, unsigned len, const char *start, const char *end);
-
-unsigned native_patch(u8 type, void *insn_buff, unsigned long addr, unsigned len);
+unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr, unsigned int len);
int paravirt_disable_iospace(void);
@@ -371,7 +339,7 @@ int paravirt_disable_iospace(void);
* on the stack. All caller-save registers (eax,edx,ecx) are expected
* to be modified (either clobbered or used for return values).
* X86_64, on the other hand, already specifies a register-based calling
- * conventions, returning at %rax, with parameteres going on %rdi, %rsi,
+ * conventions, returning at %rax, with parameters going on %rdi, %rsi,
* %rdx, and %rcx. Note that for this reason, x86_64 does not need any
* special handling for dealing with 4 arguments, unlike i386.
* However, x86_64 also have to clobber all caller saved registers, which
@@ -414,11 +382,9 @@ int paravirt_disable_iospace(void);
* makes sure the incoming and outgoing types are always correct.
*/
#ifdef CONFIG_X86_32
-#define PVOP_VCALL_ARGS \
+#define PVOP_CALL_ARGS \
unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx;
-#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
-
#define PVOP_CALL_ARG1(x) "a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x) "d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x) "c" ((unsigned long)(x))
@@ -434,12 +400,10 @@ int paravirt_disable_iospace(void);
#define VEXTRA_CLOBBERS
#else /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
-#define PVOP_VCALL_ARGS \
+#define PVOP_CALL_ARGS \
unsigned long __edi = __edi, __esi = __esi, \
__edx = __edx, __ecx = __ecx, __eax = __eax;
-#define PVOP_CALL_ARGS PVOP_VCALL_ARGS
-
#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x) "d" ((unsigned long)(x))
@@ -464,152 +428,138 @@ int paravirt_disable_iospace(void);
#define PVOP_TEST_NULL(op) ((void)pv_ops.op)
#endif
-#define PVOP_RETMASK(rettype) \
+#define PVOP_RETVAL(rettype) \
({ unsigned long __mask = ~0UL; \
+ BUILD_BUG_ON(sizeof(rettype) > sizeof(unsigned long)); \
switch (sizeof(rettype)) { \
case 1: __mask = 0xffUL; break; \
case 2: __mask = 0xffffUL; break; \
case 4: __mask = 0xffffffffUL; break; \
default: break; \
} \
- __mask; \
+ __mask & __eax; \
})
-#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr, \
- pre, post, ...) \
+#define ____PVOP_CALL(ret, op, clbr, call_clbr, extra_clbr, ...) \
({ \
- rettype __ret; \
PVOP_CALL_ARGS; \
PVOP_TEST_NULL(op); \
- /* This is 32-bit specific, but is okay in 64-bit */ \
- /* since this condition will never hold */ \
- if (sizeof(rettype) > sizeof(unsigned long)) { \
- asm volatile(pre \
- paravirt_alt(PARAVIRT_CALL) \
- post \
- : call_clbr, ASM_CALL_CONSTRAINT \
- : paravirt_type(op), \
- paravirt_clobber(clbr), \
- ##__VA_ARGS__ \
- : "memory", "cc" extra_clbr); \
- __ret = (rettype)((((u64)__edx) << 32) | __eax); \
- } else { \
- asm volatile(pre \
- paravirt_alt(PARAVIRT_CALL) \
- post \
- : call_clbr, ASM_CALL_CONSTRAINT \
- : paravirt_type(op), \
- paravirt_clobber(clbr), \
- ##__VA_ARGS__ \
- : "memory", "cc" extra_clbr); \
- __ret = (rettype)(__eax & PVOP_RETMASK(rettype)); \
- } \
- __ret; \
+ asm volatile(paravirt_alt(PARAVIRT_CALL) \
+ : call_clbr, ASM_CALL_CONSTRAINT \
+ : paravirt_type(op), \
+ paravirt_clobber(clbr), \
+ ##__VA_ARGS__ \
+ : "memory", "cc" extra_clbr); \
+ ret; \
})
-#define __PVOP_CALL(rettype, op, pre, post, ...) \
- ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS, \
- EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
-
-#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...) \
- ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
- PVOP_CALLEE_CLOBBERS, , \
- pre, post, ##__VA_ARGS__)
-
-
-#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
+#define ____PVOP_ALT_CALL(ret, op, alt, cond, clbr, call_clbr, \
+ extra_clbr, ...) \
({ \
- PVOP_VCALL_ARGS; \
+ PVOP_CALL_ARGS; \
PVOP_TEST_NULL(op); \
- asm volatile(pre \
- paravirt_alt(PARAVIRT_CALL) \
- post \
+ asm volatile(ALTERNATIVE(paravirt_alt(PARAVIRT_CALL), \
+ alt, cond) \
: call_clbr, ASM_CALL_CONSTRAINT \
: paravirt_type(op), \
paravirt_clobber(clbr), \
##__VA_ARGS__ \
: "memory", "cc" extra_clbr); \
+ ret; \
})
-#define __PVOP_VCALL(op, pre, post, ...) \
- ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
- VEXTRA_CLOBBERS, \
- pre, post, ##__VA_ARGS__)
+#define __PVOP_CALL(rettype, op, ...) \
+ ____PVOP_CALL(PVOP_RETVAL(rettype), op, CLBR_ANY, \
+ PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, ##__VA_ARGS__)
+
+#define __PVOP_ALT_CALL(rettype, op, alt, cond, ...) \
+ ____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op, alt, cond, CLBR_ANY,\
+ PVOP_CALL_CLOBBERS, EXTRA_CLOBBERS, \
+ ##__VA_ARGS__)
+
+#define __PVOP_CALLEESAVE(rettype, op, ...) \
+ ____PVOP_CALL(PVOP_RETVAL(rettype), op.func, CLBR_RET_REG, \
+ PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)
+
+#define __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, ...) \
+ ____PVOP_ALT_CALL(PVOP_RETVAL(rettype), op.func, alt, cond, \
+ CLBR_RET_REG, PVOP_CALLEE_CLOBBERS, , ##__VA_ARGS__)
+
+
+#define __PVOP_VCALL(op, ...) \
+ (void)____PVOP_CALL(, op, CLBR_ANY, PVOP_VCALL_CLOBBERS, \
+ VEXTRA_CLOBBERS, ##__VA_ARGS__)
+
+#define __PVOP_ALT_VCALL(op, alt, cond, ...) \
+ (void)____PVOP_ALT_CALL(, op, alt, cond, CLBR_ANY, \
+ PVOP_VCALL_CLOBBERS, VEXTRA_CLOBBERS, \
+ ##__VA_ARGS__)
-#define __PVOP_VCALLEESAVE(op, pre, post, ...) \
- ____PVOP_VCALL(op.func, CLBR_RET_REG, \
- PVOP_VCALLEE_CLOBBERS, , \
- pre, post, ##__VA_ARGS__)
+#define __PVOP_VCALLEESAVE(op, ...) \
+ (void)____PVOP_CALL(, op.func, CLBR_RET_REG, \
+ PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)
+#define __PVOP_ALT_VCALLEESAVE(op, alt, cond, ...) \
+ (void)____PVOP_ALT_CALL(, op.func, alt, cond, CLBR_RET_REG, \
+ PVOP_VCALLEE_CLOBBERS, , ##__VA_ARGS__)
#define PVOP_CALL0(rettype, op) \
- __PVOP_CALL(rettype, op, "", "")
+ __PVOP_CALL(rettype, op)
#define PVOP_VCALL0(op) \
- __PVOP_VCALL(op, "", "")
+ __PVOP_VCALL(op)
+#define PVOP_ALT_CALL0(rettype, op, alt, cond) \
+ __PVOP_ALT_CALL(rettype, op, alt, cond)
+#define PVOP_ALT_VCALL0(op, alt, cond) \
+ __PVOP_ALT_VCALL(op, alt, cond)
#define PVOP_CALLEE0(rettype, op) \
- __PVOP_CALLEESAVE(rettype, op, "", "")
+ __PVOP_CALLEESAVE(rettype, op)
#define PVOP_VCALLEE0(op) \
- __PVOP_VCALLEESAVE(op, "", "")
+ __PVOP_VCALLEESAVE(op)
+#define PVOP_ALT_CALLEE0(rettype, op, alt, cond) \
+ __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond)
+#define PVOP_ALT_VCALLEE0(op, alt, cond) \
+ __PVOP_ALT_VCALLEESAVE(op, alt, cond)
#define PVOP_CALL1(rettype, op, arg1) \
- __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+ __PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1) \
- __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
+ __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1))
+#define PVOP_ALT_VCALL1(op, arg1, alt, cond) \
+ __PVOP_ALT_VCALL(op, alt, cond, PVOP_CALL_ARG1(arg1))
#define PVOP_CALLEE1(rettype, op, arg1) \
- __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+ __PVOP_CALLEESAVE(rettype, op, PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1) \
- __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
+ __PVOP_VCALLEESAVE(op, PVOP_CALL_ARG1(arg1))
+#define PVOP_ALT_CALLEE1(rettype, op, arg1, alt, cond) \
+ __PVOP_ALT_CALLEESAVE(rettype, op, alt, cond, PVOP_CALL_ARG1(arg1))
+#define PVOP_ALT_VCALLEE1(op, arg1, alt, cond) \
+ __PVOP_ALT_VCALLEESAVE(op, alt, cond, PVOP_CALL_ARG1(arg1))
#define PVOP_CALL2(rettype, op, arg1, arg2) \
- __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2))
+ __PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2) \
- __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2))
-
-#define PVOP_CALLEE2(rettype, op, arg1, arg2) \
- __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2))
-#define PVOP_VCALLEE2(op, arg1, arg2) \
- __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1), \
- PVOP_CALL_ARG2(arg2))
-
+ __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2))
#define PVOP_CALL3(rettype, op, arg1, arg2, arg3) \
- __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1), \
+ __PVOP_CALL(rettype, op, PVOP_CALL_ARG1(arg1), \
PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3) \
- __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1), \
+ __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), \
PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
-/* This is the only difference in x86_64. We can make it much simpler */
-#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
__PVOP_CALL(rettype, op, \
- "push %[_arg4];", "lea 4(%%esp),%%esp;", \
- PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
- PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
-#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
- __PVOP_VCALL(op, \
- "push %[_arg4];", "lea 4(%%esp),%%esp;", \
- "0" ((u32)(arg1)), "1" ((u32)(arg2)), \
- "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
-#else
-#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4) \
- __PVOP_CALL(rettype, op, "", "", \
PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4) \
- __PVOP_VCALL(op, "", "", \
- PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
+ __PVOP_VCALL(op, PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2), \
PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
-#endif
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
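Note: the PVOP rework above folds the old PVOP_RETMASK-plus-assignment pair into a single PVOP_RETVAL(rettype) expression that masks the raw return register down to the declared return width. A user-space demonstration of exactly that masking:

#include <stdio.h>

/* What PVOP_RETVAL(rettype) computes for a raw register value: mask the
 * 64-bit return register down to sizeof(rettype) bytes. */
static unsigned long retval_mask(unsigned long eax, unsigned int size)
{
	unsigned long mask = ~0UL;

	switch (size) {
	case 1: mask = 0xffUL; break;
	case 2: mask = 0xffffUL; break;
	case 4: mask = 0xffffffffUL; break;
	default: break;			/* 8 bytes: keep everything */
	}
	return eax & mask;
}

int main(void)
{
	unsigned long raw = 0xdeadbeefcafef00dUL;

	printf("%#lx\n", retval_mask(raw, 1)); /* 0xd */
	printf("%#lx\n", retval_mask(raw, 4)); /* 0xcafef00d */
	return 0;
}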
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 544f41a179fb..8fc1b5003713 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -478,6 +478,7 @@ struct x86_pmu_lbr {
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
+extern void perf_clear_dirty_counters(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index 62ad61d6fefc..c7ec5bb88334 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -84,8 +84,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
#if CONFIG_PGTABLE_LEVELS > 2
extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index a02c67291cfc..b1099f2d9800 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1244,7 +1244,7 @@ static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
/*
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
*
- * dst - pointer to pgd range anwhere on a pgd page
+ * dst - pointer to pgd range anywhere on a pgd page
* src - ""
* count - the number of pgds to copy.
*
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index f24d7ef8fffa..40497a9020c6 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -7,8 +7,6 @@
#include <asm/page_types.h>
-#define FIRST_USER_ADDRESS 0UL
-
#define _PAGE_BIT_PRESENT 0 /* is present */
#define _PAGE_BIT_RW 1 /* writeable */
#define _PAGE_BIT_USER 2 /* userspace addressable */
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index f8cb8af4de5c..fe5efbcba824 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -44,7 +44,7 @@ static __always_inline void preempt_count_set(int pc)
#define init_task_preempt_count(p) do { } while (0)
#define init_idle_preempt_count(p, cpu) do { \
- per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
+ per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
} while (0)
/*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index dc6d149bf851..364d0e42e280 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -314,11 +314,6 @@ struct x86_hw_tss {
struct x86_hw_tss {
u32 reserved1;
u64 sp0;
-
- /*
- * We store cpu_current_top_of_stack in sp1 so it's always accessible.
- * Linux does not use ring 1, so sp1 is not otherwise needed.
- */
u64 sp1;
/*
@@ -426,12 +421,7 @@ struct irq_stack {
char stack[IRQ_STACK_SIZE];
} __aligned(IRQ_STACK_SIZE);
-#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
-#else
-/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
-#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
-#endif
#ifdef CONFIG_X86_64
struct fixed_percpu_data {
@@ -439,6 +429,9 @@ struct fixed_percpu_data {
* GCC hardcodes the stack canary as %gs:40. Since the
* irq_stack is the object at %gs:0, we reserve the bottom
* 48 bytes of the irq stack for the canary.
+ *
+ * Once we are willing to require -mstack-protector-guard-symbol=
+ * support for x86_64 stackprotector, we can get rid of this.
*/
char gs_base[40];
unsigned long stack_canary;
@@ -460,17 +453,7 @@ extern asmlinkage void ignore_sysret(void);
void current_save_fsgs(void);
#else /* X86_64 */
#ifdef CONFIG_STACKPROTECTOR
-/*
- * Make sure stack canary segment base is cached-aligned:
- * "For Intel Atom processors, avoid non zero segment base address
- * that is not aligned to cache line boundary at all cost."
- * (Optim Ref Manual Assembly/Compiler Coding Rule 15.)
- */
-struct stack_canary {
- char __pad[20]; /* canary at %gs:20 */
- unsigned long canary;
-};
-DECLARE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
+DECLARE_PER_CPU(unsigned long, __stack_chk_guard);
#endif
DECLARE_PER_CPU(struct irq_stack *, hardirq_stack_ptr);
DECLARE_PER_CPU(struct irq_stack *, softirq_stack_ptr);
@@ -527,7 +510,7 @@ struct thread_struct {
struct io_bitmap *io_bitmap;
/*
- * IOPL. Priviledge level dependent I/O permission which is
+ * IOPL. Privilege level dependent I/O permission which is
* emulated via the I/O bitmap to prevent user space from disabling
* interrupts.
*/
@@ -551,15 +534,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
*size = fpu_kernel_xstate_size;
}
-/*
- * Thread-synchronous status.
- *
- * This is different from the flags in that nobody else
- * ever touches our thread-synchronous status, so we don't
- * have to worry about atomic accesses.
- */
-#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
-
static inline void
native_load_sp0(unsigned long sp0)
{
@@ -689,6 +663,7 @@ extern void load_direct_gdt(int);
extern void load_fixmap_gdt(int);
extern void load_percpu_segment(int);
extern void cpu_init(void);
+extern void cpu_init_secondary(void);
extern void cpu_init_exception_handling(void);
extern void cr4_init(void);
@@ -813,8 +788,10 @@ DECLARE_PER_CPU(u64, msr_misc_features_shadow);
#ifdef CONFIG_CPU_SUP_AMD
extern u32 amd_get_nodes_per_socket(void);
+extern u32 amd_get_highest_perf(void);
#else
static inline u32 amd_get_nodes_per_socket(void) { return 0; }
+static inline u32 amd_get_highest_perf(void) { return 0; }
#endif
static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index 2c35f1c01a2d..8c5d1910a848 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -4,6 +4,8 @@
#include <asm/ldt.h>
+struct task_struct;
+
/* misc architecture specific prototypes */
void syscall_init(void);
@@ -25,6 +27,7 @@ void __end_SYSENTER_singlestep_region(void);
void entry_SYSENTER_compat(void);
void __end_entry_SYSENTER_compat(void);
void entry_SYSCALL_compat(void);
+void entry_SYSCALL_compat_safe_stack(void);
void entry_INT80_compat(void);
#ifdef CONFIG_XEN_PV
void xen_entry_INT80_compat(void);
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index d8324a236696..b94f615600d5 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -37,7 +37,10 @@ struct pt_regs {
unsigned short __esh;
unsigned short fs;
unsigned short __fsh;
- /* On interrupt, gs and __gsh store the vector number. */
+ /*
+ * On interrupt, gs and __gsh store the vector number. They never
+ * store gs any more.
+ */
unsigned short gs;
unsigned short __gsh;
/* On interrupt, this is the error code. */
@@ -94,6 +97,8 @@ struct pt_regs {
#include <asm/paravirt_types.h>
#endif
+#include <asm/proto.h>
+
struct cpuinfo_x86;
struct task_struct;
@@ -175,6 +180,19 @@ static inline bool any_64bit_mode(struct pt_regs *regs)
#ifdef CONFIG_X86_64
#define current_user_stack_pointer() current_pt_regs()->sp
#define compat_user_stack_pointer() current_pt_regs()->sp
+
+static inline bool ip_within_syscall_gap(struct pt_regs *regs)
+{
+ bool ret = (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
+ regs->ip < (unsigned long)entry_SYSCALL_64_safe_stack);
+
+#ifdef CONFIG_IA32_EMULATION
+ ret = ret || (regs->ip >= (unsigned long)entry_SYSCALL_compat &&
+ regs->ip < (unsigned long)entry_SYSCALL_compat_safe_stack);
+#endif
+
+ return ret;
+}
#endif
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
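The new ip_within_syscall_gap() helper exists so NMI and #VC handlers can detect an interrupt that landed between SYSCALL entry and the switch to a trusted stack. A minimal user-space sketch of the same range check, with made-up addresses standing in for the entry_SYSCALL_64/entry_SYSCALL_64_safe_stack linker symbols:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical addresses for the entry stub and its safe-stack label. */
	static const unsigned long entry_start = 0x1000;
	static const unsigned long entry_safe  = 0x1040;

	/* True if ip falls in the window where the kernel stack cannot be
	 * trusted yet (between SYSCALL entry and the stack switch). */
	static bool ip_in_gap(unsigned long ip)
	{
		return ip >= entry_start && ip < entry_safe;
	}

	int main(void)
	{
		printf("%d %d\n", ip_in_gap(0x1008), ip_in_gap(0x2000)); /* prints "1 0" */
		return 0;
	}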
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 7fdd4facfce7..72044026eb3c 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -95,7 +95,7 @@
*
* 26 - ESPFIX small SS
* 27 - per-cpu [ offset to per-cpu data area ]
- * 28 - stack_canary-20 [ for stack protector ] <=== cacheline #8
+ * 28 - unused
* 29 - unused
* 30 - unused
* 31 - TSS for double fault handler
@@ -118,7 +118,6 @@
#define GDT_ENTRY_ESPFIX_SS 26
#define GDT_ENTRY_PERCPU 27
-#define GDT_ENTRY_STACK_CANARY 28
#define GDT_ENTRY_DOUBLEFAULT_TSS 31
@@ -158,12 +157,6 @@
# define __KERNEL_PERCPU 0
#endif
-#ifdef CONFIG_STACKPROTECTOR
-# define __KERNEL_STACK_CANARY (GDT_ENTRY_STACK_CANARY*8)
-#else
-# define __KERNEL_STACK_CANARY 0
-#endif
-
#else /* 64-bit: */
#include <asm/cache.h>
@@ -364,22 +357,15 @@ static inline void __loadsegment_fs(unsigned short value)
asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
/*
- * x86-32 user GS accessors:
+ * x86-32 user GS accessors. This is ugly and could do with some cleaning up.
*/
#ifdef CONFIG_X86_32
-# ifdef CONFIG_X86_32_LAZY_GS
-# define get_user_gs(regs) (u16)({ unsigned long v; savesegment(gs, v); v; })
-# define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
-# define task_user_gs(tsk) ((tsk)->thread.gs)
-# define lazy_save_gs(v) savesegment(gs, (v))
-# define lazy_load_gs(v) loadsegment(gs, (v))
-# else /* X86_32_LAZY_GS */
-# define get_user_gs(regs) (u16)((regs)->gs)
-# define set_user_gs(regs, v) do { (regs)->gs = (v); } while (0)
-# define task_user_gs(tsk) (task_pt_regs(tsk)->gs)
-# define lazy_save_gs(v) do { } while (0)
-# define lazy_load_gs(v) do { } while (0)
-# endif /* X86_32_LAZY_GS */
+# define get_user_gs(regs) (u16)({ unsigned long v; savesegment(gs, v); v; })
+# define set_user_gs(regs, v) loadsegment(gs, (unsigned long)(v))
+# define task_user_gs(tsk) ((tsk)->thread.gs)
+# define lazy_save_gs(v) savesegment(gs, (v))
+# define lazy_load_gs(v) loadsegment(gs, (v))
+# define load_gs_index(v) loadsegment(gs, (v))
#endif /* X86_32 */
#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index 4352f08bfbb5..43fa081a1adb 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -8,8 +8,8 @@
/*
* The set_memory_* API can be used to change various attributes of a virtual
* address range. The attributes include:
- * Cachability : UnCached, WriteCombining, WriteThrough, WriteBack
- * Executability : eXeutable, NoteXecutable
+ * Cacheability : UnCached, WriteCombining, WriteThrough, WriteBack
+ * Executability : eXecutable, NoteXecutable
* Read/Write : ReadOnly, ReadWrite
* Presence : NotPresent
* Encryption : Encrypted, Decrypted
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 389d851a02c4..a12458a7a8d4 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -130,11 +130,6 @@ void *extend_brk(size_t size, size_t align);
: : "i" (sz)); \
}
-/* Helper for reserving space for arrays of things */
-#define RESERVE_BRK_ARRAY(type, name, entries) \
- type *name; \
- RESERVE_BRK(name, sizeof(type) * entries)
-
extern void probe_roms(void);
#ifdef __i386__
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
new file mode 100644
index 000000000000..2cef6c5a52c2
--- /dev/null
+++ b/arch/x86/include/asm/sev-common.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD SEV header common between the guest and the hypervisor.
+ *
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ */
+
+#ifndef __ASM_X86_SEV_COMMON_H
+#define __ASM_X86_SEV_COMMON_H
+
+#define GHCB_MSR_INFO_POS 0
+#define GHCB_DATA_LOW 12
+#define GHCB_MSR_INFO_MASK (BIT_ULL(GHCB_DATA_LOW) - 1)
+
+#define GHCB_DATA(v) \
+ (((unsigned long)(v) & ~GHCB_MSR_INFO_MASK) >> GHCB_DATA_LOW)
+
+/* SEV Information Request/Response */
+#define GHCB_MSR_SEV_INFO_RESP 0x001
+#define GHCB_MSR_SEV_INFO_REQ 0x002
+#define GHCB_MSR_VER_MAX_POS 48
+#define GHCB_MSR_VER_MAX_MASK 0xffff
+#define GHCB_MSR_VER_MIN_POS 32
+#define GHCB_MSR_VER_MIN_MASK 0xffff
+#define GHCB_MSR_CBIT_POS 24
+#define GHCB_MSR_CBIT_MASK 0xff
+#define GHCB_MSR_SEV_INFO(_max, _min, _cbit) \
+ ((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) | \
+ (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) | \
+ (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) | \
+ GHCB_MSR_SEV_INFO_RESP)
+#define GHCB_MSR_INFO(v) ((v) & 0xfffUL)
+#define GHCB_MSR_PROTO_MAX(v) (((v) >> GHCB_MSR_VER_MAX_POS) & GHCB_MSR_VER_MAX_MASK)
+#define GHCB_MSR_PROTO_MIN(v) (((v) >> GHCB_MSR_VER_MIN_POS) & GHCB_MSR_VER_MIN_MASK)
+
+/* CPUID Request/Response */
+#define GHCB_MSR_CPUID_REQ 0x004
+#define GHCB_MSR_CPUID_RESP 0x005
+#define GHCB_MSR_CPUID_FUNC_POS 32
+#define GHCB_MSR_CPUID_FUNC_MASK 0xffffffff
+#define GHCB_MSR_CPUID_VALUE_POS 32
+#define GHCB_MSR_CPUID_VALUE_MASK 0xffffffff
+#define GHCB_MSR_CPUID_REG_POS 30
+#define GHCB_MSR_CPUID_REG_MASK 0x3
+#define GHCB_CPUID_REQ_EAX 0
+#define GHCB_CPUID_REQ_EBX 1
+#define GHCB_CPUID_REQ_ECX 2
+#define GHCB_CPUID_REQ_EDX 3
+#define GHCB_CPUID_REQ(fn, reg) \
+ (GHCB_MSR_CPUID_REQ | \
+ (((unsigned long)reg & GHCB_MSR_CPUID_REG_MASK) << GHCB_MSR_CPUID_REG_POS) | \
+ (((unsigned long)fn) << GHCB_MSR_CPUID_FUNC_POS))
+
+/* AP Reset Hold */
+#define GHCB_MSR_AP_RESET_HOLD_REQ 0x006
+#define GHCB_MSR_AP_RESET_HOLD_RESP 0x007
+
+/* GHCB Hypervisor Feature Request/Response */
+#define GHCB_MSR_HV_FT_REQ 0x080
+#define GHCB_MSR_HV_FT_RESP 0x081
+
+#define GHCB_MSR_TERM_REQ 0x100
+#define GHCB_MSR_TERM_REASON_SET_POS 12
+#define GHCB_MSR_TERM_REASON_SET_MASK 0xf
+#define GHCB_MSR_TERM_REASON_POS 16
+#define GHCB_MSR_TERM_REASON_MASK 0xff
+#define GHCB_SEV_TERM_REASON(reason_set, reason_val) \
+ (((((u64)reason_set) & GHCB_MSR_TERM_REASON_SET_MASK) << GHCB_MSR_TERM_REASON_SET_POS) | \
+ ((((u64)reason_val) & GHCB_MSR_TERM_REASON_MASK) << GHCB_MSR_TERM_REASON_POS))
+
+#define GHCB_SEV_ES_REASON_GENERAL_REQUEST 0
+#define GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED 1
+
+#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
+
+#endif
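The GHCB MSR protocol packs a 12-bit request/response code plus payload fields into a single 64-bit MSR value. A standalone sketch that composes and decodes a CPUID request from the definitions above (the macros are copied from this header; the leaf and register inputs are just example values):

	#include <stdio.h>

	#define GHCB_MSR_INFO_MASK      ((1ULL << 12) - 1)
	#define GHCB_MSR_CPUID_REQ      0x004
	#define GHCB_MSR_CPUID_REG_POS  30
	#define GHCB_MSR_CPUID_REG_MASK 0x3
	#define GHCB_MSR_CPUID_FUNC_POS 32
	#define GHCB_CPUID_REQ_EBX      1

	#define GHCB_CPUID_REQ(fn, reg) \
		(GHCB_MSR_CPUID_REQ | \
		 (((unsigned long long)(reg) & GHCB_MSR_CPUID_REG_MASK) << GHCB_MSR_CPUID_REG_POS) | \
		 (((unsigned long long)(fn)) << GHCB_MSR_CPUID_FUNC_POS))

	int main(void)
	{
		/* Ask the hypervisor for CPUID leaf 0x1, register EBX. */
		unsigned long long msr = GHCB_CPUID_REQ(0x1, GHCB_CPUID_REQ_EBX);

		printf("request: %#llx\n", msr);                      /* 0x140000004 */
		printf("info:    %#llx\n", msr & GHCB_MSR_INFO_MASK); /* 0x4 */
		return 0;
	}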
diff --git a/arch/x86/include/asm/sev-es.h b/arch/x86/include/asm/sev.h
index cf1d957c7091..fa5cd05d3b5b 100644
--- a/arch/x86/include/asm/sev-es.h
+++ b/arch/x86/include/asm/sev.h
@@ -10,34 +10,12 @@
#include <linux/types.h>
#include <asm/insn.h>
+#include <asm/sev-common.h>
-#define GHCB_SEV_INFO 0x001UL
-#define GHCB_SEV_INFO_REQ 0x002UL
-#define GHCB_INFO(v) ((v) & 0xfffUL)
-#define GHCB_PROTO_MAX(v) (((v) >> 48) & 0xffffUL)
-#define GHCB_PROTO_MIN(v) (((v) >> 32) & 0xffffUL)
-#define GHCB_PROTO_OUR 0x0001UL
-#define GHCB_SEV_CPUID_REQ 0x004UL
-#define GHCB_CPUID_REQ_EAX 0
-#define GHCB_CPUID_REQ_EBX 1
-#define GHCB_CPUID_REQ_ECX 2
-#define GHCB_CPUID_REQ_EDX 3
-#define GHCB_CPUID_REQ(fn, reg) (GHCB_SEV_CPUID_REQ | \
- (((unsigned long)reg & 3) << 30) | \
- (((unsigned long)fn) << 32))
+#define GHCB_PROTO_OUR 0x0001UL
+#define GHCB_PROTOCOL_MAX 1ULL
+#define GHCB_DEFAULT_USAGE 0ULL
-#define GHCB_PROTOCOL_MAX 0x0001UL
-#define GHCB_DEFAULT_USAGE 0x0000UL
-
-#define GHCB_SEV_CPUID_RESP 0x005UL
-#define GHCB_SEV_TERMINATE 0x100UL
-#define GHCB_SEV_TERMINATE_REASON(reason_set, reason_val) \
- (((((u64)reason_set) & 0x7) << 12) | \
- ((((u64)reason_val) & 0xff) << 16))
-#define GHCB_SEV_ES_REASON_GENERAL_REQUEST 0
-#define GHCB_SEV_ES_REASON_PROTOCOL_UNSUPPORTED 1
-
-#define GHCB_SEV_GHCB_RESP_CODE(v) ((v) & 0xfff)
#define VMGEXIT() { asm volatile("rep; vmmcall\n\r"); }
enum es_result {
diff --git a/arch/x86/kernel/cpu/sgx/arch.h b/arch/x86/include/asm/sgx.h
index dd7602c44c72..05f3e21f01a7 100644
--- a/arch/x86/kernel/cpu/sgx/arch.h
+++ b/arch/x86/include/asm/sgx.h
@@ -2,15 +2,20 @@
/**
* Copyright(c) 2016-20 Intel Corporation.
*
- * Contains data structures defined by the SGX architecture. Data structures
- * defined by the Linux software stack should not be placed here.
+ * Intel Software Guard Extensions (SGX) support.
*/
-#ifndef _ASM_X86_SGX_ARCH_H
-#define _ASM_X86_SGX_ARCH_H
+#ifndef _ASM_X86_SGX_H
+#define _ASM_X86_SGX_H
#include <linux/bits.h>
#include <linux/types.h>
+/*
+ * This file contains both data structures defined by SGX architecture and Linux
+ * defined software data structures and functions. The two should not be mixed
+ * together for better readability. The architectural definitions come first.
+ */
+
/* The SGX specific CPUID function. */
#define SGX_CPUID 0x12
/* EPC enumeration. */
@@ -22,16 +27,36 @@
/* The bitmask for the EPC section type. */
#define SGX_CPUID_EPC_MASK GENMASK(3, 0)
+enum sgx_encls_function {
+ ECREATE = 0x00,
+ EADD = 0x01,
+ EINIT = 0x02,
+ EREMOVE = 0x03,
+ EDGBRD = 0x04,
+ EDGBWR = 0x05,
+ EEXTEND = 0x06,
+ ELDU = 0x08,
+ EBLOCK = 0x09,
+ EPA = 0x0A,
+ EWB = 0x0B,
+ ETRACK = 0x0C,
+ EAUG = 0x0D,
+ EMODPR = 0x0E,
+ EMODT = 0x0F,
+};
+
/**
* enum sgx_return_code - The return code type for ENCLS, ENCLU and ENCLV
* %SGX_NOT_TRACKED: Previous ETRACK's shootdown sequence has not
* been completed yet.
+ * %SGX_CHILD_PRESENT: SECS has child pages present in the EPC.
* %SGX_INVALID_EINITTOKEN: EINITTOKEN is invalid and enclave signer's
* public key does not match IA32_SGXLEPUBKEYHASH.
* %SGX_UNMASKED_EVENT: An unmasked event, e.g. INTR, was received
*/
enum sgx_return_code {
SGX_NOT_TRACKED = 11,
+ SGX_CHILD_PRESENT = 13,
SGX_INVALID_EINITTOKEN = 16,
SGX_UNMASKED_EVENT = 128,
};
@@ -271,7 +296,7 @@ struct sgx_pcmd {
* @header1: constant byte string
* @vendor: must be either 0x0000 or 0x8086
* @date: YYYYMMDD in BCD
- * @header2: costant byte string
+ * @header2: constant byte string
* @swdefined: software defined value
*/
struct sgx_sigstruct_header {
@@ -335,4 +360,19 @@ struct sgx_sigstruct {
#define SGX_LAUNCH_TOKEN_SIZE 304
-#endif /* _ASM_X86_SGX_ARCH_H */
+/*
+ * Do not put any hardware-defined SGX structure representations below this
+ * comment!
+ */
+
+#ifdef CONFIG_X86_SGX_KVM
+int sgx_virt_ecreate(struct sgx_pageinfo *pageinfo, void __user *secs,
+ int *trapnr);
+int sgx_virt_einit(void __user *sigstruct, void __user *token,
+ void __user *secs, u64 *lepubkeyhash, int *trapnr);
+#endif
+
+int sgx_set_attribute(unsigned long *allowed_attributes,
+ unsigned int attribute_fd);
+
+#endif /* _ASM_X86_SGX_H */
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index 8b58d6975d5d..d17b39893b79 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -11,6 +11,7 @@
#include <asm/nops.h>
#include <asm/cpufeatures.h>
+#include <asm/alternative.h>
/* "Raw" instruction opcodes */
#define __ASM_CLAC ".byte 0x0f,0x01,0xca"
@@ -18,8 +19,6 @@
#ifdef __ASSEMBLY__
-#include <asm/alternative-asm.h>
-
#ifdef CONFIG_X86_SMAP
#define ASM_CLAC \
@@ -37,8 +36,6 @@
#else /* __ASSEMBLY__ */
-#include <asm/alternative.h>
-
#ifdef CONFIG_X86_SMAP
static __always_inline void clac(void)
@@ -58,9 +55,8 @@ static __always_inline unsigned long smap_save(void)
unsigned long flags;
asm volatile ("# smap_save\n\t"
- ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP)
- "pushf; pop %0; " __ASM_CLAC "\n\t"
- "1:"
+ ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC "\n\t",
+ X86_FEATURE_SMAP)
: "=rm" (flags) : : "memory", "cc");
return flags;
@@ -69,9 +65,8 @@ static __always_inline unsigned long smap_save(void)
static __always_inline void smap_restore(unsigned long flags)
{
asm volatile ("# smap_restore\n\t"
- ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP)
- "push %0; popf\n\t"
- "1:"
+ ALTERNATIVE("", "push %0; popf\n\t",
+ X86_FEATURE_SMAP)
: : "g" (flags) : "memory", "cc");
}
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index c0538f82c9a2..630ff08532be 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -132,6 +132,7 @@ void native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
int wbinvd_on_all_cpus(void);
+void cond_wakeup_cpu0(void);
void native_smp_send_reschedule(int cpu);
void native_send_call_func_ipi(const struct cpumask *mask);
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 1d3cbaef4bb7..2acd6cb62328 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -214,7 +214,7 @@ static inline void clflush(volatile void *__p)
static inline void clflushopt(volatile void *__p)
{
- alternative_io(".byte " __stringify(NOP_DS_PREFIX) "; clflush %P0",
+ alternative_io(".byte 0x3e; clflush %P0",
".byte 0x66; clflush %P0",
X86_FEATURE_CLFLUSHOPT,
"+m" (*(volatile char __force *)__p));
@@ -225,7 +225,7 @@ static inline void clwb(volatile void *__p)
volatile struct { char x[64]; } *p = __p;
asm volatile(ALTERNATIVE_2(
- ".byte " __stringify(NOP_DS_PREFIX) "; clflush (%[pax])",
+ ".byte 0x3e; clflush (%[pax])",
".byte 0x66; clflush (%[pax])", /* clflushopt (%%rax) */
X86_FEATURE_CLFLUSHOPT,
".byte 0x66, 0x0f, 0xae, 0x30", /* clwb (%%rax) */
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 7fb482f0f25b..24a8d6c4fb18 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -5,30 +5,23 @@
* Stack protector works by putting predefined pattern at the start of
* the stack frame and verifying that it hasn't been overwritten when
* returning from the function. The pattern is called stack canary
- * and unfortunately gcc requires it to be at a fixed offset from %gs.
- * On x86_64, the offset is 40 bytes and on x86_32 20 bytes. x86_64
- * and x86_32 use segment registers differently and thus handles this
- * requirement differently.
+ * and unfortunately gcc historically required it to be at a fixed offset
+ * from the percpu segment base. On x86_64, the offset is 40 bytes.
*
- * On x86_64, %gs is shared by percpu area and stack canary. All
- * percpu symbols are zero based and %gs points to the base of percpu
- * area. The first occupant of the percpu area is always
- * fixed_percpu_data which contains stack_canary at offset 40. Userland
- * %gs is always saved and restored on kernel entry and exit using
- * swapgs, so stack protector doesn't add any complexity there.
+ * The same segment is shared by percpu area and stack canary. On
+ * x86_64, percpu symbols are zero based and %gs (64-bit) points to the
+ * base of percpu area. The first occupant of the percpu area is always
+ * fixed_percpu_data which contains stack_canary at the appropriate
+ * offset. On x86_32, the stack canary is just a regular percpu
+ * variable.
*
- * On x86_32, it's slightly more complicated. As in x86_64, %gs is
- * used for userland TLS. Unfortunately, some processors are much
- * slower at loading segment registers with different value when
- * entering and leaving the kernel, so the kernel uses %fs for percpu
- * area and manages %gs lazily so that %gs is switched only when
- * necessary, usually during task switch.
+ * Putting percpu data in %fs on 32-bit is a minor optimization compared to
+ * using %gs. Since 32-bit userspace normally has %fs == 0, we are likely
+ * to load 0 into %fs on exit to usermode, whereas with percpu data in
+ * %gs, we are likely to load a non-null %gs on return to user mode.
*
- * As gcc requires the stack canary at %gs:20, %gs can't be managed
- * lazily if stack protector is enabled, so the kernel saves and
- * restores userland %gs on kernel entry and exit. This behavior is
- * controlled by CONFIG_X86_32_LAZY_GS and accessors are defined in
- * system.h to hide the details.
+ * Once we are willing to require GCC 8.1 or better for 64-bit stackprotector
+ * support, we can remove some of this complexity.
*/
#ifndef _ASM_STACKPROTECTOR_H
@@ -45,14 +38,6 @@
#include <linux/sched.h>
/*
- * 24 byte read-only segment initializer for stack canary. Linker
- * can't handle the address bit shifting. Address will be set in
- * head_32 for boot CPU and setup_per_cpu_areas() for others.
- */
-#define GDT_STACK_CANARY_INIT \
- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
-
-/*
* Initialize the stackprotector canary value.
*
* NOTE: this must only be called from functions that never return
@@ -86,7 +71,7 @@ static __always_inline void boot_init_stack_canary(void)
#ifdef CONFIG_X86_64
this_cpu_write(fixed_percpu_data.stack_canary, canary);
#else
- this_cpu_write(stack_canary.canary, canary);
+ this_cpu_write(__stack_chk_guard, canary);
#endif
}
@@ -95,48 +80,16 @@ static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
#ifdef CONFIG_X86_64
per_cpu(fixed_percpu_data.stack_canary, cpu) = idle->stack_canary;
#else
- per_cpu(stack_canary.canary, cpu) = idle->stack_canary;
-#endif
-}
-
-static inline void setup_stack_canary_segment(int cpu)
-{
-#ifdef CONFIG_X86_32
- unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu);
- struct desc_struct *gdt_table = get_cpu_gdt_rw(cpu);
- struct desc_struct desc;
-
- desc = gdt_table[GDT_ENTRY_STACK_CANARY];
- set_desc_base(&desc, canary);
- write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
-#endif
-}
-
-static inline void load_stack_canary_segment(void)
-{
-#ifdef CONFIG_X86_32
- asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
+ per_cpu(__stack_chk_guard, cpu) = idle->stack_canary;
#endif
}
#else /* STACKPROTECTOR */
-#define GDT_STACK_CANARY_INIT
-
/* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */
-static inline void setup_stack_canary_segment(int cpu)
-{ }
-
static inline void cpu_init_stack_canary(int cpu, struct task_struct *idle)
{ }
-static inline void load_stack_canary_segment(void)
-{
-#ifdef CONFIG_X86_32
- asm volatile ("mov %0, %%gs" : : "r" (0));
-#endif
-}
-
#endif /* STACKPROTECTOR */
#endif /* _ASM_STACKPROTECTOR_H */
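With the 32-bit canary now a plain percpu variable, the compiler's stack-protector code references __stack_chk_guard directly instead of a fixed %gs:20 slot. A conceptual user-space model of the guard/check sequence that -fstack-protector emits around a protected function (all names here are stand-ins, not the kernel symbols):

	#include <stdlib.h>
	#include <string.h>

	static unsigned long guard = 0x1234abcdUL;  /* models __stack_chk_guard */

	static void guard_fail(void) { abort(); }   /* models __stack_chk_fail() */

	static void protected_fn(const char *src)
	{
		unsigned long canary = guard;       /* prologue: stash canary on stack */
		char buf[16];

		strncpy(buf, src, sizeof(buf) - 1); /* body; an overflow would hit canary */
		buf[sizeof(buf) - 1] = '\0';

		if (canary != guard)                /* epilogue: verify before return */
			guard_fail();
	}

	int main(void) { protected_fn("hello"); return 0; }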
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index fdbd9d7b7bca..7b132d0312eb 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -13,12 +13,10 @@
/* image of the saved processor state */
struct saved_context {
/*
- * On x86_32, all segment registers, with the possible exception of
- * gs, are saved at kernel entry in pt_regs.
+ * On x86_32, all segment registers except gs are saved at kernel
+ * entry in pt_regs.
*/
-#ifdef CONFIG_X86_32_LAZY_GS
u16 gs;
-#endif
unsigned long cr0, cr2, cr3, cr4;
u64 misc_enable;
bool misc_enable_saved;
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 1c561945b426..e322676039f4 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -156,6 +156,12 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
u64 avic_physical_id; /* Offset 0xf8 */
u8 reserved_7[8];
u64 vmsa_pa; /* Used for an SEV-ES guest */
+ u8 reserved_8[720];
+ /*
+ * Offset 0x3e0, 32 bytes reserved
+ * for use by hypervisor/software.
+ */
+ u8 reserved_sw[32];
};
@@ -269,7 +275,9 @@ struct vmcb_save_area {
* SEV-ES guests when referenced through the GHCB or for
* saving to the host save area.
*/
- u8 reserved_7[80];
+ u8 reserved_7[72];
+ u32 spec_ctrl; /* Guest version of SPEC_CTRL at 0x2E0 */
+ u8 reserved_7b[4];
u32 pkru;
u8 reserved_7a[20];
u64 reserved_8; /* rax already available at 0x01f8 */
@@ -312,7 +320,7 @@ struct ghcb {
#define EXPECTED_VMCB_SAVE_AREA_SIZE 1032
-#define EXPECTED_VMCB_CONTROL_AREA_SIZE 272
+#define EXPECTED_VMCB_CONTROL_AREA_SIZE 1024
#define EXPECTED_GHCB_SIZE PAGE_SIZE
static inline void __unused_size_checks(void)
@@ -324,7 +332,6 @@ static inline void __unused_size_checks(void)
struct vmcb {
struct vmcb_control_area control;
- u8 reserved_control[1024 - sizeof(struct vmcb_control_area)];
struct vmcb_save_area save;
} __packed;
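Growing EXPECTED_VMCB_CONTROL_AREA_SIZE to 1024 matches the padding moving inside the control area, and __unused_size_checks() enforces the layout at build time rather than at run time. A freestanding model of the technique (the struct and sizes are illustrative, not the real VMCB):

	#include <assert.h>

	struct control_model {
		unsigned char fields[304];      /* architectural fields */
		unsigned char reserved[688];    /* trailing reserved space */
		unsigned char reserved_sw[32];  /* hypervisor/software scratch */
	};

	/* Fails the build, not the boot, if the ABI-visible size drifts. */
	static_assert(sizeof(struct control_model) == 1024,
		      "control area must stay exactly 1024 bytes");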
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 9f69cc497f4b..b5f0d2ff47e4 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -71,12 +71,7 @@ static inline void update_task_stack(struct task_struct *task)
else
this_cpu_write(cpu_tss_rw.x86_tss.sp1, task->thread.sp0);
#else
- /*
- * x86-64 updates x86_tss.sp1 via cpu_current_top_of_stack. That
- * doesn't work on x86-32 because sp1 and
- * cpu_current_top_of_stack have different values (because of
- * the non-zero stack-padding on 32bit).
- */
+ /* Xen PV enters the kernel on the thread stack. */
if (static_cpu_has(X86_FEATURE_XENPV))
load_sp0(task_top_of_stack(task));
#endif
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 7cbf733d11af..f7e2d82d24fb 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -21,13 +21,12 @@ extern const sys_call_ptr_t sys_call_table[];
#if defined(CONFIG_X86_32)
#define ia32_sys_call_table sys_call_table
-#endif
-
-#if defined(CONFIG_IA32_EMULATION)
+#else
+/*
+ * These may not exist, but still put the prototypes in so we
+ * can use IS_ENABLED().
+ */
extern const sys_call_ptr_t ia32_sys_call_table[];
-#endif
-
-#ifdef CONFIG_X86_X32_ABI
extern const sys_call_ptr_t x32_sys_call_table[];
#endif
@@ -160,7 +159,7 @@ static inline int syscall_get_arch(struct task_struct *task)
? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
}
-void do_syscall_64(unsigned long nr, struct pt_regs *regs);
+void do_syscall_64(struct pt_regs *regs, int nr);
void do_int80_syscall_32(struct pt_regs *regs);
long do_fast_syscall_32(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
index a84333adeef2..6a2827d0681f 100644
--- a/arch/x86/include/asm/syscall_wrapper.h
+++ b/arch/x86/include/asm/syscall_wrapper.h
@@ -17,7 +17,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
* __x64_sys_*() - 64-bit native syscall
* __ia32_sys_*() - 32-bit native syscall or common compat syscall
* __ia32_compat_sys_*() - 32-bit compat syscall
- * __x32_compat_sys_*() - 64-bit X32 compat syscall
+ * __x64_compat_sys_*() - 64-bit X32 compat syscall
*
* The registers are decoded according to the ABI:
* 64-bit: RDI, RSI, RDX, R10, R8, R9
@@ -80,6 +80,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
}
#define __COND_SYSCALL(abi, name) \
+ __weak long __##abi##_##name(const struct pt_regs *__unused); \
__weak long __##abi##_##name(const struct pt_regs *__unused) \
{ \
return sys_ni_syscall(); \
@@ -165,17 +166,17 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
* with x86_64 obviously do not need such care.
*/
#define __X32_COMPAT_SYS_STUB0(name) \
- __SYS_STUB0(x32, compat_sys_##name)
+ __SYS_STUB0(x64, compat_sys_##name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...) \
- __SYS_STUBx(x32, compat_sys##name, \
+ __SYS_STUBx(x64, compat_sys##name, \
SC_X86_64_REGS_TO_ARGS(x, __VA_ARGS__))
#define __X32_COMPAT_COND_SYSCALL(name) \
- __COND_SYSCALL(x32, compat_sys_##name)
+ __COND_SYSCALL(x64, compat_sys_##name)
#define __X32_COMPAT_SYS_NI(name) \
- __SYS_NI(x32, compat_sys_##name)
+ __SYS_NI(x64, compat_sys_##name)
#else /* CONFIG_X86_X32 */
#define __X32_COMPAT_SYS_STUB0(name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...)
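All of the stub macros above expand to the same shape: an ABI-prefixed wrapper that unpacks arguments from pt_regs in the 64-bit register order (RDI, RSI, RDX, R10, R8, R9) and calls the real implementation. A simplified model of that expansion (the names and the two-argument case are illustrative):

	struct pt_regs_model { unsigned long di, si, dx, r10, r8, r9; };

	#define SYS_STUB2(abi, name)                                      \
		long abi##_impl_##name(unsigned long, unsigned long);     \
		long abi##_sys_##name(const struct pt_regs_model *regs)   \
		{                                                         \
			return abi##_impl_##name(regs->di, regs->si);     \
		}

	SYS_STUB2(x64, dup2)  /* emits x64_sys_dup2() calling x64_impl_dup2(di, si) */

	long x64_impl_dup2(unsigned long oldfd, unsigned long newfd)
	{
		return (long)newfd;  /* placeholder body */
	}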
diff --git a/arch/x86/include/asm/thermal.h b/arch/x86/include/asm/thermal.h
index ddbdefd5b94f..91a7b6687c3b 100644
--- a/arch/x86/include/asm/thermal.h
+++ b/arch/x86/include/asm/thermal.h
@@ -3,11 +3,13 @@
#define _ASM_X86_THERMAL_H
#ifdef CONFIG_X86_THERMAL_VECTOR
+void therm_lvt_init(void);
void intel_init_thermal(struct cpuinfo_x86 *c);
bool x86_thermal_enabled(void);
void intel_thermal_interrupt(void);
#else
-static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
+static inline void therm_lvt_init(void) { }
+static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
#endif
#endif /* _ASM_X86_THERMAL_H */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 0d751d5da702..de406d93b515 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -197,18 +197,25 @@ static inline int arch_within_stack_frames(const void * const stack,
#endif
}
-#else /* !__ASSEMBLY__ */
-
-#ifdef CONFIG_X86_64
-# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
-#endif
+#endif /* !__ASSEMBLY__ */
-#endif
+/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
+#ifndef __ASSEMBLY__
#ifdef CONFIG_COMPAT
#define TS_I386_REGS_POKED 0x0004 /* regs poked by 32-bit ptracer */
+
+#define arch_set_restart_data(restart) \
+ do { restart->arch_data = current_thread_info()->status; } while (0)
+
#endif
-#ifndef __ASSEMBLY__
#ifdef CONFIG_X86_32
#define in_ia32_syscall() true
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 8c87a2e0b660..fa952eadbc2e 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -90,23 +90,6 @@ struct tlb_state {
u16 next_asid;
/*
- * We can be in one of several states:
- *
- * - Actively using an mm. Our CPU's bit will be set in
- * mm_cpumask(loaded_mm) and is_lazy == false;
- *
- * - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
- * will not be set in mm_cpumask(&init_mm) and is_lazy == false.
- *
- * - Lazily using a real mm. loaded_mm != &init_mm, our bit
- * is set in mm_cpumask(loaded_mm), but is_lazy == true.
- * We're heuristically guessing that the CR3 load we
- * skipped more than makes up for the overhead added by
- * lazy mode.
- */
- bool is_lazy;
-
- /*
* If set we changed the page tables in such a way that we
* needed an invalidation of all contexts (aka. PCIDs / ASIDs).
* This tells us to go invalidate all the non-loaded ctxs[]
@@ -151,7 +134,27 @@ struct tlb_state {
*/
struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
-DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
+DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate);
+
+struct tlb_state_shared {
+ /*
+ * We can be in one of several states:
+ *
+ * - Actively using an mm. Our CPU's bit will be set in
+ * mm_cpumask(loaded_mm) and is_lazy == false;
+ *
+ * - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit
+ * will not be set in mm_cpumask(&init_mm) and is_lazy == false.
+ *
+ * - Lazily using a real mm. loaded_mm != &init_mm, our bit
+ * is set in mm_cpumask(loaded_mm), but is_lazy == true.
+ * We're heuristically guessing that the CR3 load we
+ * skipped more than makes up for the overhead added by
+ * lazy mode.
+ */
+ bool is_lazy;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
bool nmi_uaccess_okay(void);
#define nmi_uaccess_okay nmi_uaccess_okay
@@ -175,7 +178,7 @@ extern void initialize_tlbstate_and_flush(void);
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
- * - flush_tlb_others(cpumask, info) flushes TLBs on other cpus
+ * - flush_tlb_multi(cpumask, info) flushes TLBs on multiple cpus
*
* ..but the i386 has somewhat limited tlb flushing capabilities,
* and page-granular flushes are available only on i486 and up.
@@ -201,14 +204,15 @@ struct flush_tlb_info {
unsigned long start;
unsigned long end;
u64 new_tlb_gen;
- unsigned int stride_shift;
- bool freed_tables;
+ unsigned int initiating_cpu;
+ u8 stride_shift;
+ u8 freed_tables;
};
void flush_tlb_local(void);
void flush_tlb_one_user(unsigned long addr);
void flush_tlb_one_kernel(unsigned long addr);
-void flush_tlb_others(const struct cpumask *cpumask,
+void flush_tlb_multi(const struct cpumask *cpumask,
const struct flush_tlb_info *info);
#ifdef CONFIG_PARAVIRT
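Splitting is_lazy into cpu_tlbstate_shared keeps the one flag that remote CPUs read off the same cacheline as the hot, strictly CPU-local TLB state. The three states the comment enumerates reduce to a small classifier; a toy model, not kernel code:

	enum tlb_mode { TLB_ACTIVE, TLB_KERNEL_ONLY, TLB_LAZY };

	struct toy_tlbstate {
		int loaded_is_init_mm;  /* loaded_mm == &init_mm */
		int is_lazy;            /* cpu_tlbstate_shared.is_lazy */
	};

	static enum tlb_mode classify(const struct toy_tlbstate *t)
	{
		if (t->loaded_is_init_mm)
			return TLB_KERNEL_ONLY;  /* not using a real mm */
		return t->is_lazy ? TLB_LAZY : TLB_ACTIVE;
	}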
diff --git a/arch/x86/include/asm/trace/hyperv.h b/arch/x86/include/asm/trace/hyperv.h
index 4d705cb4d63b..a8e5a7a2b460 100644
--- a/arch/x86/include/asm/trace/hyperv.h
+++ b/arch/x86/include/asm/trace/hyperv.h
@@ -8,7 +8,7 @@
#if IS_ENABLED(CONFIG_HYPERV)
-TRACE_EVENT(hyperv_mmu_flush_tlb_others,
+TRACE_EVENT(hyperv_mmu_flush_tlb_multi,
TP_PROTO(const struct cpumask *cpus,
const struct flush_tlb_info *info),
TP_ARGS(cpus, info),
diff --git a/arch/x86/include/asm/unaligned.h b/arch/x86/include/asm/unaligned.h
deleted file mode 100644
index 9c754a7447aa..000000000000
--- a/arch/x86/include/asm/unaligned.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_UNALIGNED_H
-#define _ASM_X86_UNALIGNED_H
-
-/*
- * The x86 can do unaligned accesses itself.
- */
-
-#include <linux/unaligned/access_ok.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-
-#endif /* _ASM_X86_UNALIGNED_H */
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index c1c3d31b15c0..80e9d5206a71 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -13,7 +13,7 @@
# define __ARCH_WANT_SYS_OLD_MMAP
# define __ARCH_WANT_SYS_OLD_SELECT
-# define __NR_ia32_syscall_max __NR_syscall_max
+# define IA32_NR_syscalls (__NR_syscalls)
# else
@@ -26,12 +26,12 @@
# define __ARCH_WANT_COMPAT_SYS_PWRITEV64
# define __ARCH_WANT_COMPAT_SYS_PREADV64V2
# define __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
+# define X32_NR_syscalls (__NR_x32_syscalls)
+# define IA32_NR_syscalls (__NR_ia32_syscalls)
# endif
-# define NR_syscalls (__NR_syscall_max + 1)
-# define X32_NR_syscalls (__NR_x32_syscall_max + 1)
-# define IA32_NR_syscalls (__NR_ia32_syscall_max + 1)
+# define NR_syscalls (__NR_syscalls)
# define __ARCH_WANT_NEW_STAT
# define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/include/asm/uv/uv_geo.h b/arch/x86/include/asm/uv/uv_geo.h
index f241451035fb..027a9258dbca 100644
--- a/arch/x86/include/asm/uv/uv_geo.h
+++ b/arch/x86/include/asm/uv/uv_geo.h
@@ -10,7 +10,7 @@
#ifndef _ASM_UV_GEO_H
#define _ASM_UV_GEO_H
-/* Type declaractions */
+/* Type declarations */
/* Size of a geoid_s structure (must be before decl. of geoid_u) */
#define GEOID_SIZE 8
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index 5002f52be332..d3e3197917be 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -353,7 +353,7 @@ union uvh_apicid {
*
* Note there are NO leds on a UV system. This register is only
* used by the system controller to monitor system-wide operation.
- * There are 64 regs per node. With Nahelem cpus (2 cores per node,
+ * There are 64 regs per node. With Nehalem cpus (2 cores per node,
* 8 cpus per core, 2 threads per cpu) there are 32 cpu threads on
* a node.
*
diff --git a/arch/x86/include/asm/vdso/clocksource.h b/arch/x86/include/asm/vdso/clocksource.h
index 119ac8612d89..136e5e57cfe1 100644
--- a/arch/x86/include/asm/vdso/clocksource.h
+++ b/arch/x86/include/asm/vdso/clocksource.h
@@ -7,4 +7,6 @@
VDSO_CLOCKMODE_PVCLOCK, \
VDSO_CLOCKMODE_HVCLOCK
+#define HAVE_VDSO_CLOCKMODE_HVCLOCK
+
#endif /* __ASM_VDSO_CLOCKSOURCE_H */
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index df01d7349d79..1936f21ed8cd 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -58,7 +58,8 @@ extern struct ms_hyperv_tsc_page hvclock_page
#endif
#ifdef CONFIG_TIME_NS
-static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
{
return __timens_vdso_data;
}
diff --git a/arch/x86/include/asm/vmalloc.h b/arch/x86/include/asm/vmalloc.h
index 29837740b520..49ce331f3ac6 100644
--- a/arch/x86/include/asm/vmalloc.h
+++ b/arch/x86/include/asm/vmalloc.h
@@ -1,6 +1,26 @@
#ifndef _ASM_X86_VMALLOC_H
#define _ASM_X86_VMALLOC_H
+#include <asm/cpufeature.h>
+#include <asm/page.h>
#include <asm/pgtable_areas.h>
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#ifdef CONFIG_X86_64
+#define arch_vmap_pud_supported arch_vmap_pud_supported
+static inline bool arch_vmap_pud_supported(pgprot_t prot)
+{
+ return boot_cpu_has(X86_FEATURE_GBPAGES);
+}
+#endif
+
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
+static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+{
+ return boot_cpu_has(X86_FEATURE_PSE);
+}
+
+#endif
+
#endif /* _ASM_X86_VMALLOC_H */
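The '#define arch_vmap_pud_supported arch_vmap_pud_supported' self-define is the usual kernel idiom for letting generic code supply a fallback only when the architecture has not claimed the name. A minimal model of the pattern (names are invented for illustration):

	/* arch header: claims the name and provides the real implementation */
	#define arch_has_gbpages arch_has_gbpages
	static inline int arch_has_gbpages(void) { return 1; }

	/* generic header: fallback compiled only if no arch claimed the name */
	#ifndef arch_has_gbpages
	static inline int arch_has_gbpages(void) { return 0; }
	#endif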
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 358707f60d99..0ffaa3156a4e 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -373,6 +373,7 @@ enum vmcs_field {
#define GUEST_INTR_STATE_MOV_SS 0x00000002
#define GUEST_INTR_STATE_SMI 0x00000004
#define GUEST_INTR_STATE_NMI 0x00000008
+#define GUEST_INTR_STATE_ENCLAVE_INTR 0x00000010
/* GUEST_ACTIVITY_STATE flags */
#define GUEST_ACTIVITY_ACTIVE 0
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 7068e4bb057d..1a162e559753 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -87,18 +87,6 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
#endif
/*
- * The maximum amount of extra memory compared to the base size. The
- * main scaling factor is the size of struct page. At extreme ratios
- * of base:extra, all the base memory can be filled with page
- * structures for the extra memory, leaving no space for anything
- * else.
- *
- * 10x seems like a reasonable balance between scaling flexibility and
- * leaving a practically usable system.
- */
-#define XEN_EXTRA_MEM_RATIO (10)
-
-/*
* Helper functions to write or read unsigned long values to/from
* memory, when the access may fault.
*/
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 600a141c8805..b25d3f82c2f3 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -234,7 +234,7 @@ struct boot_params {
* handling of page tables.
*
* These enums should only ever be used by x86 code, and the code that uses
- * it should be well contained and compartamentalized.
+ * it should be well contained and compartmentalized.
*
* KVM and Xen HVM do not have a subarch as these are expected to follow
* standard x86 boot entries. If there is a genuine need for "hypervisor" type
@@ -252,7 +252,7 @@ struct boot_params {
* @X86_SUBARCH_XEN: Used for Xen guest types which follow the PV boot path,
* which start at asm startup_xen() entry point and later jump to the C
* xen_start_kernel() entry point. Both domU and dom0 type of guests are
- * currently supportd through this PV boot path.
+ * currently supported through this PV boot path.
* @X86_SUBARCH_INTEL_MID: Used for Intel MID (Mobile Internet Device) platform
* systems which do not have the PCI legacy interfaces.
* @X86_SUBARCH_CE4100: Used for Intel CE media processor (CE4100) SoC
diff --git a/arch/x86/include/uapi/asm/debugreg.h b/arch/x86/include/uapi/asm/debugreg.h
index d95d080b30e3..0007ba077c0c 100644
--- a/arch/x86/include/uapi/asm/debugreg.h
+++ b/arch/x86/include/uapi/asm/debugreg.h
@@ -24,6 +24,7 @@
#define DR_TRAP3 (0x8) /* db3 */
#define DR_TRAP_BITS (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)
+#define DR_BUS_LOCK (0x800) /* bus_lock */
#define DR_STEP (0x4000) /* single-step */
#define DR_SWITCH (0x8000) /* task switch */
diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h
index 5fdfcb47000f..054604aba9f0 100644
--- a/arch/x86/include/uapi/asm/hwcap2.h
+++ b/arch/x86/include/uapi/asm/hwcap2.h
@@ -2,10 +2,12 @@
#ifndef _ASM_X86_HWCAP2_H
#define _ASM_X86_HWCAP2_H
+#include <linux/const.h>
+
/* MONITOR/MWAIT enabled in Ring 3 */
-#define HWCAP2_RING3MWAIT (1 << 0)
+#define HWCAP2_RING3MWAIT _BITUL(0)
/* Kernel allows FSGSBASE instructions available in Ring 3 */
-#define HWCAP2_FSGSBASE BIT(1)
+#define HWCAP2_FSGSBASE _BITUL(1)
#endif
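Switching the HWCAP2 bits to _BITUL() (from <linux/const.h>) keeps this uapi header usable from assembly, where the kernel's BIT() macro is unavailable; the values are unchanged. A quick user-space consumer, with _BITUL simplified to its C expansion:

	#include <sys/auxv.h>
	#include <stdio.h>

	#define _BITUL(x) (1UL << (x))    /* simplified; the real one uses _UL() */
	#define HWCAP2_FSGSBASE _BITUL(1) /* == 0x2 */

	int main(void)
	{
		/* Userspace reads these bits from the AT_HWCAP2 auxv entry. */
		if (getauxval(AT_HWCAP2) & HWCAP2_FSGSBASE)
			puts("FSGSBASE instructions available in ring 3");
		return 0;
	}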
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 5a3022c8af82..a6c327f8ad9e 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -159,6 +159,19 @@ struct kvm_sregs {
__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};
+struct kvm_sregs2 {
+ /* out (KVM_GET_SREGS2) / in (KVM_SET_SREGS2) */
+ struct kvm_segment cs, ds, es, fs, gs, ss;
+ struct kvm_segment tr, ldt;
+ struct kvm_dtable gdt, idt;
+ __u64 cr0, cr2, cr3, cr4, cr8;
+ __u64 efer;
+ __u64 apic_base;
+ __u64 flags;
+ __u64 pdptrs[4];
+};
+#define KVM_SREGS2_FLAGS_PDPTRS_VALID 1
+
/* for KVM_GET_FPU and KVM_SET_FPU */
struct kvm_fpu {
__u8 fpr[8][16];
@@ -437,6 +450,8 @@ struct kvm_vmx_nested_state_hdr {
__u16 flags;
} smm;
+ __u16 pad;
+
__u32 flags;
__u64 preemption_timer_deadline;
};
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 950afebfba88..5146bbab84d4 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -33,6 +33,8 @@
#define KVM_FEATURE_PV_SCHED_YIELD 13
#define KVM_FEATURE_ASYNC_PF_INT 14
#define KVM_FEATURE_MSI_EXT_DEST_ID 15
+#define KVM_FEATURE_HC_MAP_GPA_RANGE 16
+#define KVM_FEATURE_MIGRATION_CONTROL 17
#define KVM_HINTS_REALTIME 0
@@ -54,6 +56,7 @@
#define MSR_KVM_POLL_CONTROL 0x4b564d05
#define MSR_KVM_ASYNC_PF_INT 0x4b564d06
#define MSR_KVM_ASYNC_PF_ACK 0x4b564d07
+#define MSR_KVM_MIGRATION_CONTROL 0x4b564d08
struct kvm_steal_time {
__u64 steal;
@@ -90,6 +93,16 @@ struct kvm_clock_pairing {
/* MSR_KVM_ASYNC_PF_INT */
#define KVM_ASYNC_PF_VEC_MASK GENMASK(7, 0)
+/* MSR_KVM_MIGRATION_CONTROL */
+#define KVM_MIGRATION_READY (1 << 0)
+
+/* KVM_HC_MAP_GPA_RANGE */
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_4K 0
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_2M (1 << 0)
+#define KVM_MAP_GPA_RANGE_PAGE_SZ_1G (1 << 1)
+#define KVM_MAP_GPA_RANGE_ENC_STAT(n) (n << 4)
+#define KVM_MAP_GPA_RANGE_ENCRYPTED KVM_MAP_GPA_RANGE_ENC_STAT(1)
+#define KVM_MAP_GPA_RANGE_DECRYPTED KVM_MAP_GPA_RANGE_ENC_STAT(0)
/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE 1
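For KVM_HC_MAP_GPA_RANGE, the guest encodes the backing page size in the low bits and the encryption status at bit 4. A quick composition example based on the definitions above (locally re-parenthesized; the call site is hypothetical):

	#define MAP_GPA_PAGE_SZ_2M  (1UL << 0)
	#define MAP_GPA_ENC_STAT(n) ((unsigned long)(n) << 4)
	#define MAP_GPA_ENCRYPTED   MAP_GPA_ENC_STAT(1)

	/* Map a 2M-backed GPA range and mark it encrypted: flags == 0x11. */
	unsigned long flags = MAP_GPA_PAGE_SZ_2M | MAP_GPA_ENCRYPTED;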
diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
index b3d0664fadc9..ac83e25bbf37 100644
--- a/arch/x86/include/uapi/asm/msgbuf.h
+++ b/arch/x86/include/uapi/asm/msgbuf.h
@@ -12,7 +12,7 @@
* The msqid64_ds structure for x86 architecture with x32 ABI.
*
* On x86-32 and x86-64 we can just use the generic definition, but
- * x32 uses the same binary layout as x86_64, which is differnet
+ * x32 uses the same binary layout as x86_64, which is different
* from other 32-bit architectures.
*/
diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
index 9034f3007c4e..9690d6899ad9 100644
--- a/arch/x86/include/uapi/asm/sgx.h
+++ b/arch/x86/include/uapi/asm/sgx.h
@@ -152,7 +152,7 @@ struct sgx_enclave_run {
* Most exceptions reported on ENCLU, including those that occur within the
* enclave, are fixed up and reported synchronously instead of being delivered
* via a standard signal. Debug Exceptions (#DB) and Breakpoints (#BP) are
- * never fixed up and are always delivered via standard signals. On synchrously
+ * never fixed up and are always delivered via standard signals. On synchronously
* reported exceptions, -EFAULT is returned and details about the exception are
* recorded in @run.exception, the optional sgx_enclave_exception struct.
*
diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
index f0305dc660c9..fce18eaa070c 100644
--- a/arch/x86/include/uapi/asm/shmbuf.h
+++ b/arch/x86/include/uapi/asm/shmbuf.h
@@ -9,7 +9,7 @@
* The shmid64_ds structure for x86 architecture with x32 ABI.
*
* On x86-32 and x86-64 we can just use the generic definition, but
- * x32 uses the same binary layout as x86_64, which is differnet
+ * x32 uses the same binary layout as x86_64, which is different
* from other 32-bit architectures.
*/
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
index 844d60eb1882..d0d9b331d3a1 100644
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -139,7 +139,7 @@ struct _fpstate_32 {
* The 64-bit FPU frame. (FXSAVE format and later)
*
* Note1: If sw_reserved.magic1 == FP_XSTATE_MAGIC1 then the structure is
- * larger: 'struct _xstate'. Note that 'struct _xstate' embedds
+ * larger: 'struct _xstate'. Note that 'struct _xstate' embeds
* 'struct _fpstate' so that you can always assume the _fpstate portion
* exists so that you can check the magic value.
*
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index 554f75fe013c..efa969325ede 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -110,6 +110,9 @@
#define SVM_VMGEXIT_GET_AP_JUMP_TABLE 1
#define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff
+/* Exit code reserved for hypervisor/software use */
+#define SVM_EXIT_SW 0xf0000000
+
#define SVM_EXIT_ERR -1
#define SVM_EXIT_REASONS \
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index b8e650a985e3..946d761adbd3 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -27,6 +27,7 @@
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
+#define VMX_EXIT_REASONS_SGX_ENCLAVE_MODE 0x08000000
#define EXIT_REASON_EXCEPTION_NMI 0
#define EXIT_REASON_EXTERNAL_INTERRUPT 1
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 2ddf08351f0b..3e625c61f008 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -20,7 +20,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
CFLAGS_REMOVE_head64.o = -pg
-CFLAGS_REMOVE_sev-es.o = -pg
+CFLAGS_REMOVE_sev.o = -pg
endif
KASAN_SANITIZE_head$(BITS).o := n
@@ -28,14 +28,13 @@ KASAN_SANITIZE_dumpstack.o := n
KASAN_SANITIZE_dumpstack_$(BITS).o := n
KASAN_SANITIZE_stacktrace.o := n
KASAN_SANITIZE_paravirt.o := n
-KASAN_SANITIZE_sev-es.o := n
+KASAN_SANITIZE_sev.o := n
# With some compiler versions the generated code results in boot hangs, caused
# by several compilation units. To be safe, disable all instrumentation.
KCSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD_test_nx.o := y
-OBJECT_FILES_NON_STANDARD_paravirt_patch.o := y
ifdef CONFIG_FRAME_POINTER
OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o := y
@@ -103,6 +102,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += ftrace_$(BITS).o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
obj-$(CONFIG_X86_TSC) += trace_clock.o
+obj-$(CONFIG_TRACING) += trace.o
obj-$(CONFIG_CRASH_CORE) += crash_core_$(BITS).o
obj-$(CONFIG_KEXEC_CORE) += machine_kexec_$(BITS).o
obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o crash.o
@@ -121,7 +121,7 @@ obj-$(CONFIG_AMD_NB) += amd_nb.o
obj-$(CONFIG_DEBUG_NMI_SELFTEST) += nmi_selftest.o
obj-$(CONFIG_KVM_GUEST) += kvm.o kvmclock.o
-obj-$(CONFIG_PARAVIRT) += paravirt.o paravirt_patch.o
+obj-$(CONFIG_PARAVIRT) += paravirt.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
@@ -149,7 +149,7 @@ obj-$(CONFIG_UNWINDER_ORC) += unwind_orc.o
obj-$(CONFIG_UNWINDER_FRAME_POINTER) += unwind_frame.o
obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
-obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev-es.o
+obj-$(CONFIG_AMD_MEM_ENCRYPT) += sev.o
###
# 64 bit specific files
ifeq ($(CONFIG_X86_64),y)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 7bdc0239a943..e55e0c1fad8c 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -5,6 +5,7 @@
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
*/
+#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/init.h>
#include <linux/acpi.h>
@@ -42,8 +43,6 @@ EXPORT_SYMBOL(acpi_disabled);
# include <asm/proto.h>
#endif /* X86 */
-#define PREFIX "ACPI: "
-
int acpi_noirq; /* skip ACPI IRQ initialization */
static int acpi_nobgrt; /* skip ACPI BGRT */
int acpi_pci_disabled; /* skip ACPI PCI scan and IRQ initialization */
@@ -130,15 +129,14 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
madt = (struct acpi_table_madt *)table;
if (!madt) {
- printk(KERN_WARNING PREFIX "Unable to map MADT\n");
+ pr_warn("Unable to map MADT\n");
return -ENODEV;
}
if (madt->address) {
acpi_lapic_addr = (u64) madt->address;
- printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
- madt->address);
+ pr_debug("Local APIC address 0x%08x\n", madt->address);
}
default_acpi_madt_oem_check(madt->header.oem_id,
@@ -161,7 +159,7 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
int cpu;
if (id >= MAX_LOCAL_APIC) {
- printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
+ pr_info("skipped apicid that is too big\n");
return -EINVAL;
}
@@ -213,13 +211,13 @@ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
*/
if (!apic->apic_id_valid(apic_id)) {
if (enabled)
- pr_warn(PREFIX "x2apic entry ignored\n");
+ pr_warn("x2apic entry ignored\n");
return 0;
}
acpi_register_lapic(apic_id, processor->uid, enabled);
#else
- printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
+ pr_warn("x2apic entry ignored\n");
#endif
return 0;
@@ -306,7 +304,7 @@ acpi_parse_x2apic_nmi(union acpi_subtable_headers *header,
acpi_table_print_madt_entry(&header->common);
if (x2apic_nmi->lint != 1)
- printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+ pr_warn("NMI not connected to LINT 1!\n");
return 0;
}
@@ -324,7 +322,7 @@ acpi_parse_lapic_nmi(union acpi_subtable_headers * header, const unsigned long e
acpi_table_print_madt_entry(&header->common);
if (lapic_nmi->lint != 1)
- printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
+ pr_warn("NMI not connected to LINT 1!\n");
return 0;
}
@@ -514,14 +512,14 @@ acpi_parse_int_src_ovr(union acpi_subtable_headers * header,
if (intsrc->source_irq == 0) {
if (acpi_skip_timer_override) {
- printk(PREFIX "BIOS IRQ0 override ignored.\n");
+ pr_warn("BIOS IRQ0 override ignored.\n");
return 0;
}
if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
&& (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
- printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
+ pr_warn("BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
}
}
@@ -597,7 +595,7 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
if (old == new)
return;
- printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
+ pr_warn("setting ELCR to %04x (from %04x)\n", new, old);
outb(new, 0x4d0);
outb(new >> 8, 0x4d1);
}
@@ -754,7 +752,7 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id,
cpu = acpi_register_lapic(physid, acpi_id, ACPI_MADT_ENABLED);
if (cpu < 0) {
- pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+ pr_info("Unable to map lapic to logical cpu number\n");
return cpu;
}
@@ -830,7 +828,7 @@ int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
EXPORT_SYMBOL(acpi_unregister_ioapic);
/**
- * acpi_ioapic_registered - Check whether IOAPIC assoicatied with @gsi_base
+ * acpi_ioapic_registered - Check whether IOAPIC associated with @gsi_base
* has been registered
* @handle: ACPI handle of the IOAPIC device
* @gsi_base: GSI base associated with the IOAPIC
@@ -870,8 +868,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
struct acpi_table_hpet *hpet_tbl = (struct acpi_table_hpet *)table;
if (hpet_tbl->address.space_id != ACPI_SPACE_MEM) {
- printk(KERN_WARNING PREFIX "HPET timers must be located in "
- "memory.\n");
+ pr_warn("HPET timers must be located in memory.\n");
return -1;
}
@@ -883,9 +880,7 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
* want to allocate a resource there.
*/
if (!hpet_address) {
- printk(KERN_WARNING PREFIX
- "HPET id: %#x base: %#lx is invalid\n",
- hpet_tbl->id, hpet_address);
+ pr_warn("HPET id: %#x base: %#lx is invalid\n", hpet_tbl->id, hpet_address);
return 0;
}
#ifdef CONFIG_X86_64
@@ -896,21 +891,17 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table)
*/
if (hpet_address == 0xfed0000000000000UL) {
if (!hpet_force_user) {
- printk(KERN_WARNING PREFIX "HPET id: %#x "
- "base: 0xfed0000000000000 is bogus\n "
- "try hpet=force on the kernel command line to "
- "fix it up to 0xfed00000.\n", hpet_tbl->id);
+ pr_warn("HPET id: %#x base: 0xfed0000000000000 is bogus, try hpet=force on the kernel command line to fix it up to 0xfed00000.\n",
+ hpet_tbl->id);
hpet_address = 0;
return 0;
}
- printk(KERN_WARNING PREFIX
- "HPET id: %#x base: 0xfed0000000000000 fixed up "
- "to 0xfed00000.\n", hpet_tbl->id);
+ pr_warn("HPET id: %#x base: 0xfed0000000000000 fixed up to 0xfed00000.\n",
+ hpet_tbl->id);
hpet_address >>= 32;
}
#endif
- printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
- hpet_tbl->id, hpet_address);
+ pr_info("HPET id: %#x base: %#lx\n", hpet_tbl->id, hpet_address);
/*
* Allocate and initialize the HPET firmware resource for adding into
@@ -955,24 +946,24 @@ late_initcall(hpet_insert_resource);
static int __init acpi_parse_fadt(struct acpi_table_header *table)
{
if (!(acpi_gbl_FADT.boot_flags & ACPI_FADT_LEGACY_DEVICES)) {
- pr_debug("ACPI: no legacy devices present\n");
+ pr_debug("no legacy devices present\n");
x86_platform.legacy.devices.pnpbios = 0;
}
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
!(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042) &&
x86_platform.legacy.i8042 != X86_LEGACY_I8042_PLATFORM_ABSENT) {
- pr_debug("ACPI: i8042 controller is absent\n");
+ pr_debug("i8042 controller is absent\n");
x86_platform.legacy.i8042 = X86_LEGACY_I8042_FIRMWARE_ABSENT;
}
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
- pr_debug("ACPI: not registering RTC platform device\n");
+ pr_debug("not registering RTC platform device\n");
x86_platform.legacy.rtc = 0;
}
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_VGA) {
- pr_debug("ACPI: probing for VGA not safe\n");
+ pr_debug("probing for VGA not safe\n");
x86_platform.legacy.no_vga = 1;
}
@@ -997,8 +988,7 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
pmtmr_ioport = acpi_gbl_FADT.pm_timer_block;
}
if (pmtmr_ioport)
- printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
- pmtmr_ioport);
+ pr_info("PM-Timer IO Port: %#x\n", pmtmr_ioport);
#endif
return 0;
}
@@ -1024,8 +1014,7 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
acpi_parse_lapic_addr_ovr, 0);
if (count < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing LAPIC address override entry\n");
+ pr_err("Error parsing LAPIC address override entry\n");
return count;
}
@@ -1057,8 +1046,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
sizeof(struct acpi_table_madt),
madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC);
if (ret < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing LAPIC/X2APIC entries\n");
+ pr_err("Error parsing LAPIC/X2APIC entries\n");
return ret;
}
@@ -1066,11 +1054,11 @@ static int __init acpi_parse_madt_lapic_entries(void)
x2count = madt_proc[1].count;
}
if (!count && !x2count) {
- printk(KERN_ERR PREFIX "No LAPIC entries present\n");
+ pr_err("No LAPIC entries present\n");
/* TBD: Cleanup to allow fallback to MPS */
return -ENODEV;
} else if (count < 0 || x2count < 0) {
- printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
+ pr_err("Error parsing LAPIC entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1080,7 +1068,7 @@ static int __init acpi_parse_madt_lapic_entries(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI,
acpi_parse_lapic_nmi, 0);
if (count < 0 || x2count < 0) {
- printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
+ pr_err("Error parsing LAPIC NMI entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1139,7 +1127,7 @@ static void __init mp_config_acpi_legacy_irqs(void)
}
if (idx != mp_irq_entries) {
- printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
+ pr_debug("ACPI: IRQ%d used by override.\n", i);
continue; /* IRQ already used */
}
@@ -1179,26 +1167,24 @@ static int __init acpi_parse_madt_ioapic_entries(void)
* if "noapic" boot option, don't look for IO-APICs
*/
if (skip_ioapic_setup) {
- printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
- "due to 'noapic' option.\n");
+ pr_info("Skipping IOAPIC probe due to 'noapic' option.\n");
return -ENODEV;
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_ioapic,
MAX_IO_APICS);
if (!count) {
- printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
+ pr_err("No IOAPIC entries present\n");
return -ENODEV;
} else if (count < 0) {
- printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
+ pr_err("Error parsing IOAPIC entry\n");
return count;
}
count = acpi_table_parse_madt(ACPI_MADT_TYPE_INTERRUPT_OVERRIDE,
acpi_parse_int_src_ovr, nr_irqs);
if (count < 0) {
- printk(KERN_ERR PREFIX
- "Error parsing interrupt source overrides entry\n");
+ pr_err("Error parsing interrupt source overrides entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1218,7 +1204,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
count = acpi_table_parse_madt(ACPI_MADT_TYPE_NMI_SOURCE,
acpi_parse_nmi_src, nr_irqs);
if (count < 0) {
- printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
+ pr_err("Error parsing NMI SRC entry\n");
/* TBD: Cleanup to allow fallback to MPS */
return count;
}
@@ -1251,8 +1237,7 @@ static void __init early_acpi_process_madt(void)
/*
* Dell Precision Workstation 410, 610 come here.
*/
- printk(KERN_ERR PREFIX
- "Invalid BIOS MADT, disabling ACPI\n");
+ pr_err("Invalid BIOS MADT, disabling ACPI\n");
disable_acpi();
}
}
@@ -1289,8 +1274,7 @@ static void __init acpi_process_madt(void)
/*
* Dell Precision Workstation 410, 610 come here.
*/
- printk(KERN_ERR PREFIX
- "Invalid BIOS MADT, disabling ACPI\n");
+ pr_err("Invalid BIOS MADT, disabling ACPI\n");
disable_acpi();
}
} else {
@@ -1300,8 +1284,7 @@ static void __init acpi_process_madt(void)
* Boot with "acpi=off" to use MPS on such a system.
*/
if (smp_found_config) {
- printk(KERN_WARNING PREFIX
- "No APIC-table, disabling MPS\n");
+ pr_warn("No APIC-table, disabling MPS\n");
smp_found_config = 0;
}
}
@@ -1311,11 +1294,9 @@ static void __init acpi_process_madt(void)
* processors, where MPS only supports physical.
*/
if (acpi_lapic && acpi_ioapic)
- printk(KERN_INFO "Using ACPI (MADT) for SMP configuration "
- "information\n");
+ pr_info("Using ACPI (MADT) for SMP configuration information\n");
else if (acpi_lapic)
- printk(KERN_INFO "Using ACPI for processor (LAPIC) "
- "configuration information\n");
+ pr_info("Using ACPI for processor (LAPIC) configuration information\n");
#endif
return;
}
@@ -1323,8 +1304,7 @@ static void __init acpi_process_madt(void)
static int __init disable_acpi_irq(const struct dmi_system_id *d)
{
if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
- d->ident);
+ pr_notice("%s detected: force use of acpi=noirq\n", d->ident);
acpi_noirq_set();
}
return 0;
@@ -1333,8 +1313,7 @@ static int __init disable_acpi_irq(const struct dmi_system_id *d)
static int __init disable_acpi_pci(const struct dmi_system_id *d)
{
if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
- d->ident);
+ pr_notice("%s detected: force use of pci=noacpi\n", d->ident);
acpi_disable_pci();
}
return 0;
@@ -1343,11 +1322,10 @@ static int __init disable_acpi_pci(const struct dmi_system_id *d)
static int __init dmi_disable_acpi(const struct dmi_system_id *d)
{
if (!acpi_force) {
- printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
+ pr_notice("%s detected: acpi off\n", d->ident);
disable_acpi();
} else {
- printk(KERN_NOTICE
- "Warning: DMI blacklist says broken, but acpi forced\n");
+ pr_notice("Warning: DMI blacklist says broken, but acpi forced\n");
}
return 0;
}
@@ -1554,10 +1532,18 @@ void __init acpi_boot_table_init(void)
/*
* Initialize the ACPI boot-time table parser.
*/
- if (acpi_table_init()) {
+ if (acpi_locate_initial_tables())
disable_acpi();
- return;
- }
+ else
+ acpi_reserve_initial_tables();
+}
+
+int __init early_acpi_boot_init(void)
+{
+ if (acpi_disabled)
+ return 1;
+
+ acpi_table_init_complete();
acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
@@ -1566,22 +1552,13 @@ void __init acpi_boot_table_init(void)
*/
if (acpi_blacklisted()) {
if (acpi_force) {
- printk(KERN_WARNING PREFIX "acpi=force override\n");
+ pr_warn("acpi=force override\n");
} else {
- printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
+ pr_warn("Disabling ACPI support\n");
disable_acpi();
- return;
+ return 1;
}
}
-}
-
-int __init early_acpi_boot_init(void)
-{
- /*
- * If acpi_disabled, bail out
- */
- if (acpi_disabled)
- return 1;
/*
* Process the Multiple APIC Description Table (MADT), if present
@@ -1657,7 +1634,7 @@ static int __init parse_acpi(char *arg)
else if (strcmp(arg, "noirq") == 0) {
acpi_noirq_set();
}
- /* "acpi=copy_dsdt" copys DSDT */
+ /* "acpi=copy_dsdt" copies DSDT */
else if (strcmp(arg, "copy_dsdt") == 0) {
acpi_gbl_copy_dsdt_locally = 1;
}
@@ -1693,9 +1670,7 @@ int __init acpi_mps_check(void)
#if defined(CONFIG_X86_LOCAL_APIC) && !defined(CONFIG_X86_MPPARSE)
/* mptable code is not built-in*/
if (acpi_disabled || acpi_noirq) {
- printk(KERN_WARNING "MPS support code is not built-in.\n"
- "Using acpi=off or acpi=noirq or pci=noacpi "
- "may have problem\n");
+		pr_warn("MPS support code is not built-in, using acpi=off or acpi=noirq or pci=noacpi may cause problems\n");
return 1;
}
#endif
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 49ae4e1ac9cd..7de599eba7f0 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -197,7 +197,8 @@ static int __init ffh_cstate_init(void)
struct cpuinfo_x86 *c = &boot_cpu_data;
if (c->x86_vendor != X86_VENDOR_INTEL &&
- c->x86_vendor != X86_VENDOR_AMD)
+ c->x86_vendor != X86_VENDOR_AMD &&
+ c->x86_vendor != X86_VENDOR_HYGON)
return -1;
cpu_cstate_entry = alloc_percpu(struct cstate_entry);
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index cc1fea76aab0..3f85fcae450c 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -41,7 +41,7 @@ unsigned long acpi_get_wakeup_address(void)
* x86_acpi_enter_sleep_state - enter sleep state
* @state: Sleep state to enter.
*
- * Wrapper around acpi_enter_sleep_state() to be called by assmebly.
+ * Wrapper around acpi_enter_sleep_state() to be called by assembly.
*/
asmlinkage acpi_status __visible x86_acpi_enter_sleep_state(u8 state)
{
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 56b6865afb2a..d5d8a352eafa 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -115,7 +115,7 @@ SYM_FUNC_START(do_suspend_lowlevel)
movq pt_regs_r14(%rax), %r14
movq pt_regs_r15(%rax), %r15
-#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
+#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
/*
* The suspend path may have poisoned some areas deeper in the stack,
* which we now need to unpoison.
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 8d778e46725d..e9da3dc71254 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -28,6 +28,7 @@
#include <asm/insn.h>
#include <asm/io.h>
#include <asm/fixmap.h>
+#include <asm/paravirt.h>
int __read_mostly alternatives_patched;
@@ -74,186 +75,30 @@ do { \
} \
} while (0)
-/*
- * Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
- * that correspond to that nop. Getting from one nop to the next, we
- * add to the array the offset that is equal to the sum of all sizes of
- * nops preceding the one we are after.
- *
- * Note: The GENERIC_NOP5_ATOMIC is at the end, as it breaks the
- * nice symmetry of sizes of the previous nops.
- */
-#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
-static const unsigned char intelnops[] =
-{
- GENERIC_NOP1,
- GENERIC_NOP2,
- GENERIC_NOP3,
- GENERIC_NOP4,
- GENERIC_NOP5,
- GENERIC_NOP6,
- GENERIC_NOP7,
- GENERIC_NOP8,
- GENERIC_NOP5_ATOMIC
-};
-static const unsigned char * const intel_nops[ASM_NOP_MAX+2] =
-{
- NULL,
- intelnops,
- intelnops + 1,
- intelnops + 1 + 2,
- intelnops + 1 + 2 + 3,
- intelnops + 1 + 2 + 3 + 4,
- intelnops + 1 + 2 + 3 + 4 + 5,
- intelnops + 1 + 2 + 3 + 4 + 5 + 6,
- intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
- intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
-};
-#endif
-
-#ifdef K8_NOP1
-static const unsigned char k8nops[] =
-{
- K8_NOP1,
- K8_NOP2,
- K8_NOP3,
- K8_NOP4,
- K8_NOP5,
- K8_NOP6,
- K8_NOP7,
- K8_NOP8,
- K8_NOP5_ATOMIC
-};
-static const unsigned char * const k8_nops[ASM_NOP_MAX+2] =
+static const unsigned char x86nops[] =
{
- NULL,
- k8nops,
- k8nops + 1,
- k8nops + 1 + 2,
- k8nops + 1 + 2 + 3,
- k8nops + 1 + 2 + 3 + 4,
- k8nops + 1 + 2 + 3 + 4 + 5,
- k8nops + 1 + 2 + 3 + 4 + 5 + 6,
- k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
- k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
-};
-#endif
-
-#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
-static const unsigned char k7nops[] =
-{
- K7_NOP1,
- K7_NOP2,
- K7_NOP3,
- K7_NOP4,
- K7_NOP5,
- K7_NOP6,
- K7_NOP7,
- K7_NOP8,
- K7_NOP5_ATOMIC
-};
-static const unsigned char * const k7_nops[ASM_NOP_MAX+2] =
-{
- NULL,
- k7nops,
- k7nops + 1,
- k7nops + 1 + 2,
- k7nops + 1 + 2 + 3,
- k7nops + 1 + 2 + 3 + 4,
- k7nops + 1 + 2 + 3 + 4 + 5,
- k7nops + 1 + 2 + 3 + 4 + 5 + 6,
- k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
- k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
+ BYTES_NOP1,
+ BYTES_NOP2,
+ BYTES_NOP3,
+ BYTES_NOP4,
+ BYTES_NOP5,
+ BYTES_NOP6,
+ BYTES_NOP7,
+ BYTES_NOP8,
};
-#endif
-#ifdef P6_NOP1
-static const unsigned char p6nops[] =
-{
- P6_NOP1,
- P6_NOP2,
- P6_NOP3,
- P6_NOP4,
- P6_NOP5,
- P6_NOP6,
- P6_NOP7,
- P6_NOP8,
- P6_NOP5_ATOMIC
-};
-static const unsigned char * const p6_nops[ASM_NOP_MAX+2] =
+const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
{
NULL,
- p6nops,
- p6nops + 1,
- p6nops + 1 + 2,
- p6nops + 1 + 2 + 3,
- p6nops + 1 + 2 + 3 + 4,
- p6nops + 1 + 2 + 3 + 4 + 5,
- p6nops + 1 + 2 + 3 + 4 + 5 + 6,
- p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
- p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8,
+ x86nops,
+ x86nops + 1,
+ x86nops + 1 + 2,
+ x86nops + 1 + 2 + 3,
+ x86nops + 1 + 2 + 3 + 4,
+ x86nops + 1 + 2 + 3 + 4 + 5,
+ x86nops + 1 + 2 + 3 + 4 + 5 + 6,
+ x86nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
-#endif
-
-/* Initialize these to a safe default */
-#ifdef CONFIG_X86_64
-const unsigned char * const *ideal_nops = p6_nops;
-#else
-const unsigned char * const *ideal_nops = intel_nops;
-#endif
-
-void __init arch_init_ideal_nops(void)
-{
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_INTEL:
- /*
- * Due to a decoder implementation quirk, some
- * specific Intel CPUs actually perform better with
- * the "k8_nops" than with the SDM-recommended NOPs.
- */
- if (boot_cpu_data.x86 == 6 &&
- boot_cpu_data.x86_model >= 0x0f &&
- boot_cpu_data.x86_model != 0x1c &&
- boot_cpu_data.x86_model != 0x26 &&
- boot_cpu_data.x86_model != 0x27 &&
- boot_cpu_data.x86_model < 0x30) {
- ideal_nops = k8_nops;
- } else if (boot_cpu_has(X86_FEATURE_NOPL)) {
- ideal_nops = p6_nops;
- } else {
-#ifdef CONFIG_X86_64
- ideal_nops = k8_nops;
-#else
- ideal_nops = intel_nops;
-#endif
- }
- break;
-
- case X86_VENDOR_HYGON:
- ideal_nops = p6_nops;
- return;
-
- case X86_VENDOR_AMD:
- if (boot_cpu_data.x86 > 0xf) {
- ideal_nops = p6_nops;
- return;
- }
-
- fallthrough;
-
- default:
-#ifdef CONFIG_X86_64
- ideal_nops = k8_nops;
-#else
- if (boot_cpu_has(X86_FEATURE_K8))
- ideal_nops = k8_nops;
- else if (boot_cpu_has(X86_FEATURE_K7))
- ideal_nops = k7_nops;
- else
- ideal_nops = intel_nops;
-#endif
- }
-}
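In case the offset arithmetic above is opaque: each x86_nops[n] entry is the cumulative offset into the flat x86nops[] blob, so entry n points at the recommended n-byte NOP. A hedged sketch of the invariant that add_nops() below relies on:

	/*
	 * x86nops layout:  [NOP1][ NOP2 ][  NOP3  ] ... [      NOP8      ]
	 * x86_nops[n]:      n=1 ^  n=2 ^    n=3 ^
	 */
	static void emit_nop_sketch(u8 *buf, unsigned int n)
	{
		memcpy(buf, x86_nops[n], n);	/* n in 1..ASM_NOP_MAX */
	}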
/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
@@ -262,7 +107,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
unsigned int noplen = len;
if (noplen > ASM_NOP_MAX)
noplen = ASM_NOP_MAX;
- memcpy(insns, ideal_nops[noplen], noplen);
+ memcpy(insns, x86_nops[noplen], noplen);
insns += noplen;
len -= noplen;
}
@@ -338,25 +183,69 @@ done:
}
/*
- * "noinline" to cause control flow change and thus invalidate I$ and
- * cause refetch after modification.
+ * optimize_nops_range() - Optimize a sequence of single byte NOPs (0x90)
+ *
+ * @instr: instruction byte stream
+ * @instrlen: length of the above
+ * @off: offset within @instr where the first NOP has been detected
+ *
+ * Return: number of NOPs found (and replaced).
*/
-static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
+static __always_inline int optimize_nops_range(u8 *instr, u8 instrlen, int off)
{
unsigned long flags;
- int i;
+ int i = off, nnops;
- for (i = 0; i < a->padlen; i++) {
+ while (i < instrlen) {
if (instr[i] != 0x90)
- return;
+ break;
+
+ i++;
}
+ nnops = i - off;
+
+ if (nnops <= 1)
+ return nnops;
+
local_irq_save(flags);
- add_nops(instr + (a->instrlen - a->padlen), a->padlen);
+ add_nops(instr + off, nnops);
local_irq_restore(flags);
- DUMP_BYTES(instr, a->instrlen, "%px: [%d:%d) optimized NOPs: ",
- instr, a->instrlen - a->padlen, a->padlen);
+ DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);
+
+ return nnops;
+}
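A hypothetical byte-level example of the helper above: with three single-byte NOPs starting at @off, the run is rewritten in place as one long NOP (the usual three-byte 0f 1f 00 encoding) and 3 is returned:

	/*
	 *   before: ... | 90 90 90 | <next insn> ...
	 *                 ^off
	 *   after:  ... | 0f 1f 00 | <next insn> ...
	 *   return:  3 (nnops)
	 */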
+
+/*
+ * "noinline" to cause control flow change and thus invalidate I$ and
+ * cause refetch after modification.
+ */
+static void __init_or_module noinline optimize_nops(struct alt_instr *a, u8 *instr)
+{
+ struct insn insn;
+ int i = 0;
+
+ /*
+ * Jump over the non-NOP insns and optimize single-byte NOPs into bigger
+ * ones.
+ */
+ for (;;) {
+ if (insn_decode_kernel(&insn, &instr[i]))
+ return;
+
+ /*
+ * See if this and any potentially following NOPs can be
+ * optimized.
+ */
+ if (insn.length == 1 && insn.opcode.bytes[0] == 0x90)
+ i += optimize_nops_range(instr, a->instrlen, i);
+ else
+ i += insn.length;
+
+ if (i >= a->instrlen)
+ return;
+ }
}
/*
@@ -388,26 +277,32 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
*/
for (a = start; a < end; a++) {
int insn_buff_sz = 0;
+ /* Mask away "NOT" flag bit for feature to test. */
+ u16 feature = a->cpuid & ~ALTINSTR_FLAG_INV;
instr = (u8 *)&a->instr_offset + a->instr_offset;
replacement = (u8 *)&a->repl_offset + a->repl_offset;
BUG_ON(a->instrlen > sizeof(insn_buff));
- BUG_ON(a->cpuid >= (NCAPINTS + NBUGINTS) * 32);
- if (!boot_cpu_has(a->cpuid)) {
- if (a->padlen > 1)
- optimize_nops(a, instr);
+ BUG_ON(feature >= (NCAPINTS + NBUGINTS) * 32);
- continue;
- }
+ /*
+ * Patch if either:
+ * - feature is present
+	 * - feature is not present but ALTINSTR_FLAG_INV is set, meaning:
+	 *   patch if the feature is *NOT* present.
+ */
+ if (!boot_cpu_has(feature) == !(a->cpuid & ALTINSTR_FLAG_INV))
+ goto next;
- DPRINTK("feat: %d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d), pad: %d",
- a->cpuid >> 5,
- a->cpuid & 0x1f,
+ DPRINTK("feat: %s%d*32+%d, old: (%pS (%px) len: %d), repl: (%px, len: %d)",
+ (a->cpuid & ALTINSTR_FLAG_INV) ? "!" : "",
+ feature >> 5,
+ feature & 0x1f,
instr, instr, a->instrlen,
- replacement, a->replacementlen, a->padlen);
+ replacement, a->replacementlen);
- DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
- DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
+ DUMP_BYTES(instr, a->instrlen, "%px: old_insn: ", instr);
+ DUMP_BYTES(replacement, a->replacementlen, "%px: rpl_insn: ", replacement);
memcpy(insn_buff, replacement, a->replacementlen);
insn_buff_sz = a->replacementlen;
@@ -428,14 +323,15 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
if (a->replacementlen && is_jmp(replacement[0]))
recompute_jump(a, instr, replacement, insn_buff);
- if (a->instrlen > a->replacementlen) {
- add_nops(insn_buff + a->replacementlen,
- a->instrlen - a->replacementlen);
- insn_buff_sz += a->instrlen - a->replacementlen;
- }
+ for (; insn_buff_sz < a->instrlen; insn_buff_sz++)
+ insn_buff[insn_buff_sz] = 0x90;
+
DUMP_BYTES(insn_buff, insn_buff_sz, "%px: final_insn: ", instr);
text_poke_early(instr, insn_buff, insn_buff_sz);
+
+next:
+ optimize_nops(a, instr);
}
}
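The double negation in the patch condition above is compact; a truth table derived from it:

	/*
	 * boot_cpu_has(feature)   ALTINSTR_FLAG_INV   result
	 * ---------------------   -----------------   ----------------------
	 *          yes                  clear          patch replacement
	 *          no                   clear          goto next (keep insn)
	 *          yes                  set            goto next (keep insn)
	 *          no                   set            patch replacement
	 */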
@@ -605,7 +501,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
BUG_ON(p->len > MAX_PATCH_LEN);
/* prep the buffer with the original instructions */
memcpy(insn_buff, p->instr, p->len);
- used = pv_ops.init.patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
+ used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
BUG_ON(used > p->len);
@@ -723,6 +619,33 @@ void __init alternative_instructions(void)
* patching.
*/
+ /*
+ * Paravirt patching and alternative patching can be combined to
+ * replace a function call with a short direct code sequence (e.g.
+ * by setting a constant return value instead of doing that in an
+ * external function).
+ * In order to make this work the following sequence is required:
+ * 1. set (artificial) features depending on used paravirt
+ * functions which can later influence alternative patching
+ * 2. apply paravirt patching (generally replacing an indirect
+ * function call with a direct one)
+ * 3. apply alternative patching (e.g. replacing a direct function
+ * call with a custom code sequence)
+ * Doing paravirt patching after alternative patching would clobber
+ * the optimization of the custom code with a function call again.
+ */
+ paravirt_set_cap();
+
+ /*
+ * First patch paravirt functions, such that we overwrite the indirect
+ * call with the direct call.
+ */
+ apply_paravirt(__parainstructions, __parainstructions_end);
+
+ /*
+ * Then patch alternatives, such that those paravirt calls that are in
+ * alternatives can be overwritten by their immediate fragments.
+ */
apply_alternatives(__alt_instructions, __alt_instructions_end);
#ifdef CONFIG_SMP
@@ -741,8 +664,6 @@ void __init alternative_instructions(void)
}
#endif
- apply_paravirt(__parainstructions, __parainstructions_end);
-
restart_nmi();
alternatives_patched = 1;
}
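As a concrete, hedged illustration of the ordering requirement spelled out in the comment above (identifiers below are illustrative):

	/*
	 * 1. original site:    call *pv_ops.irq.irq_disable   (indirect)
	 * 2. paravirt patch:   cli + NOP padding              (direct code)
	 * 3. alternatives:     may rewrite the site again, possibly keyed
	 *                      on an artificial feature set by
	 *                      paravirt_set_cap()
	 *
	 * Running step 2 after step 3 would overwrite the alternative's
	 * inline sequence with the indirect call again.
	 */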
@@ -813,7 +734,7 @@ static inline temp_mm_state_t use_temporary_mm(struct mm_struct *mm)
* with a stale address space WITHOUT being in lazy mode after
* restoring the previous mm.
*/
- if (this_cpu_read(cpu_tlbstate.is_lazy))
+ if (this_cpu_read(cpu_tlbstate_shared.is_lazy))
leave_mm(smp_processor_id());
temp_state.mm = this_cpu_read(cpu_tlbstate.loaded_mm);
@@ -1274,15 +1195,15 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
const void *opcode, size_t len, const void *emulate)
{
struct insn insn;
+ int ret;
memcpy((void *)tp->text, opcode, len);
if (!emulate)
emulate = opcode;
- kernel_insn_init(&insn, emulate, MAX_INSN_SIZE);
- insn_get_length(&insn);
+ ret = insn_decode_kernel(&insn, emulate);
- BUG_ON(!insn_complete(&insn));
+ BUG_ON(ret < 0);
BUG_ON(len != insn.length);
tp->rel_addr = addr - (void *)_stext;
@@ -1302,13 +1223,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
default: /* assume NOP */
switch (len) {
case 2: /* NOP2 -- emulate as JMP8+0 */
- BUG_ON(memcmp(emulate, ideal_nops[len], len));
+ BUG_ON(memcmp(emulate, x86_nops[len], len));
tp->opcode = JMP8_INSN_OPCODE;
tp->rel32 = 0;
break;
case 5: /* NOP5 -- emulate as JMP32+0 */
- BUG_ON(memcmp(emulate, ideal_nops[NOP_ATOMIC5], len));
+ BUG_ON(memcmp(emulate, x86_nops[len], len));
tp->opcode = JMP32_INSN_OPCODE;
tp->rel32 = 0;
break;
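Why the NOP-as-JMP emulation above is sound (encodings per the x86 ISA, shown for reference):

	/*
	 *   x86_nops[2] = 66 90            jmp8  +0 = eb 00
	 *   x86_nops[5] = 0f 1f 44 00 00   jmp32 +0 = e9 00 00 00 00
	 *
	 * Each pair has the same length, and a jump with zero displacement
	 * is architecturally a no-op, so emulating the NOP as "jmp +0"
	 * preserves behavior while the site is being poked.
	 */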
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index b4396952c9a6..23dda362dc0f 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Shared support code for AMD K8 northbridges and derivates.
+ * Shared support code for AMD K8 northbridges and derivatives.
* Copyright 2006 Andi Kleen, SUSE Labs.
*/
@@ -25,6 +25,7 @@
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4 0x1654
+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);
@@ -57,6 +58,7 @@ static const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
{}
};
@@ -72,6 +74,7 @@ static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
{}
};
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index bda4f2a36868..d262811ce14b 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -619,7 +619,7 @@ static void setup_APIC_timer(void)
if (this_cpu_has(X86_FEATURE_ARAT)) {
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
- /* Make LAPIC timer preferrable over percpu HPET */
+ /* Make LAPIC timer preferable over percpu HPET */
lapic_clockevent.rating = 150;
}
@@ -666,7 +666,7 @@ void lapic_update_tsc_freq(void)
* In this functions we calibrate APIC bus clocks to the external timer.
*
* We want to do the calibration only once since we want to have local timer
- * irqs syncron. CPUs connected by the same APIC bus have the very same bus
+ * irqs synchronous. CPUs connected by the same APIC bus have the very same bus
* frequency.
*
* This was previously done by reading the PIT/HPET and waiting for a wrap
@@ -1532,7 +1532,7 @@ static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr)
* Most probably by now the CPU has serviced that pending interrupt and it
* might not have done the ack_APIC_irq() because it thought, interrupt
* came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear
- * the ISR bit and cpu thinks it has already serivced the interrupt. Hence
+ * the ISR bit and cpu thinks it has already serviced the interrupt. Hence
* a vector might get locked. It was noticed for timer irq (vector
* 0x31). Issue an extra EOI to clear ISR.
*
@@ -1657,7 +1657,7 @@ static void setup_local_APIC(void)
*/
/*
* Actually disabling the focus CPU check just makes the hang less
- * frequent as it makes the interrupt distributon model be more
+ * frequent as it makes the interrupt distribution model be more
* like LRU than MRU (the short-term load is more even across CPUs).
*/
@@ -1875,7 +1875,7 @@ static __init void try_to_enable_x2apic(int remap_mode)
/*
* Without IR, all CPUs can be addressed by IOAPIC/MSI only
- * in physical mode, and CPUs with an APIC ID that cannnot
+ * in physical mode, and CPUs with an APIC ID that cannot
* be addressed must not be brought online.
*/
x2apic_set_max_apicid(apic_limit);
@@ -2342,6 +2342,11 @@ static int cpuid_to_apicid[] = {
[0 ... NR_CPUS - 1] = -1,
};
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return phys_id == cpuid_to_apicid[cpu];
+}
+
#ifdef CONFIG_SMP
/**
* apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
@@ -2599,6 +2604,7 @@ static void __init apic_bsp_setup(bool upmode)
end_local_APIC_setup();
irq_remap_enable_fault_handling();
setup_IO_APIC();
+ lapic_update_legacy_vectors();
}
#ifdef CONFIG_UP_LATE_INIT
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index c3b60c37c728..d5c691a3208b 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -928,7 +928,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
/*
* setup_IO_APIC_irqs() programs all legacy IRQs with default trigger
- * and polarity attirbutes. So allow the first user to reprogram the
+ * and polarity attributes. So allow the first user to reprogram the
* pin with real trigger and polarity attributes.
*/
if (irq < nr_legacy_irqs() && data->count == 1) {
@@ -994,7 +994,7 @@ static int alloc_isa_irq_from_domain(struct irq_domain *domain,
/*
* Legacy ISA IRQ has already been allocated, just add pin to
- * the pin list assoicated with this IRQ and program the IOAPIC
+ * the pin list associated with this IRQ and program the IOAPIC
* entry. The IOAPIC entry
*/
if (irq_data && irq_data->parent_data) {
@@ -1032,6 +1032,16 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) {
irq = mp_irqs[idx].srcbusirq;
legacy = mp_is_legacy_irq(irq);
+ /*
+ * IRQ2 is unusable for historical reasons on systems which
+ * have a legacy PIC. See the comment vs. IRQ2 further down.
+ *
+ * If this gets removed at some point then the related code
+ * in lapic_assign_system_vectors() needs to be adjusted as
+ * well.
+ */
+ if (legacy && irq == PIC_CASCADE_IR)
+ return -EINVAL;
}
mutex_lock(&ioapic_mutex);
@@ -1742,7 +1752,7 @@ static inline void ioapic_finish_move(struct irq_data *data, bool moveit)
* with masking the ioapic entry and then polling until
* Remote IRR was clear before reprogramming the
* ioapic I don't trust the Remote IRR bit to be
- * completey accurate.
+ * completely accurate.
*
* However there appears to be no other way to plug
* this race, so if the Remote IRR bit is not
@@ -1820,7 +1830,7 @@ static void ioapic_ack_level(struct irq_data *irq_data)
/*
* Tail end of clearing remote IRR bit (either by delivering the EOI
* message via io-apic EOI register write or simulating it using
- * mask+edge followed by unnask+level logic) manually when the
+ * mask+edge followed by unmask+level logic) manually when the
* level triggered interrupt is seen as the edge triggered interrupt
* at the cpu.
*/
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 3c9c7492252f..fb67ed5e7e6a 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -543,6 +543,14 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
return -ENOSYS;
+ /*
+ * Catch any attempt to touch the cascade interrupt on a PIC
+ * equipped system.
+ */
+ if (WARN_ON_ONCE(info->flags & X86_IRQ_ALLOC_LEGACY &&
+ virq == PIC_CASCADE_IR))
+ return -EINVAL;
+
for (i = 0; i < nr_irqs; i++) {
irqd = irq_domain_get_irq_data(domain, virq + i);
BUG_ON(!irqd);
@@ -730,6 +738,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace)
irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}
+void __init lapic_update_legacy_vectors(void)
+{
+ unsigned int i;
+
+ if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
+ return;
+
+ /*
+ * If the IO/APIC is disabled via config, kernel command line or
+ * lack of enumeration then all legacy interrupts are routed
+ * through the PIC. Make sure that they are marked as legacy
+	 * vectors. PIC_CASCADE_IR has already been marked in
+ * lapic_assign_system_vectors().
+ */
+ for (i = 0; i < nr_legacy_irqs(); i++) {
+ if (i != PIC_CASCADE_IR)
+ lapic_assign_legacy_vector(i, true);
+ }
+}
+
void __init lapic_assign_system_vectors(void)
{
unsigned int i, vector = 0;
@@ -745,6 +773,11 @@ void __init lapic_assign_system_vectors(void)
/* Mark the preallocated legacy interrupts */
for (i = 0; i < nr_legacy_irqs(); i++) {
+ /*
+ * Don't touch the cascade interrupt. It's unusable
+ * on PIC equipped machines. See the large comment
+ * in the IO/APIC code.
+ */
if (i != PIC_CASCADE_IR)
irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
}
@@ -1045,7 +1078,7 @@ void irq_force_complete_move(struct irq_desc *desc)
*
* But in case of cpu hotplug this should be a non issue
* because if the affinity update happens right before all
- * cpus rendevouz in stop machine, there is no way that the
+ * cpus rendezvous in stop machine, there is no way that the
* interrupt can be blocked on the target cpu because all cpus
* loops first with interrupts enabled in stop machine, so the
* old vector is not yet cleaned up when the interrupt fires.
@@ -1054,7 +1087,7 @@ void irq_force_complete_move(struct irq_desc *desc)
* of the interrupt on the apic/system bus would be delayed
* beyond the point where the target cpu disables interrupts
* in stop machine. I doubt that it can happen, but at least
- * there is a theroretical chance. Virtualization might be
+ * there is a theoretical chance. Virtualization might be
* able to expose this, but AFAICT the IOAPIC emulation is not
* as stupid as the real hardware.
*
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 52bc217ca8c3..f5a48e66e4f5 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -369,6 +369,15 @@ static int __init early_get_arch_type(void)
return ret;
}
+/* UV system found, check which APIC mode the BIOS already selected */
+static void __init early_set_apic_mode(void)
+{
+ if (x2apic_enabled())
+ uv_system_type = UV_X2APIC;
+ else
+ uv_system_type = UV_LEGACY_APIC;
+}
+
static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id)
{
/* Save OEM_ID passed from ACPI MADT */
@@ -404,11 +413,12 @@ static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id)
else
uv_hubless_system |= 0x8;
- /* Copy APIC type */
+ /* Copy OEM Table ID */
uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id);
pr_info("UV: OEM IDs %s/%s, SystemType %d, HUBLESS ID %x\n",
oem_id, oem_table_id, uv_system_type, uv_hubless_system);
+
return 0;
}
@@ -453,6 +463,7 @@ static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id)
early_set_hub_type();
/* Other UV setup functions */
+ early_set_apic_mode();
early_get_pnodeid();
early_get_apic_socketid_shift();
x86_platform.is_untracked_pat_range = uv_is_untracked_pat_range;
@@ -472,29 +483,14 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id)
if (uv_set_system_type(_oem_id, _oem_table_id) == 0)
return 0;
- /* Save and Decode OEM Table ID */
+ /* Save for display of the OEM Table ID */
uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id);
- /* This is the most common hardware variant, x2apic mode */
- if (!strcmp(oem_table_id, "UVX"))
- uv_system_type = UV_X2APIC;
-
- /* Only used for very small systems, usually 1 chassis, legacy mode */
- else if (!strcmp(oem_table_id, "UVL"))
- uv_system_type = UV_LEGACY_APIC;
-
- else
- goto badbios;
-
pr_info("UV: OEM IDs %s/%s, System/UVType %d/0x%x, HUB RevID %d\n",
oem_id, oem_table_id, uv_system_type, is_uv(UV_ANY),
uv_min_hub_revision_id);
return 0;
-
-badbios:
- pr_err("UV: UVarchtype:%s not supported\n", uv_archtype);
- BUG();
}
enum uv_system_type get_uv_system_type(void)
@@ -1671,6 +1667,9 @@ static __init int uv_system_init_hubless(void)
if (rc < 0)
return rc;
+ /* Set section block size for current node memory */
+ set_block_size();
+
/* Create user access node */
if (rc >= 0)
uv_setup_proc_files(1);
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 660270359d39..241dda687eb9 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -94,7 +94,7 @@
* Remove APM dependencies in arch/i386/kernel/process.c
* Remove APM dependencies in drivers/char/sysrq.c
* Reset time across standby.
- * Allow more inititialisation on SMP.
+ * Allow more initialisation on SMP.
* Remove CONFIG_APM_POWER_OFF and make it boot time
* configurable (default on).
* Make debug only a boot time parameter (remove APM_DEBUG).
@@ -766,7 +766,7 @@ static int apm_driver_version(u_short *val)
* not cleared until it is acknowledged.
*
* Additional information is returned in the info pointer, providing
- * that APM 1.2 is in use. If no messges are pending the value 0x80
+ * that APM 1.2 is in use. If no messages are pending the value 0x80
* is returned (No power management events pending).
*/
static int apm_get_event(apm_event_t *event, apm_eventinfo_t *info)
@@ -1025,7 +1025,7 @@ static int apm_enable_power_management(int enable)
* status which gives the rough battery status, and current power
* source. The bat value returned give an estimate as a percentage
* of life and a status value for the battery. The estimated life
- * if reported is a lifetime in secodnds/minutes at current powwer
+ * if reported is a lifetime in seconds/minutes at current power
* consumption.
*/
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 60b9f42ce3c1..ecd3fd6993d1 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -61,13 +61,6 @@ static void __used common(void)
OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
#endif
-#ifdef CONFIG_PARAVIRT_XXL
- BLANK();
- OFFSET(PV_IRQ_irq_disable, paravirt_patch_template, irq.irq_disable);
- OFFSET(PV_IRQ_irq_enable, paravirt_patch_template, irq.irq_enable);
- OFFSET(PV_CPU_iret, paravirt_patch_template, cpu.iret);
-#endif
-
#ifdef CONFIG_XEN
BLANK();
OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 6e043f295a60..2b411cd00a4e 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -53,11 +53,6 @@ void foo(void)
offsetof(struct cpu_entry_area, tss.x86_tss.sp1) -
offsetofend(struct cpu_entry_area, entry_stack_page.stack));
-#ifdef CONFIG_STACKPROTECTOR
- BLANK();
- OFFSET(stack_canary_offset, stack_canary, canary);
-#endif
-
BLANK();
DEFINE(EFI_svam, offsetof(efi_runtime_services_t, set_virtual_address_map));
}
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 347a956f71ca..b7c003013d41 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -593,8 +593,8 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
*/
if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
/* Check if memory encryption is enabled */
- rdmsrl(MSR_K8_SYSCFG, msr);
- if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ rdmsrl(MSR_AMD64_SYSCFG, msr);
+ if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
goto clear_all;
/*
@@ -628,11 +628,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)
early_init_amd_mc(c);
-#ifdef CONFIG_X86_32
- if (c->x86 == 6)
- set_cpu_cap(c, X86_FEATURE_K7);
-#endif
-
if (c->x86 >= 0xf)
set_cpu_cap(c, X86_FEATURE_K8);
@@ -651,6 +646,10 @@ static void early_init_amd(struct cpuinfo_x86 *c)
if (c->x86_power & BIT(12))
set_cpu_cap(c, X86_FEATURE_ACC_POWER);
+ /* Bit 14 indicates the Runtime Average Power Limit interface. */
+ if (c->x86_power & BIT(14))
+ set_cpu_cap(c, X86_FEATURE_RAPL);
+
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#else
@@ -1170,3 +1169,19 @@ void set_dr_addr_mask(unsigned long mask, int dr)
break;
}
}
+
+u32 amd_get_highest_perf(void)
+{
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
+ if (c->x86 == 0x17 && ((c->x86_model >= 0x30 && c->x86_model < 0x40) ||
+ (c->x86_model >= 0x70 && c->x86_model < 0x80)))
+ return 166;
+
+ if (c->x86 == 0x19 && ((c->x86_model >= 0x20 && c->x86_model < 0x30) ||
+ (c->x86_model >= 0x40 && c->x86_model < 0x70)))
+ return 166;
+
+ return 255;
+}
+EXPORT_SYMBOL_GPL(amd_get_highest_perf);
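A hedged sketch of how a caller might consume this; 255 and 166 are CPPC-style "highest performance" values, and the divisor name below is illustrative, not a real kernel symbol:

	static u64 boost_ratio_sketch(u32 nominal_perf)
	{
		/* Scaled highest/nominal performance ratio, as
		 * frequency-invariance style code might compute it
		 * (shift of 10 assumed). */
		return div_u64((u64)amd_get_highest_perf() << 10, nominal_perf);
	}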
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 3ca9be482a9e..d66af2950e06 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -877,7 +877,7 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c)
static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
struct _cpuid4_info_regs *base)
{
- struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cpu_cacheinfo *this_cpu_ci;
struct cacheinfo *this_leaf;
int i, sibling;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index ab640abe26b6..a99d00393206 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -161,7 +161,6 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
[GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
[GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
- GDT_STACK_CANARY_INIT
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
@@ -482,7 +481,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
if (pk)
pk->pkru = init_pkru_value;
/*
- * Seting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
+ * Setting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
* cpuid bit to be set. We need to ensure that we
* update that bit in this CPU's "cpu_info".
*/
@@ -599,7 +598,6 @@ void load_percpu_segment(int cpu)
__loadsegment_simple(gs, 0);
wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
#endif
- load_stack_canary_segment();
}
#ifdef CONFIG_X86_32
@@ -1330,7 +1328,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
cpu_set_bug_bits(c);
- cpu_set_core_cap_bits(c);
+ sld_setup(c);
fpu__init_system(c);
@@ -1404,7 +1402,7 @@ static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
* where GS is unused by the prev and next threads.
*
* Since neither vendor documents this anywhere that I can see,
- * detect it directly instead of hardcoding the choice by
+ * detect it directly instead of hard-coding the choice by
* vendor.
*
* I've designated AMD's behavior as the "bug" because it's
@@ -1748,6 +1746,8 @@ DEFINE_PER_CPU(bool, hardirq_stack_inuse);
DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
EXPORT_PER_CPU_SYMBOL(__preempt_count);
+DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) = TOP_OF_INIT_STACK;
+
/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
@@ -1773,10 +1773,16 @@ void syscall_init(void)
wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
#endif
- /* Flags to clear on syscall */
+ /*
+ * Flags to clear on syscall; clear as much as possible
+ * to minimize user space-kernel interference.
+ */
wrmsrl(MSR_SYSCALL_MASK,
- X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
- X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
+ X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
+ X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
+ X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
+ X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
+ X86_EFLAGS_AC|X86_EFLAGS_ID);
}
#else /* CONFIG_X86_64 */
@@ -1796,7 +1802,8 @@ DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
#ifdef CONFIG_STACKPROTECTOR
-DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
+DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
+EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
#endif
#endif /* CONFIG_X86_64 */
@@ -1850,8 +1857,8 @@ static inline void setup_getcpu(int cpu)
unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
struct desc_struct d = { };
- if (boot_cpu_has(X86_FEATURE_RDTSCP))
- write_rdtscp_aux(cpudata);
+ if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
+ wrmsr(MSR_TSC_AUX, cpudata, 0);
/* Store CPU and node number in limit. */
d.limit0 = cpudata;
@@ -1937,13 +1944,12 @@ void cpu_init_exception_handling(void)
/*
* cpu_init() initializes state that is per-CPU. Some data is already
- * initialized (naturally) in the bootstrap process, such as the GDT
- * and IDT. We reload them nevertheless, this function acts as a
- * 'CPU state barrier', nothing should get across.
+ * initialized (naturally) in the bootstrap process, such as the GDT. We
+ * reload it nevertheless, this function acts as a 'CPU state barrier',
+ * nothing should get across.
*/
void cpu_init(void)
{
- struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
struct task_struct *cur = current;
int cpu = raw_smp_processor_id();
@@ -1956,8 +1962,6 @@ void cpu_init(void)
early_cpu_to_node(cpu) != NUMA_NO_NODE)
set_numa_node(early_cpu_to_node(cpu));
#endif
- setup_getcpu(cpu);
-
pr_debug("Initializing CPU#%d\n", cpu);
if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
@@ -1969,7 +1973,6 @@ void cpu_init(void)
* and set up the GDT descriptor:
*/
switch_to_new_gdt(cpu);
- load_current_idt();
if (IS_ENABLED(CONFIG_X86_64)) {
loadsegment(fs, 0);
@@ -1989,12 +1992,6 @@ void cpu_init(void)
initialize_tlbstate_and_flush();
enter_lazy_tlb(&init_mm, cur);
- /* Initialize the TSS. */
- tss_setup_ist(tss);
- tss_setup_io_bitmap(tss);
- set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
-
- load_TR_desc();
/*
* sp0 points to the entry trampoline stack regardless of what task
* is running.
@@ -2016,6 +2013,18 @@ void cpu_init(void)
load_fixmap_gdt(cpu);
}
+#ifdef CONFIG_SMP
+void cpu_init_secondary(void)
+{
+ /*
+ * Relies on the BP having set-up the IDT tables, which are loaded
+ * on this CPU in cpu_init_exception_handling().
+ */
+ cpu_init_exception_handling();
+ cpu_init();
+}
+#endif
+
/*
* The microcode loader calls this upon late microcode load to recheck features,
* only when microcode has been updated. Caller holds microcode_mutex and CPU
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 67944128876d..95521302630d 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -48,6 +48,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
enum tsx_ctrl_states {
TSX_CTRL_ENABLE,
TSX_CTRL_DISABLE,
+ TSX_CTRL_RTM_ALWAYS_ABORT,
TSX_CTRL_NOT_SUPPORTED,
};
@@ -56,6 +57,7 @@ extern __ro_after_init enum tsx_ctrl_states tsx_ctrl_state;
extern void __init tsx_init(void);
extern void tsx_enable(void);
extern void tsx_disable(void);
+extern void tsx_clear_cpuid(void);
#else
static inline void tsx_init(void) { }
#endif /* CONFIG_CPU_SUP_INTEL */
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index 42af31b64c2c..defda61f372d 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -72,6 +72,9 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_AVX512_FP16, X86_FEATURE_AVX512BW },
{ X86_FEATURE_ENQCMD, X86_FEATURE_XSAVES },
{ X86_FEATURE_PER_THREAD_MBA, X86_FEATURE_MBA },
+ { X86_FEATURE_SGX_LC, X86_FEATURE_SGX },
+ { X86_FEATURE_SGX1, X86_FEATURE_SGX },
+ { X86_FEATURE_SGX2, X86_FEATURE_SGX1 },
{}
};
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 1d9b8aaea06c..7227c15299d0 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -291,7 +291,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
mark_tsc_unstable("cyrix 5510/5520 detected");
}
#endif
- c->x86_cache_size = 16; /* Yep 16K integrated cache thats it */
+ c->x86_cache_size = 16; /* Yep 16K integrated cache that's it */
/* GXm supports extended cpuid levels 'ala' AMD */
if (c->cpuid_level == 2) {
diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c
index 3b1b01f2b248..da696eb4821a 100644
--- a/arch/x86/kernel/cpu/feat_ctl.c
+++ b/arch/x86/kernel/cpu/feat_ctl.c
@@ -93,15 +93,9 @@ static void init_vmx_capabilities(struct cpuinfo_x86 *c)
}
#endif /* CONFIG_X86_VMX_FEATURE_NAMES */
-static void clear_sgx_caps(void)
-{
- setup_clear_cpu_cap(X86_FEATURE_SGX);
- setup_clear_cpu_cap(X86_FEATURE_SGX_LC);
-}
-
static int __init nosgx(char *str)
{
- clear_sgx_caps();
+ setup_clear_cpu_cap(X86_FEATURE_SGX);
return 0;
}
@@ -110,23 +104,30 @@ early_param("nosgx", nosgx);
void init_ia32_feat_ctl(struct cpuinfo_x86 *c)
{
+ bool enable_sgx_kvm = false, enable_sgx_driver = false;
bool tboot = tboot_enabled();
- bool enable_sgx;
+ bool enable_vmx;
u64 msr;
if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &msr)) {
clear_cpu_cap(c, X86_FEATURE_VMX);
- clear_sgx_caps();
+ clear_cpu_cap(c, X86_FEATURE_SGX);
return;
}
- /*
- * Enable SGX if and only if the kernel supports SGX and Launch Control
- * is supported, i.e. disable SGX if the LE hash MSRs can't be written.
- */
- enable_sgx = cpu_has(c, X86_FEATURE_SGX) &&
- cpu_has(c, X86_FEATURE_SGX_LC) &&
- IS_ENABLED(CONFIG_X86_SGX);
+ enable_vmx = cpu_has(c, X86_FEATURE_VMX) &&
+ IS_ENABLED(CONFIG_KVM_INTEL);
+
+ if (cpu_has(c, X86_FEATURE_SGX) && IS_ENABLED(CONFIG_X86_SGX)) {
+ /*
+ * Separate out SGX driver enabling from KVM. This allows KVM
+ * guests to use SGX even if the kernel SGX driver refuses to
+ * use it. This happens if flexible Launch Control is not
+ * available.
+ */
+ enable_sgx_driver = cpu_has(c, X86_FEATURE_SGX_LC);
+ enable_sgx_kvm = enable_vmx && IS_ENABLED(CONFIG_X86_SGX_KVM);
+ }
if (msr & FEAT_CTL_LOCKED)
goto update_caps;
@@ -142,15 +143,18 @@ void init_ia32_feat_ctl(struct cpuinfo_x86 *c)
* i.e. KVM is enabled, to avoid unnecessarily adding an attack vector
* for the kernel, e.g. using VMX to hide malicious code.
*/
- if (cpu_has(c, X86_FEATURE_VMX) && IS_ENABLED(CONFIG_KVM_INTEL)) {
+ if (enable_vmx) {
msr |= FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
if (tboot)
msr |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX;
}
- if (enable_sgx)
- msr |= FEAT_CTL_SGX_ENABLED | FEAT_CTL_SGX_LC_ENABLED;
+ if (enable_sgx_kvm || enable_sgx_driver) {
+ msr |= FEAT_CTL_SGX_ENABLED;
+ if (enable_sgx_driver)
+ msr |= FEAT_CTL_SGX_LC_ENABLED;
+ }
wrmsrl(MSR_IA32_FEAT_CTL, msr);
@@ -173,10 +177,29 @@ update_caps:
}
update_sgx:
- if (!(msr & FEAT_CTL_SGX_ENABLED) ||
- !(msr & FEAT_CTL_SGX_LC_ENABLED) || !enable_sgx) {
- if (enable_sgx)
- pr_err_once("SGX disabled by BIOS\n");
- clear_sgx_caps();
+ if (!(msr & FEAT_CTL_SGX_ENABLED)) {
+ if (enable_sgx_kvm || enable_sgx_driver)
+ pr_err_once("SGX disabled by BIOS.\n");
+ clear_cpu_cap(c, X86_FEATURE_SGX);
+ return;
+ }
+
+ /*
+ * VMX feature bit may be cleared due to being disabled in BIOS,
+ * in which case SGX virtualization cannot be supported either.
+ */
+ if (!cpu_has(c, X86_FEATURE_VMX) && enable_sgx_kvm) {
+ pr_err_once("SGX virtualization disabled due to lack of VMX.\n");
+ enable_sgx_kvm = 0;
+ }
+
+ if (!(msr & FEAT_CTL_SGX_LC_ENABLED) && enable_sgx_driver) {
+ if (!enable_sgx_kvm) {
+ pr_err_once("SGX Launch Control is locked. Disable SGX.\n");
+ clear_cpu_cap(c, X86_FEATURE_SGX);
+ } else {
+ pr_err_once("SGX Launch Control is locked. Support SGX virtualization only.\n");
+ clear_cpu_cap(c, X86_FEATURE_SGX_LC);
+ }
}
}
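Summarizing the resulting SGX policy (a hedged reading of the logic above):

	/*
	 * FEAT_CTL_SGX_ENABLED clear        -> X86_FEATURE_SGX cleared
	 * SGX enabled, VMX bit missing      -> SGX-for-KVM dropped
	 * SGX enabled, LC locked off:
	 *     without SGX-for-KVM           -> X86_FEATURE_SGX cleared
	 *     with SGX-for-KVM              -> SGX kept, X86_FEATURE_SGX_LC
	 *                                      cleared (driver disabled)
	 */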
diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
index ae59115d18f9..6d50136f7ab9 100644
--- a/arch/x86/kernel/cpu/hygon.c
+++ b/arch/x86/kernel/cpu/hygon.c
@@ -215,12 +215,12 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c)
u32 ecx;
ecx = cpuid_ecx(0x8000001e);
- nodes_per_socket = ((ecx >> 8) & 7) + 1;
+ __max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
u64 value;
rdmsrl(MSR_FAM10H_NODE_ID, value);
- nodes_per_socket = ((value >> 3) & 7) + 1;
+ __max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
}
if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
@@ -260,6 +260,10 @@ static void early_init_hygon(struct cpuinfo_x86 *c)
if (c->x86_power & BIT(12))
set_cpu_cap(c, X86_FEATURE_ACC_POWER);
+ /* Bit 14 indicates the Runtime Average Power Limit interface. */
+ if (c->x86_power & BIT(14))
+ set_cpu_cap(c, X86_FEATURE_RAPL);
+
#ifdef CONFIG_X86_64
set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#endif
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 0e422a544835..8321c43554a1 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -10,6 +10,7 @@
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>
+#include <linux/delay.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
@@ -41,12 +42,13 @@ enum split_lock_detect_state {
sld_off = 0,
sld_warn,
sld_fatal,
+ sld_ratelimit,
};
/*
- * Default to sld_off because most systems do not support split lock detection
- * split_lock_setup() will switch this to sld_warn on systems that support
- * split lock detect, unless there is a command line override.
+ * Default to sld_off because most systems do not support split lock detection.
+ * sld_state_setup() will switch this to sld_warn on systems that support
+ * split lock/bus lock detect, unless there is a command line override.
*/
static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
static u64 msr_test_ctrl_cache __ro_after_init;
@@ -301,7 +303,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
* The operating system must reload CR3 to cause the TLB to be flushed"
*
* As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
- * should be false so that __flush_tlb_all() causes CR3 insted of CR4.PGE
+ * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
* to be modified.
*/
if (c->x86 == 5 && c->x86_model == 9) {
@@ -603,6 +605,7 @@ static void init_intel_misc_features(struct cpuinfo_x86 *c)
}
static void split_lock_init(void);
+static void bus_lock_init(void);
static void init_intel(struct cpuinfo_x86 *c)
{
@@ -716,10 +719,13 @@ static void init_intel(struct cpuinfo_x86 *c)
if (tsx_ctrl_state == TSX_CTRL_ENABLE)
tsx_enable();
- if (tsx_ctrl_state == TSX_CTRL_DISABLE)
+ else if (tsx_ctrl_state == TSX_CTRL_DISABLE)
tsx_disable();
+ else if (tsx_ctrl_state == TSX_CTRL_RTM_ALWAYS_ABORT)
+ tsx_clear_cpuid();
split_lock_init();
+ bus_lock_init();
intel_init_thermal(c);
}
@@ -995,13 +1001,30 @@ static const struct {
{ "off", sld_off },
{ "warn", sld_warn },
{ "fatal", sld_fatal },
+ { "ratelimit:", sld_ratelimit },
};
+static struct ratelimit_state bld_ratelimit;
+
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
- int len = strlen(opt);
+ int len = strlen(opt), ratelimit;
+
+ if (strncmp(arg, opt, len))
+ return false;
+
+ /*
+ * Min ratelimit is 1 bus lock/sec.
+ * Max ratelimit is 1000 bus locks/sec.
+ */
+ if (sscanf(arg, "ratelimit:%d", &ratelimit) == 1 &&
+ ratelimit > 0 && ratelimit <= 1000) {
+ ratelimit_state_init(&bld_ratelimit, HZ, ratelimit);
+ ratelimit_set_flags(&bld_ratelimit, RATELIMIT_MSG_ON_RELEASE);
+ return true;
+ }
- return len == arglen && !strncmp(arg, opt, len);
+ return len == arglen;
}
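For example, derived from the parsing above, booting with:

	split_lock_detect=ratelimit:100

initializes bld_ratelimit to at most 100 bus locks per second. Values outside 1..1000 fail the sscanf()/range check, so the option does not match and the default policy is kept.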
static bool split_lock_verify_msr(bool on)
@@ -1020,16 +1043,15 @@ static bool split_lock_verify_msr(bool on)
return ctrl == tmp;
}
-static void __init split_lock_setup(void)
+static void __init sld_state_setup(void)
{
enum split_lock_detect_state state = sld_warn;
char arg[20];
int i, ret;
- if (!split_lock_verify_msr(false)) {
- pr_info("MSR access failed: Disabled\n");
+ if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
+ !boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
return;
- }
ret = cmdline_find_option(boot_command_line, "split_lock_detect",
arg, sizeof(arg));
@@ -1041,17 +1063,14 @@ static void __init split_lock_setup(void)
}
}
}
+ sld_state = state;
+}
- switch (state) {
- case sld_off:
- pr_info("disabled\n");
+static void __init __split_lock_setup(void)
+{
+ if (!split_lock_verify_msr(false)) {
+ pr_info("MSR access failed: Disabled\n");
return;
- case sld_warn:
- pr_info("warning about user-space split_locks\n");
- break;
- case sld_fatal:
- pr_info("sending SIGBUS on user-space split_locks\n");
- break;
}
rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);
@@ -1061,7 +1080,9 @@ static void __init split_lock_setup(void)
return;
}
- sld_state = state;
+ /* Restore the MSR to its cached value. */
+ wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);
+
setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
}
@@ -1082,6 +1103,15 @@ static void sld_update_msr(bool on)
static void split_lock_init(void)
{
+ /*
+	 * In the ratelimit case bus locks are handled via #DB and #AC for
+	 * split lock stays disabled.
+ */
+ if (sld_state == sld_ratelimit) {
+ split_lock_verify_msr(false);
+ return;
+ }
+
if (cpu_model_supports_sld)
split_lock_verify_msr(sld_state != sld_off);
}
@@ -1118,6 +1148,29 @@ bool handle_guest_split_lock(unsigned long ip)
}
EXPORT_SYMBOL_GPL(handle_guest_split_lock);
+static void bus_lock_init(void)
+{
+ u64 val;
+
+ /*
+ * Warn and fatal are handled by #AC for split lock if #AC for
+ * split lock is supported.
+ */
+ if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) ||
+ (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
+ (sld_state == sld_warn || sld_state == sld_fatal)) ||
+ sld_state == sld_off)
+ return;
+
+ /*
+	 * Enable #DB for bus lock. All bus locks are handled in #DB, except
+	 * that in the fatal case split locks are handled in #AC.
+ */
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, val);
+ val |= DEBUGCTLMSR_BUS_LOCK_DETECT;
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
+}
+
bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
@@ -1126,6 +1179,27 @@ bool handle_user_split_lock(struct pt_regs *regs, long error_code)
return true;
}
+void handle_bus_lock(struct pt_regs *regs)
+{
+ switch (sld_state) {
+ case sld_off:
+ break;
+ case sld_ratelimit:
+ /* Enforce no more than bld_ratelimit bus locks/sec. */
+ while (!__ratelimit(&bld_ratelimit))
+ msleep(20);
+ /* Warn on the bus lock. */
+ fallthrough;
+ case sld_warn:
+ pr_warn_ratelimited("#DB: %s/%d took a bus_lock trap at address: 0x%lx\n",
+ current->comm, current->pid, regs->ip);
+ break;
+ case sld_fatal:
+ force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
+ break;
+ }
+}
+
/*
* This function is called only when switching between tasks with
* different split-lock detection modes. It sets the MSR for the
@@ -1166,7 +1240,7 @@ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
{}
};
-void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
+static void __init split_lock_setup(struct cpuinfo_x86 *c)
{
const struct x86_cpu_id *m;
u64 ia32_core_caps;
@@ -1193,5 +1267,60 @@ void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
}
cpu_model_supports_sld = true;
- split_lock_setup();
+ __split_lock_setup();
+}
+
+static void sld_state_show(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
+ !boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
+ return;
+
+ switch (sld_state) {
+ case sld_off:
+ pr_info("disabled\n");
+ break;
+ case sld_warn:
+ if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
+ pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n");
+ else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
+ pr_info("#DB: warning on user-space bus_locks\n");
+ break;
+ case sld_fatal:
+ if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
+ pr_info("#AC: crashing the kernel on kernel split_locks and sending SIGBUS on user-space split_locks\n");
+ } else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
+ pr_info("#DB: sending SIGBUS on user-space bus_locks%s\n",
+ boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) ?
+ " from non-WB" : "");
+ }
+ break;
+ case sld_ratelimit:
+ if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
+ pr_info("#DB: setting system wide bus lock rate limit to %u/sec\n", bld_ratelimit.burst);
+ break;
+ }
+}
+
+void __init sld_setup(struct cpuinfo_x86 *c)
+{
+ split_lock_setup(c);
+ sld_state_setup();
+ sld_state_show();
+}
+
+#define X86_HYBRID_CPU_TYPE_ID_SHIFT 24
+
+/**
+ * get_this_hybrid_cpu_type() - Get the type of this hybrid CPU
+ *
+ * Returns the CPU type [31:24] (i.e., Atom or Core) of a CPU in
+ * a hybrid processor. If the processor is not hybrid, returns 0.
+ */
+u8 get_this_hybrid_cpu_type(void)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+ return 0;
+
+ return cpuid_eax(0x0000001a) >> X86_HYBRID_CPU_TYPE_ID_SHIFT;
}
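A hedged usage sketch; 0x20 and 0x40 are the core-type identifiers Intel documents for CPUID leaf 0x1A (Atom and Core, respectively):

	static bool is_pcore_sketch(void)
	{
		/* 0x40 == Core ("P-core"), 0x20 == Atom ("E-core"). */
		return get_this_hybrid_cpu_type() == 0x40;
	}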
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index e486f96b3cb3..08831acc1d03 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -77,27 +77,29 @@ struct smca_bank_name {
};
static struct smca_bank_name smca_names[] = {
- [SMCA_LS] = { "load_store", "Load Store Unit" },
- [SMCA_LS_V2] = { "load_store", "Load Store Unit" },
- [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" },
- [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" },
- [SMCA_DE] = { "decode_unit", "Decode Unit" },
- [SMCA_RESERVED] = { "reserved", "Reserved" },
- [SMCA_EX] = { "execution_unit", "Execution Unit" },
- [SMCA_FP] = { "floating_point", "Floating Point Unit" },
- [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" },
- [SMCA_CS] = { "coherent_slave", "Coherent Slave" },
- [SMCA_CS_V2] = { "coherent_slave", "Coherent Slave" },
- [SMCA_PIE] = { "pie", "Power, Interrupts, etc." },
- [SMCA_UMC] = { "umc", "Unified Memory Controller" },
- [SMCA_PB] = { "param_block", "Parameter Block" },
- [SMCA_PSP] = { "psp", "Platform Security Processor" },
- [SMCA_PSP_V2] = { "psp", "Platform Security Processor" },
- [SMCA_SMU] = { "smu", "System Management Unit" },
- [SMCA_SMU_V2] = { "smu", "System Management Unit" },
- [SMCA_MP5] = { "mp5", "Microprocessor 5 Unit" },
- [SMCA_NBIO] = { "nbio", "Northbridge IO Unit" },
- [SMCA_PCIE] = { "pcie", "PCI Express Unit" },
+ [SMCA_LS ... SMCA_LS_V2] = { "load_store", "Load Store Unit" },
+ [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" },
+ [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" },
+ [SMCA_DE] = { "decode_unit", "Decode Unit" },
+ [SMCA_RESERVED] = { "reserved", "Reserved" },
+ [SMCA_EX] = { "execution_unit", "Execution Unit" },
+ [SMCA_FP] = { "floating_point", "Floating Point Unit" },
+ [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" },
+ [SMCA_CS ... SMCA_CS_V2] = { "coherent_slave", "Coherent Slave" },
+ [SMCA_PIE] = { "pie", "Power, Interrupts, etc." },
+
+ /* UMC v2 is separate because both of them can exist in a single system. */
+ [SMCA_UMC] = { "umc", "Unified Memory Controller" },
+ [SMCA_UMC_V2] = { "umc_v2", "Unified Memory Controller v2" },
+ [SMCA_PB] = { "param_block", "Parameter Block" },
+ [SMCA_PSP ... SMCA_PSP_V2] = { "psp", "Platform Security Processor" },
+ [SMCA_SMU ... SMCA_SMU_V2] = { "smu", "System Management Unit" },
+ [SMCA_MP5] = { "mp5", "Microprocessor 5 Unit" },
+ [SMCA_NBIO] = { "nbio", "Northbridge IO Unit" },
+ [SMCA_PCIE ... SMCA_PCIE_V2] = { "pcie", "PCI Express Unit" },
+ [SMCA_XGMI_PCS] = { "xgmi_pcs", "Ext Global Memory Interconnect PCS Unit" },
+ [SMCA_XGMI_PHY] = { "xgmi_phy", "Ext Global Memory Interconnect PHY Unit" },
+ [SMCA_WAFL_PHY] = { "wafl_phy", "WAFL PHY Unit" },
};
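The bracketed index ranges above use the GNU C designated-range initializer extension; a self-contained illustration:

	static const char *size_class[6] = {
		[0 ... 2] = "small",	/* indices 0, 1, 2 all get "small" */
		[3 ... 5] = "large",	/* indices 3, 4, 5 all get "large" */
	};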
static const char *smca_get_name(enum smca_bank_types t)
@@ -155,6 +157,7 @@ static struct smca_hwid smca_hwid_mcatypes[] = {
/* Unified Memory Controller MCA type */
{ SMCA_UMC, HWID_MCATYPE(0x96, 0x0) },
+ { SMCA_UMC_V2, HWID_MCATYPE(0x96, 0x1) },
/* Parameter Block MCA type */
{ SMCA_PB, HWID_MCATYPE(0x05, 0x0) },
@@ -175,6 +178,16 @@ static struct smca_hwid smca_hwid_mcatypes[] = {
/* PCI Express Unit MCA type */
{ SMCA_PCIE, HWID_MCATYPE(0x46, 0x0) },
+ { SMCA_PCIE_V2, HWID_MCATYPE(0x46, 0x1) },
+
+ /* xGMI PCS MCA type */
+ { SMCA_XGMI_PCS, HWID_MCATYPE(0x50, 0x0) },
+
+ /* xGMI PHY MCA type */
+ { SMCA_XGMI_PHY, HWID_MCATYPE(0x259, 0x0) },
+
+ /* WAFL PHY MCA type */
+ { SMCA_WAFL_PHY, HWID_MCATYPE(0x267, 0x0) },
};
struct smca_bank smca_banks[MAX_NR_BANKS];
diff --git a/arch/x86/kernel/cpu/mce/apei.c b/arch/x86/kernel/cpu/mce/apei.c
index b58b85380ddb..0e3ae64d3b76 100644
--- a/arch/x86/kernel/cpu/mce/apei.c
+++ b/arch/x86/kernel/cpu/mce/apei.c
@@ -36,7 +36,8 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
mce_setup(&m);
m.bank = -1;
/* Fake a memory read error with unknown channel */
- m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
+ m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | MCI_STATUS_MISCV | 0x9f;
+ m.misc = (MCI_MISC_ADDR_PHYS << 6) | PAGE_SHIFT;
if (severity >= GHES_SEV_RECOVERABLE)
m.status |= MCI_STATUS_UC;
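The new MISC encoding follows the architectural IA32_MCi_MISC layout, so the faked record now advertises a usable, page-granular address:

	/*
	 * IA32_MCi_MISC (valid when MCI_STATUS_MISCV is set):
	 *   bits 5:0 - LSB of the recoverable address (PAGE_SHIFT here,
	 *              i.e. page granularity)
	 *   bits 8:6 - address mode (MCI_MISC_ADDR_PHYS = physical),
	 *              hence the << 6
	 */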
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 7962355436da..22791aadc085 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -529,7 +529,7 @@ static void mce_irq_work_cb(struct irq_work *entry)
* Check if the address reported by the CPU is in a format we can parse.
* It would be possible to add code for most other cases, but all would
* be somewhat complicated (e.g. segment offset would require an instruction
- * parser). So only support physical addresses up to page granuality for now.
+ * parser). So only support physical addresses up to page granularity for now.
*/
int mce_usable_address(struct mce *m)
{
@@ -1257,19 +1257,28 @@ static void kill_me_maybe(struct callback_head *cb)
{
struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
int flags = MF_ACTION_REQUIRED;
+ int ret;
pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
if (!p->mce_ripv)
flags |= MF_MUST_KILL;
- if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags) &&
- !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
+ ret = memory_failure(p->mce_addr >> PAGE_SHIFT, flags);
+ if (!ret && !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
sync_core();
return;
}
+ /*
+ * -EHWPOISON from memory_failure() means that it already sent SIGBUS
+ * to the current process with the proper error info, so no need to
+ * send SIGBUS here again.
+ */
+ if (ret == -EHWPOISON)
+ return;
+
if (p->mce_vaddr != (void __user *)-1l) {
force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT);
} else {
diff --git a/arch/x86/kernel/cpu/mce/inject.c b/arch/x86/kernel/cpu/mce/inject.c
index 7b360731fc2d..4e86d97f9653 100644
--- a/arch/x86/kernel/cpu/mce/inject.c
+++ b/arch/x86/kernel/cpu/mce/inject.c
@@ -74,6 +74,7 @@ MCE_INJECT_SET(status);
MCE_INJECT_SET(misc);
MCE_INJECT_SET(addr);
MCE_INJECT_SET(synd);
+MCE_INJECT_SET(ipid);
#define MCE_INJECT_GET(reg) \
static int inj_##reg##_get(void *data, u64 *val) \
@@ -88,11 +89,13 @@ MCE_INJECT_GET(status);
MCE_INJECT_GET(misc);
MCE_INJECT_GET(addr);
MCE_INJECT_GET(synd);
+MCE_INJECT_GET(ipid);
DEFINE_SIMPLE_ATTRIBUTE(status_fops, inj_status_get, inj_status_set, "%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(misc_fops, inj_misc_get, inj_misc_set, "%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(addr_fops, inj_addr_get, inj_addr_set, "%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(synd_fops, inj_synd_get, inj_synd_set, "%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(ipid_fops, inj_ipid_get, inj_ipid_set, "%llx\n");
static void setup_inj_struct(struct mce *m)
{
@@ -629,6 +632,8 @@ static const char readme_msg[] =
"\t is present in hardware. \n"
"\t - \"th\": Trigger APIC interrupt for Threshold errors. Causes threshold \n"
"\t APIC interrupt handler to handle the error. \n"
+"\n"
+"ipid:\t IPID (AMD-specific)\n"
"\n";
static ssize_t
@@ -652,6 +657,7 @@ static struct dfs_node {
{ .name = "misc", .fops = &misc_fops, .perm = S_IRUSR | S_IWUSR },
{ .name = "addr", .fops = &addr_fops, .perm = S_IRUSR | S_IWUSR },
{ .name = "synd", .fops = &synd_fops, .perm = S_IRUSR | S_IWUSR },
+ { .name = "ipid", .fops = &ipid_fops, .perm = S_IRUSR | S_IWUSR },
{ .name = "bank", .fops = &bank_fops, .perm = S_IRUSR | S_IWUSR },
{ .name = "flags", .fops = &flags_fops, .perm = S_IRUSR | S_IWUSR },
{ .name = "cpu", .fops = &extcpu_fops, .perm = S_IRUSR | S_IWUSR },
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index e309476743b7..acfd5d9f93c6 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -486,6 +486,7 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
case INTEL_FAM6_BROADWELL_X:
case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_ICELAKE_X:
+ case INTEL_FAM6_SAPPHIRERAPIDS_X:
case INTEL_FAM6_XEON_PHI_KNL:
case INTEL_FAM6_XEON_PHI_KNM:
diff --git a/arch/x86/kernel/cpu/mce/severity.c b/arch/x86/kernel/cpu/mce/severity.c
index 83df991314c5..17e631443116 100644
--- a/arch/x86/kernel/cpu/mce/severity.c
+++ b/arch/x86/kernel/cpu/mce/severity.c
@@ -142,7 +142,7 @@ static struct severity {
MASK(MCI_STATUS_OVER|MCI_UC_SAR, MCI_STATUS_UC|MCI_STATUS_AR)
),
MCESEV(
- KEEP, "Non signalled machine check",
+ KEEP, "Non signaled machine check",
SER, BITCLR(MCI_STATUS_S)
),
@@ -218,15 +218,15 @@ static struct severity {
static bool is_copy_from_user(struct pt_regs *regs)
{
u8 insn_buf[MAX_INSN_SIZE];
- struct insn insn;
unsigned long addr;
+ struct insn insn;
+ int ret;
if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip, MAX_INSN_SIZE))
return false;
- kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE);
- insn_get_opcode(&insn);
- if (!insn.opcode.got)
+ ret = insn_decode_kernel(&insn, insn_buf);
+ if (ret < 0)
return false;
switch (insn.opcode.value) {
@@ -234,10 +234,6 @@ static bool is_copy_from_user(struct pt_regs *regs)
case 0x8A: case 0x8B:
/* MOVZ mem,reg */
case 0xB60F: case 0xB70F:
- insn_get_modrm(&insn);
- insn_get_sib(&insn);
- if (!insn.modrm.got || !insn.sib.got)
- return false;
addr = (unsigned long)insn_get_addr_ref(&insn, regs);
break;
/* REP MOVS */
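The simplification works because insn_decode_kernel() decodes the whole instruction (opcode, ModRM, SIB, displacement, immediates) in one call, making the piecemeal insn_get_*() checks redundant. A minimal sketch of the new contract, assuming <asm/insn.h>:

/* Sketch: one-shot kernel-mode instruction decode. */
static bool decode_ip_insn(struct pt_regs *regs, struct insn *insn)
{
        u8 buf[MAX_INSN_SIZE];

        if (copy_from_kernel_nofault(buf, (void *)regs->ip, MAX_INSN_SIZE))
                return false;
        /* Decodes opcode, ModRM, SIB, displacement and immediates at once. */
        if (insn_decode_kernel(insn, buf) < 0)
                return false;
        return true;    /* all insn fields are now valid */
}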
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index b935e1b5f115..6a6318e9590c 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -629,16 +629,16 @@ static ssize_t reload_store(struct device *dev,
if (val != 1)
return size;
- tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
- if (tmp_ret != UCODE_NEW)
- return size;
-
get_online_cpus();
ret = check_online_cpus();
if (ret)
goto put;
+ tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+ if (tmp_ret != UCODE_NEW)
+ goto put;
+
mutex_lock(&microcode_mutex);
ret = microcode_reload_late();
mutex_unlock(&microcode_mutex);
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index e88bc296afca..cc8f1773deca 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -17,6 +17,7 @@
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/i8253.h>
+#include <linux/panic_notifier.h>
#include <linux/random.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
@@ -60,23 +61,18 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback)
set_irq_regs(old_regs);
}
-int hv_setup_vmbus_irq(int irq, void (*handler)(void))
+void hv_setup_vmbus_handler(void (*handler)(void))
{
- /*
- * The 'irq' argument is ignored on x86/x64 because a hard-coded
- * interrupt vector is used for Hyper-V interrupts.
- */
vmbus_handler = handler;
- return 0;
}
+EXPORT_SYMBOL_GPL(hv_setup_vmbus_handler);
-void hv_remove_vmbus_irq(void)
+void hv_remove_vmbus_handler(void)
{
/* We have no way to deallocate the interrupt gate */
vmbus_handler = NULL;
}
-EXPORT_SYMBOL_GPL(hv_setup_vmbus_irq);
-EXPORT_SYMBOL_GPL(hv_remove_vmbus_irq);
+EXPORT_SYMBOL_GPL(hv_remove_vmbus_handler);
/*
* Routines to do per-architecture handling of stimer0
@@ -95,21 +91,17 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
set_irq_regs(old_regs);
}
-int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void))
+/* For x86/x64, override weak placeholders in hyperv_timer.c */
+void hv_setup_stimer0_handler(void (*handler)(void))
{
- *vector = HYPERV_STIMER0_VECTOR;
- *irq = -1; /* Unused on x86/x64 */
hv_stimer0_handler = handler;
- return 0;
}
-EXPORT_SYMBOL_GPL(hv_setup_stimer0_irq);
-void hv_remove_stimer0_irq(int irq)
+void hv_remove_stimer0_handler(void)
{
/* We have no way to deallocate the interrupt gate */
hv_stimer0_handler = NULL;
}
-EXPORT_SYMBOL_GPL(hv_remove_stimer0_irq);
void hv_setup_kexec_handler(void (*handler)(void))
{
@@ -197,7 +189,7 @@ static unsigned char hv_get_nmi_reason(void)
#ifdef CONFIG_X86_LOCAL_APIC
/*
* Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes
- * it dificult to process CHANNELMSG_UNLOAD in case of crash. Handle
+ * it difficult to process CHANNELMSG_UNLOAD in case of crash. Handle
* unknown NMI on the first CPU which gets it.
*/
static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
@@ -245,7 +237,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
for_each_present_cpu(i) {
if (i == 0)
continue;
- ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i));
+ ret = hv_call_add_logical_proc(numa_cpu_node(i), i, i);
BUG_ON(ret);
}
@@ -261,6 +253,7 @@ static void __init hv_smp_prepare_cpus(unsigned int max_cpus)
static void __init ms_hyperv_init_platform(void)
{
+ int hv_max_functions_eax;
int hv_host_info_eax;
int hv_host_info_ebx;
int hv_host_info_ecx;
@@ -274,12 +267,15 @@ static void __init ms_hyperv_init_platform(void)
* Extract the features and hints
*/
ms_hyperv.features = cpuid_eax(HYPERV_CPUID_FEATURES);
- ms_hyperv.features_b = cpuid_ebx(HYPERV_CPUID_FEATURES);
+ ms_hyperv.priv_high = cpuid_ebx(HYPERV_CPUID_FEATURES);
ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
- pr_info("Hyper-V: features 0x%x, hints 0x%x, misc 0x%x\n",
- ms_hyperv.features, ms_hyperv.hints, ms_hyperv.misc_features);
+ hv_max_functions_eax = cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS);
+
+ pr_info("Hyper-V: privilege flags low 0x%x, high 0x%x, hints 0x%x, misc 0x%x\n",
+ ms_hyperv.features, ms_hyperv.priv_high, ms_hyperv.hints,
+ ms_hyperv.misc_features);
ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS);
ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS);
@@ -306,8 +302,7 @@ static void __init ms_hyperv_init_platform(void)
/*
* Extract host information.
*/
- if (cpuid_eax(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS) >=
- HYPERV_CPUID_VERSION) {
+ if (hv_max_functions_eax >= HYPERV_CPUID_VERSION) {
hv_host_info_eax = cpuid_eax(HYPERV_CPUID_VERSION);
hv_host_info_ebx = cpuid_ebx(HYPERV_CPUID_VERSION);
hv_host_info_ecx = cpuid_ecx(HYPERV_CPUID_VERSION);
@@ -325,7 +320,7 @@ static void __init ms_hyperv_init_platform(void)
x86_platform.calibrate_cpu = hv_get_tsc_khz;
}
- if (ms_hyperv.features_b & HV_ISOLATION) {
+ if (ms_hyperv.priv_high & HV_ISOLATION) {
ms_hyperv.isolation_config_a = cpuid_eax(HYPERV_CPUID_ISOLATION_CONFIG);
ms_hyperv.isolation_config_b = cpuid_ebx(HYPERV_CPUID_ISOLATION_CONFIG);
@@ -333,9 +328,11 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.isolation_config_a, ms_hyperv.isolation_config_b);
}
- if (ms_hyperv.hints & HV_X64_ENLIGHTENED_VMCS_RECOMMENDED) {
+ if (hv_max_functions_eax >= HYPERV_CPUID_NESTED_FEATURES) {
ms_hyperv.nested_features =
cpuid_eax(HYPERV_CPUID_NESTED_FEATURES);
+ pr_info("Hyper-V: Nested features: 0x%x\n",
+ ms_hyperv.nested_features);
}
/*
@@ -428,7 +425,7 @@ static void __init ms_hyperv_init_platform(void)
/*
* Hyper-V doesn't provide irq remapping for IO-APIC. To enable x2apic,
- * set x2apic destination mode to physcial mode when x2apic is available
+ * set x2apic destination mode to physical mode when x2apic is available
* and Hyper-V IOMMU driver makes sure cpus assigned with IO-APIC irqs
* have 8-bit APIC id.
*/
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 9231640782fa..b5f43049fa5f 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -434,7 +434,7 @@ set_var_mtrr_range(struct var_mtrr_state *state, unsigned long base_pfn,
state->range_sizek = sizek - second_sizek;
}
-/* Mininum size of mtrr block that can take hole: */
+/* Minimum size of mtrr block that can take hole: */
static u64 mtrr_chunk_size __initdata = (256ULL<<20);
static int __init parse_mtrr_chunk_size_opt(char *p)
@@ -836,7 +836,7 @@ int __init amd_special_default_mtrr(void)
if (boot_cpu_data.x86 < 0xf)
return 0;
/* In case some hypervisor doesn't pass SYSCFG through: */
- if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0)
+ if (rdmsr_safe(MSR_AMD64_SYSCFG, &l, &h) < 0)
return 0;
/*
* Memory between 4GB and top of mem is forced WB by this magic bit.
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index b90f3f437765..558108296f3c 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -53,13 +53,13 @@ static inline void k8_check_syscfg_dram_mod_en(void)
(boot_cpu_data.x86 >= 0x0f)))
return;
- rdmsr(MSR_K8_SYSCFG, lo, hi);
+ rdmsr(MSR_AMD64_SYSCFG, lo, hi);
if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
" not cleared by BIOS, clearing this bit\n",
smp_processor_id());
lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
- mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
+ mtrr_wrmsr(MSR_AMD64_SYSCFG, lo, hi);
}
}
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 28c8a23aa42e..a76694bffe86 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -799,7 +799,7 @@ void mtrr_ap_init(void)
*
* This routine is called in two cases:
*
- * 1. very earily time of software resume, when there absolutely
+ * 1. very early in software resume, when there absolutely
 * are no MTRR entry changes;
REPLACED-BELOW
*
* 2. cpu hotadd time. We let mtrr_add/del_page hold cpuhotplug
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 3ef5868ac588..7aecb2fc3186 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -63,7 +63,7 @@ static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
case 15:
return msr - MSR_P4_BPU_PERFCTR0;
}
- fallthrough;
+ break;
case X86_VENDOR_ZHAOXIN:
case X86_VENDOR_CENTAUR:
return msr - MSR_ARCH_PERFMON_PERFCTR0;
@@ -96,7 +96,7 @@ static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
case 15:
return msr - MSR_P4_BSU_ESCR0;
}
- fallthrough;
+ break;
case X86_VENDOR_ZHAOXIN:
case X86_VENDOR_CENTAUR:
return msr - MSR_ARCH_PERFMON_EVENTSEL0;
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index 698bb26aeb6e..23001ae03e82 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -192,7 +192,7 @@ static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
* Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
* Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
*
- * Probe by trying to write the first of the L3 cach mask registers
+ * Probe by trying to write the first of the L3 cache mask registers
* and checking that the bits stick. Max CLOSids is always 4 and max cbm length
* is always 20 on hsw server parts. The minimum cache bitmask length
* allowed for HSW server is always 2 bits. Hardcode all of them.
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index c4d320d02fd5..6a5f60a37219 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -70,6 +70,7 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
* struct mon_evt - Entry in the event list of a resource
* @evtid: event id
* @name: name of the event
+ * @list: entry in &rdt_resource->evt_list
*/
struct mon_evt {
u32 evtid;
@@ -78,10 +79,13 @@ struct mon_evt {
};
/**
- * struct mon_data_bits - Monitoring details for each event file
- * @rid: Resource id associated with the event file.
+ * union mon_data_bits - Monitoring details for each event file
+ * @priv: Used to store monitoring event data in @u
+ * as kernfs private data
+ * @rid: Resource id associated with the event file
* @evtid: Event id associated with the event file
* @domid: The domain to which the event file belongs
+ * @u: The bit-field struct holding @rid, @evtid and @domid
*/
union mon_data_bits {
void *priv;
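The union exists so that three small IDs can travel through the single kernfs private-data pointer. A hedged sketch of the packing side (the bit-field names come from the struct this comment documents; the helper name is illustrative):

/* Sketch: pack the three monitoring IDs into a kernfs priv pointer. */
static void *mon_pack_priv(unsigned int rid, unsigned int evtid,
                           unsigned int domid)
{
        union mon_data_bits bits;

        bits.u.rid   = rid;             /* resource id */
        bits.u.evtid = evtid;           /* event id */
        bits.u.domid = domid;           /* domain id */
        return bits.priv;               /* travels as an opaque void * */
}
/* Readers assign kn->priv back to bits.priv to recover all three fields. */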
@@ -119,6 +123,7 @@ enum rdt_group_type {
* @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking
* @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations
* allowed AND the allocations are Cache Pseudo-Locked
+ * @RDT_NUM_MODES: Total number of modes
*
* The mode of a resource group enables control over the allowed overlap
* between allocations associated with different resource groups (classes
@@ -142,7 +147,7 @@ enum rdtgrp_mode {
/**
* struct mongroup - store mon group's data in resctrl fs.
- * @mon_data_kn kernlfs node for the mon_data directory
+ * @mon_data_kn: kernfs node for the mon_data directory
* @parent: parent rdtgrp
* @crdtgrp_list: child rdtgroup node list
* @rmid: rmid for this rdtgroup
@@ -282,11 +287,11 @@ struct rftype {
/**
* struct mbm_state - status for each MBM counter in each domain
* @chunks: Total data moved (multiply by rdt_group.mon_scale to get bytes)
- * @prev_msr Value of IA32_QM_CTR for this RMID last time we read it
+ * @prev_msr: Value of IA32_QM_CTR for this RMID last time we read it
* @prev_bw_msr: Value of previous IA32_QM_CTR for bandwidth counting
- * @prev_bw The most recent bandwidth in MBps
- * @delta_bw Difference between the current and previous bandwidth
- * @delta_comp Indicates whether to compute the delta_bw
+ * @prev_bw: The most recent bandwidth in MBps
+ * @delta_bw: Difference between the current and previous bandwidth
+ * @delta_comp: Indicates whether to compute the delta_bw
*/
struct mbm_state {
u64 chunks;
@@ -456,11 +461,13 @@ struct rdt_parse_data {
* @data_width: Character width of data when displaying
* @domains: All domains for this resource
* @cache: Cache allocation related data
+ * @membw: If the component has bandwidth controls, their properties.
* @format_str: Per resource format string to show domain value
* @parse_ctrlval: Per resource function pointer to parse control values
* @evt_list: List of monitoring events
* @num_rmid: Number of RMIDs available
* @mon_scale: cqm counter * mon_scale = occupancy in bytes
+ * @mbm_width: Monitor width, to detect and correct for overflow.
* @fflags: flags to choose base and info files
*/
struct rdt_resource {
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 7ac31210e452..f07c10b87a87 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -84,7 +84,7 @@ unsigned int resctrl_cqm_threshold;
static const struct mbm_correction_factor_table {
u32 rmidthreshold;
u64 cf;
-} mbm_cf_table[] __initdata = {
+} mbm_cf_table[] __initconst = {
{7, CF(1.000000)},
{15, CF(1.000000)},
{15, CF(0.969650)},
@@ -387,7 +387,7 @@ void mon_event_count(void *info)
* adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
* that:
*
- * current bandwdith(cur_bw) < user specified bandwidth(user_bw)
+ * current bandwidth(cur_bw) < user specified bandwidth(user_bw)
*
* This uses the MBM counters to measure the bandwidth and MBA throttle
* MSRs to control the bandwidth for a particular rdtgrp. It builds on the
@@ -397,7 +397,7 @@ void mon_event_count(void *info)
* timer. Having 1s interval makes the calculation of bandwidth simpler.
*
* Although MBA's goal is to restrict the bandwidth to a maximum, there may
- * be a need to increase the bandwidth to avoid uncecessarily restricting
+ * be a need to increase the bandwidth to avoid unnecessarily restricting
* the L2 <-> L3 traffic.
*
* Since MBA controls the L2 external bandwidth whereas MBM measures the
@@ -480,7 +480,7 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
/*
* Delta values are updated dynamically package wise for each
- * rdtgrp everytime the throttle MSR changes value.
+ * rdtgrp every time the throttle MSR changes value.
*
* This is because (1)the increase in bandwidth is not perfectly
* linear and only "approximately" linear even when the hardware
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
index e916646adc69..2207916cae65 100644
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -49,6 +49,7 @@ static struct class *pseudo_lock_class;
/**
* get_prefetch_disable_bits - prefetch disable bits of supported platforms
+ * @void: It takes no parameters.
*
* Capture the list of platforms that have been validated to support
* pseudo-locking. This includes testing to ensure pseudo-locked regions
@@ -162,7 +163,7 @@ static struct rdtgroup *region_find_by_minor(unsigned int minor)
}
/**
- * pseudo_lock_pm_req - A power management QoS request list entry
+ * struct pseudo_lock_pm_req - A power management QoS request list entry
* @list: Entry within the @pm_reqs list for a pseudo-locked region
* @req: PM QoS request
*/
@@ -184,6 +185,7 @@ static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
/**
* pseudo_lock_cstates_constrain - Restrict cores from entering C6
+ * @plr: Pseudo-locked region
*
* To prevent the cache from being affected by power management entering
* C6 has to be avoided. This is accomplished by requesting a latency
@@ -196,6 +198,8 @@ static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
* the ACPI latencies need to be considered while keeping in mind that C2
* may be set to map to deeper sleep states. In this case the latency
* requirement needs to prevent entering C2 also.
+ *
+ * Return: 0 on success, <0 on failure
*/
static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
{
@@ -520,7 +524,7 @@ static int pseudo_lock_fn(void *_rdtgrp)
/**
* rdtgroup_monitor_in_progress - Test if monitoring in progress
- * @r: resource group being queried
+ * @rdtgrp: resource group being queried
*
* Return: 1 if monitor groups have been created for this resource
* group, 0 otherwise.
@@ -1140,6 +1144,8 @@ out:
/**
* pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region
+ * @rdtgrp: Resource group to which the pseudo-locked region belongs.
+ * @sel: Selector of which measurement to perform on a pseudo-locked region.
*
* The measurement of latency to access a pseudo-locked region should be
* done from a cpu that is associated with that pseudo-locked region.
@@ -1307,7 +1313,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
* If the thread does not get on the CPU for whatever
* reason and the process which sets up the region is
* interrupted then this will leave the thread in runnable
- * state and once it gets on the CPU it will derefence
+ * state and once it gets on the CPU it will dereference
* the cleared, but not freed, plr struct resulting in an
* empty pseudo-locking loop.
*/
@@ -1391,7 +1397,7 @@ out:
* group is removed from user space via a "rmdir" from userspace or the
* unmount of the resctrl filesystem. On removal the resource group does
* not go back to pseudo-locksetup mode before it is removed, instead it is
- * removed directly. There is thus assymmetry with the creation where the
+ * removed directly. There is thus asymmetry with the creation where the
* &struct pseudo_lock_region is removed here while it was not created in
* rdtgroup_pseudo_lock_create().
*
@@ -1458,7 +1464,7 @@ static int pseudo_lock_dev_release(struct inode *inode, struct file *filp)
return 0;
}
-static int pseudo_lock_dev_mremap(struct vm_area_struct *area, unsigned long flags)
+static int pseudo_lock_dev_mremap(struct vm_area_struct *area)
{
/* Not supported */
return -EINVAL;
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index f9190adc52cb..01fd30e7829d 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * User interface for Resource Alloction in Resource Director Technology(RDT)
+ * User interface for Resource Allocation in Resource Director Technology(RDT)
*
* Copyright (C) 2016 Intel Corporation
*
@@ -294,7 +294,7 @@ static int rdtgroup_cpus_show(struct kernfs_open_file *of,
/*
* This is safe against resctrl_sched_in() called from __switch_to()
* because __switch_to() is executed with interrupts disabled. A local call
- * from update_closid_rmid() is proteced against __switch_to() because
+ * from update_closid_rmid() is protected against __switch_to() because
* preemption is disabled.
*/
static void update_cpu_closid_rmid(void *info)
@@ -2555,7 +2555,7 @@ static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
/*
* This creates a directory mon_data which contains the monitored data.
*
- * mon_data has one directory for each domain whic are named
+ * mon_data has one directory for each domain, named
 * in the format mon_<domain_name>_<domain_id>. For example, a mon_data
* with L3 domain looks as below:
* ./mon_data:
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 972ec3bfa9c0..21d1f062895a 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -36,6 +36,8 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_CDP_L2, CPUID_ECX, 2, 0x00000010, 2 },
{ X86_FEATURE_MBA, CPUID_EBX, 3, 0x00000010, 0 },
{ X86_FEATURE_PER_THREAD_MBA, CPUID_ECX, 0, 0x00000010, 3 },
+ { X86_FEATURE_SGX1, CPUID_EAX, 0, 0x00000012, 0 },
+ { X86_FEATURE_SGX2, CPUID_EAX, 1, 0x00000012, 0 },
{ X86_FEATURE_HW_PSTATE, CPUID_EDX, 7, 0x80000007, 0 },
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
diff --git a/arch/x86/kernel/cpu/sgx/Makefile b/arch/x86/kernel/cpu/sgx/Makefile
index 91d3dc784a29..9c1656779b2a 100644
--- a/arch/x86/kernel/cpu/sgx/Makefile
+++ b/arch/x86/kernel/cpu/sgx/Makefile
@@ -3,3 +3,4 @@ obj-y += \
encl.o \
ioctl.o \
main.o
+obj-$(CONFIG_X86_SGX_KVM) += virt.o
diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c
index 8ce6d8371cfb..aa9b8b868867 100644
--- a/arch/x86/kernel/cpu/sgx/driver.c
+++ b/arch/x86/kernel/cpu/sgx/driver.c
@@ -136,10 +136,6 @@ static const struct file_operations sgx_encl_fops = {
.get_unmapped_area = sgx_get_unmapped_area,
};
-const struct file_operations sgx_provision_fops = {
- .owner = THIS_MODULE,
-};
-
static struct miscdevice sgx_dev_enclave = {
.minor = MISC_DYNAMIC_MINOR,
.name = "sgx_enclave",
@@ -147,13 +143,6 @@ static struct miscdevice sgx_dev_enclave = {
.fops = &sgx_encl_fops,
};
-static struct miscdevice sgx_dev_provision = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "sgx_provision",
- .nodename = "sgx_provision",
- .fops = &sgx_provision_fops,
-};
-
int __init sgx_drv_init(void)
{
unsigned int eax, ebx, ecx, edx;
@@ -187,11 +176,5 @@ int __init sgx_drv_init(void)
if (ret)
return ret;
- ret = misc_register(&sgx_dev_provision);
- if (ret) {
- misc_deregister(&sgx_dev_enclave);
- return ret;
- }
-
return 0;
}
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 7449ef33f081..001808e3901c 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -7,7 +7,7 @@
#include <linux/shmem_fs.h>
#include <linux/suspend.h>
#include <linux/sched/mm.h>
-#include "arch.h"
+#include <asm/sgx.h>
#include "encl.h"
#include "encls.h"
#include "sgx.h"
@@ -78,7 +78,7 @@ static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
ret = __sgx_encl_eldu(encl_page, epc_page, secs_page);
if (ret) {
- sgx_free_epc_page(epc_page);
+ sgx_encl_free_epc_page(epc_page);
return ERR_PTR(ret);
}
@@ -383,7 +383,7 @@ const struct vm_operations_struct sgx_vm_ops = {
/**
* sgx_encl_release - Destroy an enclave instance
- * @kref: address of a kref inside &sgx_encl
+ * @ref: address of a kref inside &sgx_encl
*
* Used together with kref_put(). Frees all the resources associated with the
* enclave and the instance itself.
@@ -404,7 +404,7 @@ void sgx_encl_release(struct kref *ref)
if (sgx_unmark_page_reclaimable(entry->epc_page))
continue;
- sgx_free_epc_page(entry->epc_page);
+ sgx_encl_free_epc_page(entry->epc_page);
encl->secs_child_cnt--;
entry->epc_page = NULL;
}
@@ -415,7 +415,7 @@ void sgx_encl_release(struct kref *ref)
xa_destroy(&encl->page_array);
if (!encl->secs_child_cnt && encl->secs.epc_page) {
- sgx_free_epc_page(encl->secs.epc_page);
+ sgx_encl_free_epc_page(encl->secs.epc_page);
encl->secs.epc_page = NULL;
}
@@ -423,7 +423,7 @@ void sgx_encl_release(struct kref *ref)
va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
list);
list_del(&va_page->list);
- sgx_free_epc_page(va_page->epc_page);
+ sgx_encl_free_epc_page(va_page->epc_page);
kfree(va_page);
}
@@ -686,7 +686,7 @@ struct sgx_epc_page *sgx_alloc_va_page(void)
ret = __epa(sgx_get_epc_virt_addr(epc_page));
if (ret) {
WARN_ONCE(1, "EPA returned %d (0x%x)", ret, ret);
- sgx_free_epc_page(epc_page);
+ sgx_encl_free_epc_page(epc_page);
return ERR_PTR(-EFAULT);
}
@@ -735,3 +735,24 @@ bool sgx_va_page_full(struct sgx_va_page *va_page)
return slot == SGX_VA_SLOT_COUNT;
}
+
+/**
+ * sgx_encl_free_epc_page - free an EPC page assigned to an enclave
+ * @page: EPC page to be freed
+ *
+ * Free an EPC page assigned to an enclave. It does EREMOVE for the page, and
+ * only upon success puts the page back on the free page list. Otherwise, it
+ * emits a WARNING to indicate that the page has been leaked.
+ */
+void sgx_encl_free_epc_page(struct sgx_epc_page *page)
+{
+ int ret;
+
+ WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED);
+
+ ret = __eremove(sgx_get_epc_virt_addr(page));
+ if (WARN_ONCE(ret, EREMOVE_ERROR_MESSAGE, ret, ret))
+ return;
+
+ sgx_free_epc_page(page);
+}
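Together with the sgx_free_epc_page() rework later in this patch, this creates a clean split between the two free paths. In short:

/*
 * sgx_encl_free_epc_page(page) - enclave-owned page: EREMOVE first,
 *                                free only if EREMOVE succeeded.
 * sgx_free_epc_page(page)      - page already uninitialized: put it
 *                                straight back on the free list.
 */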
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
index d8d30ccbef4c..fec43ca65065 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
@@ -91,8 +91,8 @@ static inline int sgx_encl_find(struct mm_struct *mm, unsigned long addr,
{
struct vm_area_struct *result;
- result = find_vma(mm, addr);
- if (!result || result->vm_ops != &sgx_vm_ops || addr < result->vm_start)
+ result = vma_lookup(mm, addr);
+ if (!result || result->vm_ops != &sgx_vm_ops)
return -EINVAL;
*vma = result;
@@ -115,5 +115,6 @@ struct sgx_epc_page *sgx_alloc_va_page(void);
unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page);
void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset);
bool sgx_va_page_full(struct sgx_va_page *va_page);
+void sgx_encl_free_epc_page(struct sgx_epc_page *page);
#endif /* _X86_ENCL_H */
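The explicit vm_start check could be dropped because vma_lookup() only returns a VMA that actually contains the address. A minimal equivalence sketch:

/* Sketch: vma_lookup() expressed in terms of the old find_vma() idiom. */
static inline struct vm_area_struct *vma_lookup_sketch(struct mm_struct *mm,
                                                       unsigned long addr)
{
        struct vm_area_struct *vma = find_vma(mm, addr);

        /* find_vma() returns the first VMA ending above addr... */
        if (vma && addr < vma->vm_start)
                return NULL;    /* ...which may also start above addr */
        return vma;
}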
diff --git a/arch/x86/kernel/cpu/sgx/encls.h b/arch/x86/kernel/cpu/sgx/encls.h
index 443188fe7e70..9b204843b78d 100644
--- a/arch/x86/kernel/cpu/sgx/encls.h
+++ b/arch/x86/kernel/cpu/sgx/encls.h
@@ -11,21 +11,6 @@
#include <asm/traps.h>
#include "sgx.h"
-enum sgx_encls_function {
- ECREATE = 0x00,
- EADD = 0x01,
- EINIT = 0x02,
- EREMOVE = 0x03,
- EDGBRD = 0x04,
- EDGBWR = 0x05,
- EEXTEND = 0x06,
- ELDU = 0x08,
- EBLOCK = 0x09,
- EPA = 0x0A,
- EWB = 0x0B,
- ETRACK = 0x0C,
-};
-
/**
* ENCLS_FAULT_FLAG - flag signifying an ENCLS return code is a trapnr
*
@@ -55,6 +40,19 @@ enum sgx_encls_function {
} while (0); \
}
+/*
+ * encls_faulted() - Check if an ENCLS leaf faulted given an error code
+ * @ret: the return value of an ENCLS leaf function call
+ *
+ * Return:
+ * - true: ENCLS leaf faulted.
+ * - false: Otherwise.
+ */
+static inline bool encls_faulted(int ret)
+{
+ return ret & ENCLS_FAULT_FLAG;
+}
+
/**
* encls_failed() - Check if an ENCLS function failed
* @ret: the return value of an ENCLS function call
@@ -65,7 +63,7 @@ enum sgx_encls_function {
*/
static inline bool encls_failed(int ret)
{
- if (ret & ENCLS_FAULT_FLAG)
+ if (encls_faulted(ret))
return ENCLS_TRAPNR(ret) != X86_TRAP_PF;
return !!ret;
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index 90a5caf76939..83df20e3e633 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2016-20 Intel Corporation. */
#include <asm/mman.h>
+#include <asm/sgx.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/file.h>
@@ -47,7 +48,7 @@ static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
encl->page_cnt--;
if (va_page) {
- sgx_free_epc_page(va_page->epc_page);
+ sgx_encl_free_epc_page(va_page->epc_page);
list_del(&va_page->list);
kfree(va_page);
}
@@ -117,7 +118,7 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
return 0;
err_out:
- sgx_free_epc_page(encl->secs.epc_page);
+ sgx_encl_free_epc_page(encl->secs.epc_page);
encl->secs.epc_page = NULL;
err_out_backing:
@@ -365,7 +366,7 @@ err_out_unlock:
mmap_read_unlock(current->mm);
err_out_free:
- sgx_free_epc_page(epc_page);
+ sgx_encl_free_epc_page(epc_page);
kfree(encl_page);
return ret;
@@ -495,7 +496,7 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
void *token)
{
u64 mrsigner[4];
- int i, j, k;
+ int i, j;
void *addr;
int ret;
@@ -544,8 +545,7 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
preempt_disable();
- for (k = 0; k < 4; k++)
- wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + k, mrsigner[k]);
+ sgx_update_lepubkeyhash(mrsigner);
ret = __einit(sigstruct, token, addr);
@@ -568,7 +568,7 @@ static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
}
}
- if (ret & ENCLS_FAULT_FLAG) {
+ if (encls_faulted(ret)) {
if (encls_failed(ret))
ENCLS_WARN(ret, "EINIT");
@@ -604,7 +604,6 @@ static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
{
struct sgx_sigstruct *sigstruct;
struct sgx_enclave_init init_arg;
- struct page *initp_page;
void *token;
int ret;
@@ -615,11 +614,15 @@ static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
if (copy_from_user(&init_arg, arg, sizeof(init_arg)))
return -EFAULT;
- initp_page = alloc_page(GFP_KERNEL);
- if (!initp_page)
+ /*
+ * 'sigstruct' must be on a page boundary and 'token' on a 512-byte
+ * boundary. kmalloc() will give this alignment when allocating
+ * PAGE_SIZE bytes.
+ */
+ sigstruct = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!sigstruct)
return -ENOMEM;
- sigstruct = kmap(initp_page);
token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2);
memset(token, 0, SGX_LAUNCH_TOKEN_SIZE);
@@ -645,8 +648,7 @@ static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
ret = sgx_encl_init(encl, sigstruct, token);
out:
- kunmap(initp_page);
- __free_page(initp_page);
+ kfree(sigstruct);
return ret;
}
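The alignment argument rests on kmalloc()'s guarantee that power-of-two-sized allocations are naturally aligned: the PAGE_SIZE buffer is page-aligned, so the half-page token offset lands on a 2048-byte boundary, comfortably meeting the 512-byte requirement. A sketch of those assumptions as assertions:

/* Sketch: the alignment assumptions behind the kmalloc() above. */
static void *alloc_sigstruct_buf(void)
{
        void *sigstruct = kmalloc(PAGE_SIZE, GFP_KERNEL);

        if (!sigstruct)
                return NULL;
        /* Power-of-two kmalloc sizes are naturally aligned... */
        WARN_ON_ONCE(!IS_ALIGNED((unsigned long)sigstruct, PAGE_SIZE));
        /* ...so the token at the half-page offset is 2048-byte aligned. */
        WARN_ON_ONCE(!IS_ALIGNED((unsigned long)sigstruct + PAGE_SIZE / 2, 512));
        return sigstruct;
}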
@@ -665,24 +667,11 @@ out:
static long sgx_ioc_enclave_provision(struct sgx_encl *encl, void __user *arg)
{
struct sgx_enclave_provision params;
- struct file *file;
if (copy_from_user(&params, arg, sizeof(params)))
return -EFAULT;
- file = fget(params.fd);
- if (!file)
- return -EINVAL;
-
- if (file->f_op != &sgx_provision_fops) {
- fput(file);
- return -EINVAL;
- }
-
- encl->attributes_mask |= SGX_ATTR_PROVISIONKEY;
-
- fput(file);
- return 0;
+ return sgx_set_attribute(&encl->attributes_mask, params.fd);
}
long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 8df81a3ed945..63d3de02bbcc 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -1,14 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */
+#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
+#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
+#include <asm/sgx.h>
#include "driver.h"
#include "encl.h"
#include "encls.h"
@@ -23,42 +26,58 @@ static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);
* with sgx_reclaimer_lock acquired.
*/
static LIST_HEAD(sgx_active_page_list);
-
static DEFINE_SPINLOCK(sgx_reclaimer_lock);
+/* Variables protected by a free page list lock are declared before that lock. */
+static unsigned long sgx_nr_free_pages;
+
+/* Nodes with one or more EPC sections. */
+static nodemask_t sgx_numa_mask;
+
+/*
+ * Array with one list_head for each possible NUMA node. Each
+ * list contains all the sgx_epc_section's which are on that
+ * node.
+ */
+static struct sgx_numa_node *sgx_numa_nodes;
+
+static LIST_HEAD(sgx_dirty_page_list);
+
/*
- * Reset dirty EPC pages to uninitialized state. Laundry can be left with SECS
- * pages whose child pages blocked EREMOVE.
+ * Reset post-kexec EPC pages to the uninitialized state. The pages are removed
+ * from the input list, and made available for the page allocator. SECS pages
+ * that precede their children in the input list are left intact.
*/
-static void sgx_sanitize_section(struct sgx_epc_section *section)
+static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
{
struct sgx_epc_page *page;
LIST_HEAD(dirty);
int ret;
- /* init_laundry_list is thread-local, no need for a lock: */
- while (!list_empty(&section->init_laundry_list)) {
+ /* dirty_page_list is thread-local, no need for a lock: */
+ while (!list_empty(dirty_page_list)) {
if (kthread_should_stop())
return;
- /* needed for access to ->page_list: */
- spin_lock(&section->lock);
-
- page = list_first_entry(&section->init_laundry_list,
- struct sgx_epc_page, list);
+ page = list_first_entry(dirty_page_list, struct sgx_epc_page, list);
ret = __eremove(sgx_get_epc_virt_addr(page));
- if (!ret)
- list_move(&page->list, &section->page_list);
- else
+ if (!ret) {
+ /*
+ * page is now sanitized. Make it available via the SGX
+ * page allocator:
+ */
+ list_del(&page->list);
+ sgx_free_epc_page(page);
+ } else {
+ /* The page is not yet clean - move to the dirty list. */
list_move_tail(&page->list, &dirty);
-
- spin_unlock(&section->lock);
+ }
cond_resched();
}
- list_splice(&dirty, &section->init_laundry_list);
+ list_splice(&dirty, dirty_page_list);
}
static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
@@ -195,10 +214,10 @@ static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)
/*
* Swap page to the regular memory transformed to the blocked state by using
- * EBLOCK, which means that it can no loger be referenced (no new TLB entries).
+ * EBLOCK, which means that it can no longer be referenced (no new TLB entries).
*
* The first trial just tries to write the page assuming that some other thread
- * has reset the count for threads inside the enlave by using ETRACK, and
+ * has reset the count for threads inside the enclave by using ETRACK, and
* previous thread count has been zeroed out. The second trial calls ETRACK
* before EWB. If that fails we kick all the HW threads out, and then do EWB,
* which should be guaranteed to succeed.
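A comment-only sketch of the three-stage retry described above (helper names are illustrative, not the driver's):

/*
 * Sketch of the EWB retry protocol:
 *
 *   ret = do_ewb(...);                   // 1st try: assume ETRACK was done
 *   if (ret == SGX_NOT_TRACKED) {
 *           ENCLS[ETRACK];               // 2nd try: track, then EWB again
 *           ret = do_ewb(...);
 *           if (ret == SGX_NOT_TRACKED) {
 *                   kick_hw_threads();   // IPIs flush stale TLB entries
 *                   ret = do_ewb(...);   // this one must succeed
 *           }
 *   }
 */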
@@ -278,7 +297,7 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
sgx_encl_ewb(encl->secs.epc_page, &secs_backing);
- sgx_free_epc_page(encl->secs.epc_page);
+ sgx_encl_free_epc_page(encl->secs.epc_page);
encl->secs.epc_page = NULL;
sgx_encl_put_backing(&secs_backing, true);
@@ -308,6 +327,7 @@ static void sgx_reclaim_pages(void)
struct sgx_epc_section *section;
struct sgx_encl_page *encl_page;
struct sgx_epc_page *epc_page;
+ struct sgx_numa_node *node;
pgoff_t page_index;
int cnt = 0;
int ret;
@@ -379,50 +399,33 @@ skip:
epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
section = &sgx_epc_sections[epc_page->section];
- spin_lock(&section->lock);
- list_add_tail(&epc_page->list, &section->page_list);
- section->free_cnt++;
- spin_unlock(&section->lock);
- }
-}
-
-static unsigned long sgx_nr_free_pages(void)
-{
- unsigned long cnt = 0;
- int i;
-
- for (i = 0; i < sgx_nr_epc_sections; i++)
- cnt += sgx_epc_sections[i].free_cnt;
+ node = section->node;
- return cnt;
+ spin_lock(&node->lock);
+ list_add_tail(&epc_page->list, &node->free_page_list);
+ sgx_nr_free_pages++;
+ spin_unlock(&node->lock);
+ }
}
static bool sgx_should_reclaim(unsigned long watermark)
{
- return sgx_nr_free_pages() < watermark &&
- !list_empty(&sgx_active_page_list);
+ return sgx_nr_free_pages < watermark && !list_empty(&sgx_active_page_list);
}
static int ksgxd(void *p)
{
- int i;
-
set_freezable();
/*
* Sanitize pages in order to recover from kexec(). The 2nd pass is
* required for SECS pages, whose child pages blocked EREMOVE.
*/
- for (i = 0; i < sgx_nr_epc_sections; i++)
- sgx_sanitize_section(&sgx_epc_sections[i]);
-
- for (i = 0; i < sgx_nr_epc_sections; i++) {
- sgx_sanitize_section(&sgx_epc_sections[i]);
+ __sgx_sanitize_pages(&sgx_dirty_page_list);
+ __sgx_sanitize_pages(&sgx_dirty_page_list);
- /* Should never happen. */
- if (!list_empty(&sgx_epc_sections[i].init_laundry_list))
- WARN(1, "EPC section %d has unsanitized pages.\n", i);
- }
+ /* sanity check: */
+ WARN_ON(!list_empty(&sgx_dirty_page_list));
while (!kthread_should_stop()) {
if (try_to_freeze())
@@ -454,45 +457,56 @@ static bool __init sgx_page_reclaimer_init(void)
return true;
}
-static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_section *section)
+static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid)
{
- struct sgx_epc_page *page;
+ struct sgx_numa_node *node = &sgx_numa_nodes[nid];
+ struct sgx_epc_page *page = NULL;
- spin_lock(&section->lock);
+ spin_lock(&node->lock);
- if (list_empty(&section->page_list)) {
- spin_unlock(&section->lock);
+ if (list_empty(&node->free_page_list)) {
+ spin_unlock(&node->lock);
return NULL;
}
- page = list_first_entry(&section->page_list, struct sgx_epc_page, list);
+ page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list);
list_del_init(&page->list);
- section->free_cnt--;
+ sgx_nr_free_pages--;
+
+ spin_unlock(&node->lock);
- spin_unlock(&section->lock);
return page;
}
/**
* __sgx_alloc_epc_page() - Allocate an EPC page
*
- * Iterate through EPC sections and borrow a free EPC page to the caller. When a
- * page is no longer needed it must be released with sgx_free_epc_page().
+ * Iterate through NUMA nodes and reserve a free EPC page for the caller. Start
+ * from the NUMA node where the caller is executing.
*
* Return:
- * an EPC page,
- * -errno on error
+ * - an EPC page: A free EPC page was available.
+ * - NULL: Out of EPC pages.
*/
struct sgx_epc_page *__sgx_alloc_epc_page(void)
{
- struct sgx_epc_section *section;
struct sgx_epc_page *page;
- int i;
+ int nid_of_current = numa_node_id();
+ int nid = nid_of_current;
- for (i = 0; i < sgx_nr_epc_sections; i++) {
- section = &sgx_epc_sections[i];
+ if (node_isset(nid_of_current, sgx_numa_mask)) {
+ page = __sgx_alloc_epc_page_from_node(nid_of_current);
+ if (page)
+ return page;
+ }
+
+ /* Fall back to the non-local NUMA nodes: */
+ while (true) {
+ nid = next_node_in(nid, sgx_numa_mask);
+ if (nid == nid_of_current)
+ break;
- page = __sgx_alloc_epc_page_from_section(section);
+ page = __sgx_alloc_epc_page_from_node(nid);
if (page)
return page;
}
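The fallback loop leans on next_node_in() wrapping around the nodemask, so every EPC-bearing node is visited exactly once. A standalone sketch of the traversal, assuming 'self' is where the search started:

/* Sketch: the wrap-around traversal that next_node_in() enables. */
static int try_each_node(int self, nodemask_t *mask)
{
        int nid = self;

        /* Terminates only if 'self' is in 'mask' or the body breaks out. */
        while ((nid = next_node_in(nid, *mask)) != self) {
                /* attempt an allocation on 'nid' here; break on success */
        }
        return -ENOMEM;
}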
@@ -598,23 +612,22 @@ struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
* sgx_free_epc_page() - Free an EPC page
* @page: an EPC page
*
- * Call EREMOVE for an EPC page and insert it back to the list of free pages.
+ * Put the EPC page back on the list of free pages. It's the caller's
+ * responsibility to make sure that the page is in the uninitialized state. In
+ * other words, do EREMOVE, EWB or whatever operation is necessary before calling
+ * this function.
*/
void sgx_free_epc_page(struct sgx_epc_page *page)
{
struct sgx_epc_section *section = &sgx_epc_sections[page->section];
- int ret;
+ struct sgx_numa_node *node = section->node;
- WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED);
+ spin_lock(&node->lock);
- ret = __eremove(sgx_get_epc_virt_addr(page));
- if (WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret))
- return;
+ list_add_tail(&page->list, &node->free_page_list);
+ sgx_nr_free_pages++;
- spin_lock(&section->lock);
- list_add_tail(&page->list, &section->page_list);
- section->free_cnt++;
- spin_unlock(&section->lock);
+ spin_unlock(&node->lock);
}
static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
@@ -635,18 +648,14 @@ static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
}
section->phys_addr = phys_addr;
- spin_lock_init(&section->lock);
- INIT_LIST_HEAD(&section->page_list);
- INIT_LIST_HEAD(&section->init_laundry_list);
for (i = 0; i < nr_pages; i++) {
section->pages[i].section = index;
section->pages[i].flags = 0;
section->pages[i].owner = NULL;
- list_add_tail(&section->pages[i].list, &section->init_laundry_list);
+ list_add_tail(&section->pages[i].list, &sgx_dirty_page_list);
}
- section->free_cnt = nr_pages;
return true;
}
@@ -665,8 +674,13 @@ static bool __init sgx_page_cache_init(void)
{
u32 eax, ebx, ecx, edx, type;
u64 pa, size;
+ int nid;
int i;
+ sgx_numa_nodes = kmalloc_array(num_possible_nodes(), sizeof(*sgx_numa_nodes), GFP_KERNEL);
+ if (!sgx_numa_nodes)
+ return false;
+
for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) {
cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx);
@@ -689,6 +703,21 @@ static bool __init sgx_page_cache_init(void)
break;
}
+ nid = numa_map_to_online_node(phys_to_target_node(pa));
+ if (nid == NUMA_NO_NODE) {
+ /* The physical address is already printed above. */
+ pr_warn(FW_BUG "Unable to map EPC section to an online node. Falling back to NUMA node 0.\n");
+ nid = 0;
+ }
+
+ if (!node_isset(nid, sgx_numa_mask)) {
+ spin_lock_init(&sgx_numa_nodes[nid].lock);
+ INIT_LIST_HEAD(&sgx_numa_nodes[nid].free_page_list);
+ node_set(nid, sgx_numa_mask);
+ }
+
+ sgx_epc_sections[i].node = &sgx_numa_nodes[nid];
+
sgx_nr_epc_sections++;
}
@@ -700,6 +729,67 @@ static bool __init sgx_page_cache_init(void)
return true;
}
+/*
+ * Update the SGX_LEPUBKEYHASH MSRs to the values specified by the caller.
+ * The bare-metal driver needs to update them to the hash of the enclave's
+ * signer before EINIT. KVM needs to update them to the guest's virtual MSR
+ * values before doing EINIT on behalf of the guest.
+ */
+void sgx_update_lepubkeyhash(u64 *lepubkeyhash)
+{
+ int i;
+
+ WARN_ON_ONCE(preemptible());
+
+ for (i = 0; i < 4; i++)
+ wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]);
+}
+
+const struct file_operations sgx_provision_fops = {
+ .owner = THIS_MODULE,
+};
+
+static struct miscdevice sgx_dev_provision = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "sgx_provision",
+ .nodename = "sgx_provision",
+ .fops = &sgx_provision_fops,
+};
+
+/**
+ * sgx_set_attribute() - Update allowed attributes given file descriptor
+ * @allowed_attributes: Pointer to allowed enclave attributes
+ * @attribute_fd: File descriptor for specific attribute
+ *
+ * Append the enclave attribute indicated by the file descriptor to the
+ * allowed attributes. Currently only SGX_ATTR_PROVISIONKEY, indicated by
+ * /dev/sgx_provision, is supported.
+ *
+ * Return:
+ * - 0: SGX_ATTR_PROVISIONKEY is appended to allowed_attributes
+ * - -EINVAL: Invalid or unsupported file descriptor
+ */
+int sgx_set_attribute(unsigned long *allowed_attributes,
+ unsigned int attribute_fd)
+{
+ struct file *file;
+
+ file = fget(attribute_fd);
+ if (!file)
+ return -EINVAL;
+
+ if (file->f_op != &sgx_provision_fops) {
+ fput(file);
+ return -EINVAL;
+ }
+
+ *allowed_attributes |= SGX_ATTR_PROVISIONKEY;
+
+ fput(file);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sgx_set_attribute);
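Seen from userspace, the permission check is simply the ability to open the device node. A hypothetical sketch, assuming the SGX_IOC_ENCLAVE_PROVISION ioctl and struct sgx_enclave_provision from the uapi header:

/* Hypothetical user-space sketch: grant PROVISIONKEY to an enclave fd. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/sgx.h>    /* SGX_IOC_ENCLAVE_PROVISION, sgx_enclave_provision */

static int enable_provision_key(int enclave_fd)
{
        struct sgx_enclave_provision params;
        int provision_fd = open("/dev/sgx_provision", O_RDONLY);
        int ret;

        if (provision_fd < 0)
                return -1;      /* no access: the attribute stays masked */

        params.fd = provision_fd;
        ret = ioctl(enclave_fd, SGX_IOC_ENCLAVE_PROVISION, &params);
        close(provision_fd);    /* sgx_set_attribute() only inspects f_op */
        return ret;
}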
+
static int __init sgx_init(void)
{
int ret;
@@ -716,12 +806,28 @@ static int __init sgx_init(void)
goto err_page_cache;
}
- ret = sgx_drv_init();
+ ret = misc_register(&sgx_dev_provision);
if (ret)
goto err_kthread;
+ /*
+ * Always try to initialize the native *and* KVM drivers.
+ * The KVM driver is less picky than the native one and
+ * can function if the native one is not supported on the
+ * current system or fails to initialize.
+ *
+ * Error out only if both fail to initialize.
+ */
+ ret = sgx_drv_init();
+
+ if (sgx_vepc_init() && ret)
+ goto err_provision;
+
return 0;
+err_provision:
+ misc_deregister(&sgx_dev_provision);
+
err_kthread:
kthread_stop(ksgxd_tsk);
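The resulting behavior can be summarized as a small truth table, derived from the code above:

/*
 * Init matrix:
 *
 *   sgx_drv_init()   sgx_vepc_init()   sgx_init()
 *   0 (ok)           any               0
 *   error            0 (ok)            0
 *   error            error             error (provision device unwound)
 */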
diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h
index 5fa42d143feb..4628acec0009 100644
--- a/arch/x86/kernel/cpu/sgx/sgx.h
+++ b/arch/x86/kernel/cpu/sgx/sgx.h
@@ -8,11 +8,15 @@
#include <linux/rwsem.h>
#include <linux/types.h>
#include <asm/asm.h>
-#include "arch.h"
+#include <asm/sgx.h>
#undef pr_fmt
#define pr_fmt(fmt) "sgx: " fmt
+#define EREMOVE_ERROR_MESSAGE \
+ "EREMOVE returned %d (0x%x) and an EPC page was leaked. SGX may become unusable. " \
+ "Refer to Documentation/x86/sgx.rst for more information."
+
#define SGX_MAX_EPC_SECTIONS 8
#define SGX_EEXTEND_BLOCK_SIZE 256
#define SGX_NR_TO_SCAN 16
@@ -30,28 +34,25 @@ struct sgx_epc_page {
};
/*
+ * Contains the tracking data for NUMA nodes having EPC pages. Most importantly,
+ * the free page list local to the node is stored here.
+ */
+struct sgx_numa_node {
+ struct list_head free_page_list;
+ spinlock_t lock;
+};
+
+/*
* The firmware can define multiple chunks of EPC in different areas of the
* physical memory, e.g. for the memory areas of each node. This structure is
* used to store EPC pages for one EPC section and virtual memory area where
* the pages have been mapped.
- *
- * 'lock' must be held before accessing 'page_list' or 'free_cnt'.
*/
struct sgx_epc_section {
unsigned long phys_addr;
void *virt_addr;
struct sgx_epc_page *pages;
-
- spinlock_t lock;
- struct list_head page_list;
- unsigned long free_cnt;
-
- /*
- * Pages which need EREMOVE run on them before they can be
- * used. Only safe to be accessed in ksgxd and init code.
- * Not protected by locks.
- */
- struct list_head init_laundry_list;
+ struct sgx_numa_node *node;
};
extern struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
@@ -83,4 +84,15 @@ void sgx_mark_page_reclaimable(struct sgx_epc_page *page);
int sgx_unmark_page_reclaimable(struct sgx_epc_page *page);
struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim);
+#ifdef CONFIG_X86_SGX_KVM
+int __init sgx_vepc_init(void);
+#else
+static inline int __init sgx_vepc_init(void)
+{
+ return -ENODEV;
+}
+#endif
+
+void sgx_update_lepubkeyhash(u64 *lepubkeyhash);
+
#endif /* _X86_SGX_H */
diff --git a/arch/x86/kernel/cpu/sgx/virt.c b/arch/x86/kernel/cpu/sgx/virt.c
new file mode 100644
index 000000000000..64511c4a5200
--- /dev/null
+++ b/arch/x86/kernel/cpu/sgx/virt.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Device driver to expose SGX enclave memory to KVM guests.
+ *
+ * Copyright(c) 2021 Intel Corporation.
+ */
+
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+#include <asm/sgx.h>
+#include <uapi/asm/sgx.h>
+
+#include "encls.h"
+#include "sgx.h"
+
+struct sgx_vepc {
+ struct xarray page_array;
+ struct mutex lock;
+};
+
+/*
+ * Temporary SECS pages that cannot be EREMOVE'd due to having children in
+ * other virtual EPC instances, and the lock that protects the list.
+ */
+static struct mutex zombie_secs_pages_lock;
+static struct list_head zombie_secs_pages;
+
+static int __sgx_vepc_fault(struct sgx_vepc *vepc,
+ struct vm_area_struct *vma, unsigned long addr)
+{
+ struct sgx_epc_page *epc_page;
+ unsigned long index, pfn;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&vepc->lock));
+
+ /* Calculate index of EPC page in virtual EPC's page_array */
+ index = vma->vm_pgoff + PFN_DOWN(addr - vma->vm_start);
+
+ epc_page = xa_load(&vepc->page_array, index);
+ if (epc_page)
+ return 0;
+
+ epc_page = sgx_alloc_epc_page(vepc, false);
+ if (IS_ERR(epc_page))
+ return PTR_ERR(epc_page);
+
+ ret = xa_err(xa_store(&vepc->page_array, index, epc_page, GFP_KERNEL));
+ if (ret)
+ goto err_free;
+
+ pfn = PFN_DOWN(sgx_get_epc_phys_addr(epc_page));
+
+ ret = vmf_insert_pfn(vma, addr, pfn);
+ if (ret != VM_FAULT_NOPAGE) {
+ ret = -EFAULT;
+ goto err_delete;
+ }
+
+ return 0;
+
+err_delete:
+ xa_erase(&vepc->page_array, index);
+err_free:
+ sgx_free_epc_page(epc_page);
+ return ret;
+}
+
+static vm_fault_t sgx_vepc_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct sgx_vepc *vepc = vma->vm_private_data;
+ int ret;
+
+ mutex_lock(&vepc->lock);
+ ret = __sgx_vepc_fault(vepc, vma, vmf->address);
+ mutex_unlock(&vepc->lock);
+
+ if (!ret)
+ return VM_FAULT_NOPAGE;
+
+ if (ret == -EBUSY && (vmf->flags & FAULT_FLAG_ALLOW_RETRY)) {
+ mmap_read_unlock(vma->vm_mm);
+ return VM_FAULT_RETRY;
+ }
+
+ return VM_FAULT_SIGBUS;
+}
+
+static const struct vm_operations_struct sgx_vepc_vm_ops = {
+ .fault = sgx_vepc_fault,
+};
+
+static int sgx_vepc_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct sgx_vepc *vepc = file->private_data;
+
+ if (!(vma->vm_flags & VM_SHARED))
+ return -EINVAL;
+
+ vma->vm_ops = &sgx_vepc_vm_ops;
+ /* Don't copy VMA in fork() */
+ vma->vm_flags |= VM_PFNMAP | VM_IO | VM_DONTDUMP | VM_DONTCOPY;
+ vma->vm_private_data = vepc;
+
+ return 0;
+}
+
+static int sgx_vepc_free_page(struct sgx_epc_page *epc_page)
+{
+ int ret;
+
+ /*
+ * Take a previously guest-owned EPC page and return it to the
+ * general EPC page pool.
+ *
+ * Guests can not be trusted to have left this page in a good
+ * state, so run EREMOVE on the page unconditionally. In the
+ * case that a guest properly EREMOVE'd this page, a superfluous
+ * EREMOVE is harmless.
+ */
+ ret = __eremove(sgx_get_epc_virt_addr(epc_page));
+ if (ret) {
+ /*
+ * Only SGX_CHILD_PRESENT is expected, which happens when
+ * EREMOVE'ing an SECS that still has children. That case can
+ * be handled by EREMOVE'ing the SECS again after all pages in
+ * the virtual EPC have been EREMOVE'd. See the comments below
+ * in sgx_vepc_release().
+ *
+ * The user of virtual EPC (KVM) needs to guarantee that no
+ * logical processor is still running in the enclave in the
+ * guest, otherwise EREMOVE will get SGX_ENCLAVE_ACT, which
+ * cannot be handled here.
+ */
+ WARN_ONCE(ret != SGX_CHILD_PRESENT, EREMOVE_ERROR_MESSAGE,
+ ret, ret);
+ return ret;
+ }
+
+ sgx_free_epc_page(epc_page);
+
+ return 0;
+}
+
+static int sgx_vepc_release(struct inode *inode, struct file *file)
+{
+ struct sgx_vepc *vepc = file->private_data;
+ struct sgx_epc_page *epc_page, *tmp, *entry;
+ unsigned long index;
+
+ LIST_HEAD(secs_pages);
+
+ xa_for_each(&vepc->page_array, index, entry) {
+ /*
+ * Remove all normal, child pages. sgx_vepc_free_page()
+ * will fail if EREMOVE fails, but this is OK and expected on
+ * SECS pages. Those can only be EREMOVE'd *after* all their
+ * child pages. Retries below will clean them up.
+ */
+ if (sgx_vepc_free_page(entry))
+ continue;
+
+ xa_erase(&vepc->page_array, index);
+ }
+
+ /*
+ * Retry EREMOVE'ing pages. This will clean up any SECS pages that
+ * only had children in this 'epc' area.
+ */
+ xa_for_each(&vepc->page_array, index, entry) {
+ epc_page = entry;
+ /*
+ * An EREMOVE failure here means that the SECS page still
+ * has children. But, since all children in this 'sgx_vepc'
+ * have been removed, the SECS page must have a child on
+ * another instance.
+ */
+ if (sgx_vepc_free_page(epc_page))
+ list_add_tail(&epc_page->list, &secs_pages);
+
+ xa_erase(&vepc->page_array, index);
+ }
+
+ /*
+ * SECS pages are "pinned" by child pages, and "unpinned" once all
+ * children have been EREMOVE'd. A child page in this instance
+ * may have pinned an SECS page encountered in an earlier release(),
+ * creating a zombie. Since some children were EREMOVE'd above,
+ * try to EREMOVE all zombies in the hopes that one was unpinned.
+ */
+ mutex_lock(&zombie_secs_pages_lock);
+ list_for_each_entry_safe(epc_page, tmp, &zombie_secs_pages, list) {
+ /*
+ * Speculatively remove the page from the list of zombies,
+ * if the page is successfully EREMOVE'd it will be added to
+ * the list of free pages. If EREMOVE fails, throw the page
+ * on the local list, which will be spliced on at the end.
+ */
+ list_del(&epc_page->list);
+
+ if (sgx_vepc_free_page(epc_page))
+ list_add_tail(&epc_page->list, &secs_pages);
+ }
+
+ if (!list_empty(&secs_pages))
+ list_splice_tail(&secs_pages, &zombie_secs_pages);
+ mutex_unlock(&zombie_secs_pages_lock);
+
+ xa_destroy(&vepc->page_array);
+ kfree(vepc);
+
+ return 0;
+}
+
+static int sgx_vepc_open(struct inode *inode, struct file *file)
+{
+ struct sgx_vepc *vepc;
+
+ vepc = kzalloc(sizeof(struct sgx_vepc), GFP_KERNEL);
+ if (!vepc)
+ return -ENOMEM;
+ mutex_init(&vepc->lock);
+ xa_init(&vepc->page_array);
+
+ file->private_data = vepc;
+
+ return 0;
+}
+
+static const struct file_operations sgx_vepc_fops = {
+ .owner = THIS_MODULE,
+ .open = sgx_vepc_open,
+ .release = sgx_vepc_release,
+ .mmap = sgx_vepc_mmap,
+};
+
+static struct miscdevice sgx_vepc_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "sgx_vepc",
+ .nodename = "sgx_vepc",
+ .fops = &sgx_vepc_fops,
+};
+
+int __init sgx_vepc_init(void)
+{
+ /* SGX virtualization requires KVM to work */
+ if (!cpu_feature_enabled(X86_FEATURE_VMX))
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&zombie_secs_pages);
+ mutex_init(&zombie_secs_pages_lock);
+
+ return misc_register(&sgx_vepc_dev);
+}
+
+/**
+ * sgx_virt_ecreate() - Run ECREATE on behalf of guest
+ * @pageinfo: Pointer to PAGEINFO structure
+ * @secs: Userspace pointer to SECS page
+ * @trapnr: trap number injected into the guest in case of an ECREATE error
+ *
+ * Run ECREATE on behalf of the guest after KVM traps ECREATE for the
+ * purpose of enforcing policies on the guest's enclaves, and return the
+ * trap number which should be injected into the guest in case of any
+ * ECREATE error.
+ *
+ * Return:
+ * - 0: ECREATE was successful.
+ * - <0: on error.
+ */
+int sgx_virt_ecreate(struct sgx_pageinfo *pageinfo, void __user *secs,
+ int *trapnr)
+{
+ int ret;
+
+ /*
+ * @secs is an untrusted, userspace-provided address. It comes from
+ * KVM and is assumed to be a valid pointer which points somewhere in
+ * userspace. This can fault and call SGX or other fault handlers when
+ * the userspace mapping for @secs doesn't exist.
+ *
+ * Add a WARN() to make sure @secs is already a valid userspace pointer
+ * from the caller (KVM), who should already have handled the invalid
+ * pointer case (for instance, one made by a malicious guest). All other
+ * checks, such as the alignment of @secs, are deferred to ENCLS itself.
+ */
+ if (WARN_ON_ONCE(!access_ok(secs, PAGE_SIZE)))
+ return -EINVAL;
+
+ __uaccess_begin();
+ ret = __ecreate(pageinfo, (void *)secs);
+ __uaccess_end();
+
+ if (encls_faulted(ret)) {
+ *trapnr = ENCLS_TRAPNR(ret);
+ return -EFAULT;
+ }
+
+ /* ECREATE doesn't return an error code, it faults or succeeds. */
+ WARN_ON_ONCE(ret);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sgx_virt_ecreate);
+
+static int __sgx_virt_einit(void __user *sigstruct, void __user *token,
+ void __user *secs)
+{
+ int ret;
+
+ /*
+ * Make sure all userspace pointers from caller (KVM) are valid.
+ * All other checks deferred to ENCLS itself. Also see comment
+ * for @secs in sgx_virt_ecreate().
+ */
+#define SGX_EINITTOKEN_SIZE 304
+ if (WARN_ON_ONCE(!access_ok(sigstruct, sizeof(struct sgx_sigstruct)) ||
+ !access_ok(token, SGX_EINITTOKEN_SIZE) ||
+ !access_ok(secs, PAGE_SIZE)))
+ return -EINVAL;
+
+ __uaccess_begin();
+ ret = __einit((void *)sigstruct, (void *)token, (void *)secs);
+ __uaccess_end();
+
+ return ret;
+}
+
+/**
+ * sgx_virt_einit() - Run EINIT on behalf of guest
+ * @sigstruct: Userspace pointer to SIGSTRUCT structure
+ * @token: Userspace pointer to EINITTOKEN structure
+ * @secs: Userspace pointer to SECS page
+ * @lepubkeyhash: Pointer to guest's *virtual* SGX_LEPUBKEYHASH MSR values
+ * @trapnr: trap number injected into the guest in case of an EINIT error
+ *
+ * Run EINIT on behalf of the guest after KVM traps EINIT. If SGX_LC is
+ * available in the host, the SGX driver may rewrite the hardware values at
+ * will, so KVM needs to update the hardware values to the guest's virtual
+ * MSR values in order to ensure EINIT executes with the expected values.
+ *
+ * Return:
+ * - 0: EINIT was successful.
+ * - <0: on error.
+ */
+int sgx_virt_einit(void __user *sigstruct, void __user *token,
+ void __user *secs, u64 *lepubkeyhash, int *trapnr)
+{
+ int ret;
+
+ if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) {
+ ret = __sgx_virt_einit(sigstruct, token, secs);
+ } else {
+ preempt_disable();
+
+ sgx_update_lepubkeyhash(lepubkeyhash);
+
+ ret = __sgx_virt_einit(sigstruct, token, secs);
+ preempt_enable();
+ }
+
+ /* Propagate up the error from the WARN_ON_ONCE in __sgx_virt_einit() */
+ if (ret == -EINVAL)
+ return ret;
+
+ if (encls_faulted(ret)) {
+ *trapnr = ENCLS_TRAPNR(ret);
+ return -EFAULT;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sgx_virt_einit);
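The preempt_disable() around sgx_update_lepubkeyhash() keeps the four hash MSR writes and the subsequent EINIT on the same CPU. A hypothetical sketch of the write-avoidance idea such a helper typically uses, a per-CPU cache of the last-written values (all names here are invented, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    #define NR_HASH_MSRS 4

    /* Hypothetical cache of the last SGX_LEPUBKEYHASH values written. */
    static uint64_t cached_hash[NR_HASH_MSRS];
    static int msr_writes;

    /* Stand-in for wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, val). */
    static void fake_wrmsr(int i, uint64_t val)
    {
            cached_hash[i] = val;
            msr_writes++;
    }

    static void update_lepubkeyhash(const uint64_t *wanted)
    {
            for (int i = 0; i < NR_HASH_MSRS; i++) {
                    /* Skip the expensive MSR write if the value is current. */
                    if (cached_hash[i] != wanted[i])
                            fake_wrmsr(i, wanted[i]);
            }
    }

    int main(void)
    {
            const uint64_t guest[NR_HASH_MSRS] = { 1, 2, 3, 4 };

            update_lepubkeyhash(guest);
            update_lepubkeyhash(guest);     /* second call is a no-op */
            printf("MSR writes: %d\n", msr_writes);     /* 4, not 8 */
            return 0;
    }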
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 8678864ce712..132a2de44d2f 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(__max_die_per_package);
#ifdef CONFIG_SMP
/*
- * Check if given CPUID extended toplogy "leaf" is implemented
+ * Check if given CPUID extended topology "leaf" is implemented
*/
static int check_extended_topology_leaf(int leaf)
{
@@ -44,7 +44,7 @@ static int check_extended_topology_leaf(int leaf)
return 0;
}
/*
- * Return best CPUID Extended Toplogy Leaf supported
+ * Return best CPUID Extended Topology Leaf supported
*/
static int detect_extended_topology_leaf(struct cpuinfo_x86 *c)
{
diff --git a/arch/x86/kernel/cpu/tsx.c b/arch/x86/kernel/cpu/tsx.c
index e2ad30e474f8..9c7a5f049292 100644
--- a/arch/x86/kernel/cpu/tsx.c
+++ b/arch/x86/kernel/cpu/tsx.c
@@ -2,7 +2,7 @@
/*
* Intel Transactional Synchronization Extensions (TSX) control.
*
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019-2021 Intel Corporation
*
* Author:
* Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
@@ -84,13 +84,46 @@ static enum tsx_ctrl_states x86_get_tsx_auto_mode(void)
return TSX_CTRL_ENABLE;
}
+void tsx_clear_cpuid(void)
+{
+ u64 msr;
+
+ /*
+ * MSR_TFA_TSX_CPUID_CLEAR bit is only present when both CPUID
+ * bits RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are present.
+ */
+ if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
+ boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ rdmsrl(MSR_TSX_FORCE_ABORT, msr);
+ msr |= MSR_TFA_TSX_CPUID_CLEAR;
+ wrmsrl(MSR_TSX_FORCE_ABORT, msr);
+ }
+}
+
void __init tsx_init(void)
{
char arg[5] = {};
int ret;
- if (!tsx_ctrl_is_supported())
+ /*
+ * Hardware will always abort a TSX transaction if both CPUID bits
+ * RTM_ALWAYS_ABORT and TSX_FORCE_ABORT are set. In this case, it is
+ * better not to enumerate CPUID.RTM and CPUID.HLE bits. Clear them
+ * here.
+ */
+ if (boot_cpu_has(X86_FEATURE_RTM_ALWAYS_ABORT) &&
+ boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ tsx_ctrl_state = TSX_CTRL_RTM_ALWAYS_ABORT;
+ tsx_clear_cpuid();
+ setup_clear_cpu_cap(X86_FEATURE_RTM);
+ setup_clear_cpu_cap(X86_FEATURE_HLE);
return;
+ }
+
+ if (!tsx_ctrl_is_supported()) {
+ tsx_ctrl_state = TSX_CTRL_NOT_SUPPORTED;
+ return;
+ }
ret = cmdline_find_option(boot_command_line, "tsx", arg, sizeof(arg));
if (ret >= 0) {
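tsx_clear_cpuid() above is a plain read-modify-write of a single MSR bit. The same pattern against a fake MSR (the bit position and the rdmsr/wrmsr stand-ins are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_TFA_TSX_CPUID_CLEAR (1ULL << 1)     /* illustrative bit */

    static uint64_t fake_msr;       /* stands in for MSR_TSX_FORCE_ABORT */

    int main(void)
    {
            uint64_t msr = fake_msr;                /* rdmsrl() */

            msr |= MSR_TFA_TSX_CPUID_CLEAR;         /* set the one bit we own */
            fake_msr = msr;                         /* wrmsrl() */

            printf("msr = %#llx\n", (unsigned long long)fake_msr);
            return 0;
    }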
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index c6ede3b3d302..c04b933f48d3 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -27,6 +27,7 @@
#include <linux/clocksource.h>
#include <linux/cpu.h>
#include <linux/reboot.h>
+#include <linux/static_call.h>
#include <asm/div64.h>
#include <asm/x86_init.h>
#include <asm/hypervisor.h>
@@ -336,11 +337,11 @@ static void __init vmware_paravirt_ops_setup(void)
vmware_cyc2ns_setup();
if (vmw_sched_clock)
- pv_ops.time.sched_clock = vmware_sched_clock;
+ paravirt_set_sched_clock(vmware_sched_clock);
if (vmware_is_stealclock_available()) {
has_steal_clock = true;
- pv_ops.time.steal_clock = vmware_steal_clock;
+ static_call_update(pv_steal_clock, vmware_steal_clock);
/* We use reboot notifier only to disable steal clock */
register_reboot_notifier(&vmware_pv_reboot_nb);
@@ -378,6 +379,8 @@ static void __init vmware_set_capabilities(void)
{
setup_force_cpu_cap(X86_FEATURE_CONSTANT_TSC);
setup_force_cpu_cap(X86_FEATURE_TSC_RELIABLE);
+ if (vmware_tsc_khz)
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMCALL)
setup_force_cpu_cap(X86_FEATURE_VMCALL);
else if (vmware_hypercall_mode == CPUID_VMWARE_FEATURES_ECX_VMMCALL)
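paravirt_set_sched_clock() and static_call_update() replace the raw pv_ops function-pointer stores with patched call sites. A rough userspace approximation of the registration semantics only (an ordinary function pointer stands in for the static call; the text patching that makes real static calls cheap is elided):

    #include <stdio.h>

    static unsigned long long native_steal_clock(int cpu)
    {
            (void)cpu;
            return 0;       /* bare metal: no steal time */
    }

    static unsigned long long vmware_steal_clock(int cpu)
    {
            (void)cpu;
            return 42;      /* would read the shared steal-time area */
    }

    /* Stand-in for DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock). */
    static unsigned long long (*pv_steal_clock)(int) = native_steal_clock;

    int main(void)
    {
            /* Stand-in for static_call_update(pv_steal_clock, ...). */
            pv_steal_clock = vmware_steal_clock;

            printf("steal: %llu\n", pv_steal_clock(0));
            return 0;
    }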
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index a8f3af257e26..e8326a8d1c5d 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -70,19 +70,6 @@ static inline void cpu_crash_vmclear_loaded_vmcss(void)
rcu_read_unlock();
}
-/*
- * When the crashkernel option is specified, only use the low
- * 1M for the real mode trampoline.
- */
-void __init crash_reserve_low_1M(void)
-{
- if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0)
- return;
-
- memblock_reserve(0, 1<<20);
- pr_info("Reserving the low 1M of memory for crashkernel\n");
-}
-
#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
@@ -323,8 +310,8 @@ static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
cmem->nr_ranges = 1;
/* Exclude elf header region */
- start = image->arch.elf_load_addr;
- end = start + image->arch.elf_headers_sz - 1;
+ start = image->elf_load_addr;
+ end = start + image->elf_headers_sz - 1;
return crash_exclude_mem_range(cmem, start, end);
}
@@ -337,7 +324,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
struct crash_memmap_data cmd;
struct crash_mem *cmem;
- cmem = vzalloc(sizeof(struct crash_mem));
+ cmem = vzalloc(struct_size(cmem, ranges, 1));
if (!cmem)
return -ENOMEM;
@@ -407,20 +394,20 @@ int crash_load_segments(struct kimage *image)
if (ret)
return ret;
- image->arch.elf_headers = kbuf.buffer;
- image->arch.elf_headers_sz = kbuf.bufsz;
+ image->elf_headers = kbuf.buffer;
+ image->elf_headers_sz = kbuf.bufsz;
kbuf.memsz = kbuf.bufsz;
kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret) {
- vfree((void *)image->arch.elf_headers);
+ vfree((void *)image->elf_headers);
return ret;
}
- image->arch.elf_load_addr = kbuf.mem;
+ image->elf_load_addr = kbuf.mem;
pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
- image->arch.elf_load_addr, kbuf.bufsz, kbuf.bufsz);
+ image->elf_load_addr, kbuf.bufsz, kbuf.bufsz);
return ret;
}
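struct_size(cmem, ranges, 1) sizes a structure ending in a flexible array member, making the one trailing range element explicit instead of relying on sizeof alone. A loose userspace rendering (field names simplified; the kernel's struct_size() additionally saturates on overflow):

    #include <stdio.h>
    #include <stdlib.h>

    struct range {
            unsigned long start, end;
    };

    struct crash_mem_demo {
            unsigned int max_nr_ranges;
            unsigned int nr_ranges;
            struct range ranges[];          /* flexible array member */
    };

    /* Userspace stand-in for the kernel's overflow-checked struct_size(). */
    #define demo_struct_size(p, member, n) \
            (sizeof(*(p)) + sizeof((p)->member[0]) * (n))

    int main(void)
    {
            struct crash_mem_demo *cmem;

            /* sizeof is unevaluated, so using cmem here is fine. */
            cmem = calloc(1, demo_struct_size(cmem, ranges, 1));
            if (!cmem)
                    return 1;
            printf("allocated %zu bytes\n", demo_struct_size(cmem, ranges, 1));
            free(cmem);
            return 0;
    }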
diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
index 759d392cbe9f..d1d49e3d536b 100644
--- a/arch/x86/kernel/doublefault_32.c
+++ b/arch/x86/kernel/doublefault_32.c
@@ -100,9 +100,7 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack) = {
.ss = __KERNEL_DS,
.ds = __USER_DS,
.fs = __KERNEL_PERCPU,
-#ifndef CONFIG_X86_32_LAZY_GS
- .gs = __KERNEL_STACK_CANARY,
-#endif
+ .gs = 0,
.__cr3 = __pa_nodebug(swapper_pg_dir),
},
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 22aad412f965..bc0657f0deed 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -31,8 +31,8 @@
* - inform the user about the firmware's notion of memory layout
* via /sys/firmware/memmap
*
- * - the hibernation code uses it to generate a kernel-independent MD5
- * fingerprint of the physical memory layout of a system.
+ * - the hibernation code uses it to generate a kernel-independent CRC32
+ * checksum of the physical memory layout of a system.
*
* - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
* passed to us by the bootloader - the major difference between
@@ -793,7 +793,7 @@ core_initcall(e820__register_nvs_regions);
#endif
/*
- * Allocate the requested number of bytes with the requsted alignment
+ * Allocate the requested number of bytes with the requested alignment
* and return (the physical address) to the caller. Also register this
* range in the 'kexec' E820 table as a reserved range.
*
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index a4b5af03dcc1..38837dad46e6 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -549,8 +549,11 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_CNL_IDS(&gen9_early_ops),
INTEL_ICL_11_IDS(&gen11_early_ops),
INTEL_EHL_IDS(&gen11_early_ops),
+ INTEL_JSL_IDS(&gen11_early_ops),
INTEL_TGL_12_IDS(&gen11_early_ops),
INTEL_RKL_IDS(&gen11_early_ops),
+ INTEL_ADLS_IDS(&gen11_early_ops),
+ INTEL_ADLP_IDS(&gen11_early_ops),
};
struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index a4ec65317a7f..b7b92cdf3add 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -221,28 +221,18 @@ sanitize_restored_user_xstate(union fpregs_state *state,
if (use_xsave()) {
/*
- * Note: we don't need to zero the reserved bits in the
- * xstate_header here because we either didn't copy them at all,
- * or we checked earlier that they aren't set.
+ * Clear all feature bits which are not set in
+ * user_xfeatures and clear all extended features
+ * for fx_only mode.
*/
+ u64 mask = fx_only ? XFEATURE_MASK_FPSSE : user_xfeatures;
/*
- * 'user_xfeatures' might have bits clear which are
- * set in header->xfeatures. This represents features that
- * were in init state prior to a signal delivery, and need
- * to be reset back to the init state. Clear any user
- * feature bits which are set in the kernel buffer to get
- * them back to the init state.
- *
- * Supervisor state is unchanged by input from userspace.
- * Ensure supervisor state bits stay set and supervisor
- * state is not modified.
+ * Supervisor state has to be preserved. The sigframe
+ * restore can only modify user features, i.e. @mask
+ * cannot contain them.
*/
- if (fx_only)
- header->xfeatures = XFEATURE_MASK_FPSSE;
- else
- header->xfeatures &= user_xfeatures |
- xfeatures_mask_supervisor();
+ header->xfeatures &= mask | xfeatures_mask_supervisor();
}
if (use_fxsr()) {
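The new mask expression above replaces the old fx_only branch with one line of bit arithmetic. A quick standalone check of that arithmetic, with made-up feature bits:

    #include <assert.h>
    #include <stdint.h>

    #define XFEATURE_MASK_FPSSE     0x3ULL          /* FP | SSE */
    #define FAKE_SUPERVISOR_BITS    0x400ULL        /* invented for the demo */

    static uint64_t sanitize(uint64_t xfeatures, uint64_t user_xfeatures,
                             int fx_only, uint64_t supervisor)
    {
            uint64_t mask = fx_only ? XFEATURE_MASK_FPSSE : user_xfeatures;

            /* Supervisor bits survive; user bits are clamped to @mask. */
            return xfeatures & (mask | supervisor);
    }

    int main(void)
    {
            /* fx_only keeps only FP/SSE plus supervisor state. */
            assert(sanitize(0x4ffULL, 0xffULL, 1, FAKE_SUPERVISOR_BITS) == 0x403ULL);
            /* Otherwise user features are limited to user_xfeatures. */
            assert(sanitize(0x4ffULL, 0x0fULL, 0, FAKE_SUPERVISOR_BITS) == 0x40fULL);
            return 0;
    }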
@@ -307,13 +297,17 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
return 0;
}
- if (!access_ok(buf, size))
- return -EACCES;
+ if (!access_ok(buf, size)) {
+ ret = -EACCES;
+ goto out;
+ }
- if (!static_cpu_has(X86_FEATURE_FPU))
- return fpregs_soft_set(current, NULL,
- 0, sizeof(struct user_i387_ia32_struct),
- NULL, buf) != 0;
+ if (!static_cpu_has(X86_FEATURE_FPU)) {
+ ret = fpregs_soft_set(current, NULL, 0,
+ sizeof(struct user_i387_ia32_struct),
+ NULL, buf);
+ goto out;
+ }
if (use_xsave()) {
struct _fpx_sw_bytes fx_sw_user;
@@ -369,6 +363,25 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
fpregs_unlock();
return 0;
}
+
+ /*
+ * The above did an FPU restore operation, restricted to
+ * the user portion of the registers, and failed, but the
+ * microcode might have modified the FPU registers
+ * nevertheless.
+ *
+ * If the FPU registers do not belong to current, then
+ * invalidate the FPU register state otherwise the task might
+ * preempt current and return to user space with corrupted
+ * FPU registers.
+ *
+ * In case current owns the FPU registers then no further
+ * action is required. The fixup below will handle it
+ * correctly.
+ */
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ __cpu_invalidate_fpregs_state();
+
fpregs_unlock();
} else {
/*
@@ -377,7 +390,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
*/
ret = __copy_from_user(&env, buf, sizeof(env));
if (ret)
- goto err_out;
+ goto out;
envp = &env;
}
@@ -405,16 +418,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
if (use_xsave() && !fx_only) {
u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
- if (using_compacted_format()) {
- ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
- } else {
- ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
-
- if (!ret && state_size > offsetof(struct xregs_state, header))
- ret = validate_user_xstate_header(&fpu->state.xsave.header);
- }
+ ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
if (ret)
- goto err_out;
+ goto out;
sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
fx_only);
@@ -434,7 +440,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
if (ret) {
ret = -EFAULT;
- goto err_out;
+ goto out;
}
sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
@@ -452,7 +458,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
} else {
ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
if (ret)
- goto err_out;
+ goto out;
fpregs_lock();
ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
@@ -463,7 +469,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
fpregs_deactivate(fpu);
fpregs_unlock();
-err_out:
+out:
if (ret)
fpu__clear_user_states(fpu);
return ret;
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 683749b80ae2..1cadb2faf740 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -253,7 +253,7 @@ static bool xfeature_enabled(enum xfeature xfeature)
static void __init setup_xstate_features(void)
{
u32 eax, ebx, ecx, edx, i;
- /* start at the beginnning of the "extended state" */
+ /* start at the beginning of the "extended state" */
unsigned int last_good_offset = offsetof(struct xregs_state,
extended_state_area);
/*
@@ -441,12 +441,35 @@ static void __init print_xstate_offset_size(void)
}
/*
+ * All supported features either have an all-zeros init state or are
+ * handled individually in setup_init_fpu_buf(). This is an explicit
+ * feature list which deliberately does not use XFEATURE_MASK*SUPPORTED,
+ * so that newly added supported features trip the build-time check
+ * below and make people actually look at the init state for the new
+ * feature.
+ */
+#define XFEATURES_INIT_FPSTATE_HANDLED \
+ (XFEATURE_MASK_FP | \
+ XFEATURE_MASK_SSE | \
+ XFEATURE_MASK_YMM | \
+ XFEATURE_MASK_OPMASK | \
+ XFEATURE_MASK_ZMM_Hi256 | \
+ XFEATURE_MASK_Hi16_ZMM | \
+ XFEATURE_MASK_PKRU | \
+ XFEATURE_MASK_BNDREGS | \
+ XFEATURE_MASK_BNDCSR | \
+ XFEATURE_MASK_PASID)
+
+/*
* setup the xstate image representing the init state
*/
static void __init setup_init_fpu_buf(void)
{
static int on_boot_cpu __initdata = 1;
+ BUILD_BUG_ON((XFEATURE_MASK_USER_SUPPORTED |
+ XFEATURE_MASK_SUPERVISOR_SUPPORTED) !=
+ XFEATURES_INIT_FPSTATE_HANDLED);
+
WARN_ON_FPU(!on_boot_cpu);
on_boot_cpu = 0;
@@ -466,10 +489,22 @@ static void __init setup_init_fpu_buf(void)
copy_kernel_to_xregs_booting(&init_fpstate.xsave);
/*
- * Dump the init state again. This is to identify the init state
- * of any feature which is not represented by all zero's.
+ * All components are now in init state. Read the state back so
+ * that init_fpstate contains all non-zero init state. This only
+ * works with XSAVE, but not with XSAVEOPT and XSAVES because
+ * those use the init optimization which skips writing data for
+ * components in init state.
+ *
+ * XSAVE could be used, but that would require to reshuffle the
+ * data when XSAVES is available because XSAVES uses xstate
+ * compaction. But doing so is a pointless exercise because most
+ * components have an all zeros init state except for the legacy
+ * ones (FP and SSE). Those can be saved with FXSAVE into the
+ * legacy area. Adding new features requires ensuring that their
+ * init state is all zeros or, if not, adding the necessary
+ * handling here.
*/
- copy_xregs_to_kernel_booting(&init_fpstate.xsave);
+ fxsave(&init_fpstate.fxsave);
}
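The BUILD_BUG_ON above pins the explicit feature list to the supported-feature masks at compile time. The same guard in plain C11, with invented mask values:

    #include <assert.h>

    #define SUPPORTED_USER_MASK     0x0ffULL
    #define SUPPORTED_SUPER_MASK    0x400ULL
    #define INIT_FPSTATE_HANDLED    0x4ffULL        /* must list every feature */

    /* Fails to compile the moment a new feature bit is added to either
     * mask without being accounted for in INIT_FPSTATE_HANDLED. */
    static_assert((SUPPORTED_USER_MASK | SUPPORTED_SUPER_MASK) ==
                  INIT_FPSTATE_HANDLED, "handle init state for new xfeature");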
static int xfeature_uncompacted_offset(int xfeature_nr)
@@ -1402,60 +1437,3 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
return 0;
}
#endif /* CONFIG_PROC_PID_ARCH_STATUS */
-
-#ifdef CONFIG_IOMMU_SUPPORT
-void update_pasid(void)
-{
- u64 pasid_state;
- u32 pasid;
-
- if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
- return;
-
- if (!current->mm)
- return;
-
- pasid = READ_ONCE(current->mm->pasid);
- /* Set the valid bit in the PASID MSR/state only for valid pasid. */
- pasid_state = pasid == PASID_DISABLED ?
- pasid : pasid | MSR_IA32_PASID_VALID;
-
- /*
- * No need to hold fregs_lock() since the task's fpstate won't
- * be changed by others (e.g. ptrace) while the task is being
- * switched to or is in IPI.
- */
- if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
- /* The MSR is active and can be directly updated. */
- wrmsrl(MSR_IA32_PASID, pasid_state);
- } else {
- struct fpu *fpu = &current->thread.fpu;
- struct ia32_pasid_state *ppasid_state;
- struct xregs_state *xsave;
-
- /*
- * The CPU's xstate registers are not currently active. Just
- * update the PASID state in the memory buffer here. The
- * PASID MSR will be loaded when returning to user mode.
- */
- xsave = &fpu->state.xsave;
- xsave->header.xfeatures |= XFEATURE_MASK_PASID;
- ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID);
- /*
- * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state
- * won't be NULL and no need to check its value.
- *
- * Only update the task's PASID state when it's different
- * from the mm's pasid.
- */
- if (ppasid_state->pasid != pasid_state) {
- /*
- * Invalid fpregs so that state restoring will pick up
- * the PASID state.
- */
- __fpu_invalidate_fpregs_state(fpu);
- ppasid_state->pasid = pasid_state;
- }
- }
-}
-#endif /* CONFIG_IOMMU_SUPPORT */
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 7edbd5ee5ed4..1b3ce3b4a2a2 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -66,7 +66,7 @@ int ftrace_arch_code_modify_post_process(void)
static const char *ftrace_nop_replace(void)
{
- return ideal_nops[NOP_ATOMIC5];
+ return x86_nops[5];
}
static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
@@ -377,7 +377,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
ip = trampoline + (jmp_offset - start_offset);
if (WARN_ON(*(char *)ip != 0x75))
goto fail;
- ret = copy_from_kernel_nofault(ip, ideal_nops[2], 2);
+ ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
if (ret < 0)
goto fail;
}
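x86_nops is indexed directly by NOP length, which is what lets ideal_nops[NOP_ATOMIC5] collapse to x86_nops[5]. A toy table of the same shape (the encodings shown are common multi-byte NOPs; the kernel selects per-CPU optimal ones):

    #include <stdio.h>
    #include <string.h>

    static const unsigned char nop1[] = { 0x90 };
    static const unsigned char nop2[] = { 0x66, 0x90 };
    static const unsigned char nop5[] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

    /* Index == NOP length, so callers can write demo_nops[len]. */
    static const unsigned char * const demo_nops[6] = {
            [1] = nop1, [2] = nop2, [5] = nop5,
    };

    int main(void)
    {
            unsigned char buf[5];

            memcpy(buf, demo_nops[5], 5);   /* patch site becomes a 5-byte NOP */
            printf("%02x %02x\n", buf[0], buf[1]);
            return 0;
    }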
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 5e9beb77cafd..de01903c3735 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -39,7 +39,7 @@
#include <asm/realmode.h>
#include <asm/extable.h>
#include <asm/trapnr.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
/*
* Manage page tables very early on.
@@ -104,7 +104,7 @@ static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
static bool __head check_la57_support(unsigned long physaddr)
{
/*
- * 5-level paging is detected and enabled at kernel decomression
+ * 5-level paging is detected and enabled at kernel decompression
* stage. Only check if it has been enabled there.
*/
if (!(native_read_cr4() & X86_CR4_LA57))
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 7ed84c282233..67f590425d90 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -318,8 +318,8 @@ SYM_FUNC_START(startup_32_smp)
movl $(__KERNEL_PERCPU), %eax
movl %eax,%fs # set this cpu's percpu
- movl $(__KERNEL_STACK_CANARY),%eax
- movl %eax,%gs
+ xorl %eax,%eax
+ movl %eax,%gs # clear possible garbage in %gs
xorl %eax,%eax # Clear LDT
lldt %ax
@@ -339,20 +339,6 @@ SYM_FUNC_END(startup_32_smp)
*/
__INIT
setup_once:
-#ifdef CONFIG_STACKPROTECTOR
- /*
- * Configure the stack canary. The linker can't handle this by
- * relocation. Manually set base address in stack canary
- * segment descriptor.
- */
- movl $gdt_page,%eax
- movl $stack_canary,%ecx
- movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
- shrl $16, %ecx
- movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
- movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
-#endif
-
andl $0,setup_once_ref /* Once is enough, thanks */
ret
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 04bddaaba8e2..d8b3ebd2bb85 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -62,7 +62,7 @@ SYM_CODE_START_NOALIGN(startup_64)
*/
/* Set up the stack for verify_cpu(), similar to initial_stack below */
- leaq (__end_init_task - SIZEOF_PTREGS)(%rip), %rsp
+ leaq (__end_init_task - FRAME_SIZE)(%rip), %rsp
leaq _text(%rip), %rdi
pushq %rsi
@@ -343,10 +343,10 @@ SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb)
#endif
/*
- * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
+ * The FRAME_SIZE gap is a convention which helps the in-kernel unwinder
* reliably detect the end of the stack.
*/
-SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS)
+SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - FRAME_SIZE)
__FINITDATA
__INIT
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index ee1a283f8e96..df0fa695bb09 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -35,12 +35,16 @@
#define SYSG(_vector, _addr) \
G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL3, __KERNEL_CS)
+#ifdef CONFIG_X86_64
/*
* Interrupt gate with interrupt stack. The _ist index is the index in
* the tss.ist[] array, but for the descriptor it needs to start at 1.
*/
#define ISTG(_vector, _addr, _ist) \
G(_vector, _addr, _ist + 1, GATE_INTERRUPT, DPL0, __KERNEL_CS)
+#else
+#define ISTG(_vector, _addr, _ist) INTG(_vector, _addr)
+#endif
/* Task gate */
#define TSKG(_vector, _gdt) \
@@ -74,7 +78,7 @@ static const __initconst struct idt_data early_idts[] = {
*/
static const __initconst struct idt_data def_idts[] = {
INTG(X86_TRAP_DE, asm_exc_divide_error),
- INTG(X86_TRAP_NMI, asm_exc_nmi),
+ ISTG(X86_TRAP_NMI, asm_exc_nmi, IST_INDEX_NMI),
INTG(X86_TRAP_BR, asm_exc_bounds),
INTG(X86_TRAP_UD, asm_exc_invalid_op),
INTG(X86_TRAP_NM, asm_exc_device_not_available),
@@ -91,12 +95,16 @@ static const __initconst struct idt_data def_idts[] = {
#ifdef CONFIG_X86_32
TSKG(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS),
#else
- INTG(X86_TRAP_DF, asm_exc_double_fault),
+ ISTG(X86_TRAP_DF, asm_exc_double_fault, IST_INDEX_DF),
#endif
- INTG(X86_TRAP_DB, asm_exc_debug),
+ ISTG(X86_TRAP_DB, asm_exc_debug, IST_INDEX_DB),
#ifdef CONFIG_X86_MCE
- INTG(X86_TRAP_MC, asm_exc_machine_check),
+ ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE),
+#endif
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ ISTG(X86_TRAP_VC, asm_exc_vmm_communication, IST_INDEX_VC),
#endif
SYSG(X86_TRAP_OF, asm_exc_overflow),
@@ -221,22 +229,6 @@ static const __initconst struct idt_data early_pf_idts[] = {
INTG(X86_TRAP_PF, asm_exc_page_fault),
};
-/*
- * The exceptions which use Interrupt stacks. They are setup after
- * cpu_init() when the TSS has been initialized.
- */
-static const __initconst struct idt_data ist_idts[] = {
- ISTG(X86_TRAP_DB, asm_exc_debug, IST_INDEX_DB),
- ISTG(X86_TRAP_NMI, asm_exc_nmi, IST_INDEX_NMI),
- ISTG(X86_TRAP_DF, asm_exc_double_fault, IST_INDEX_DF),
-#ifdef CONFIG_X86_MCE
- ISTG(X86_TRAP_MC, asm_exc_machine_check, IST_INDEX_MCE),
-#endif
-#ifdef CONFIG_AMD_MEM_ENCRYPT
- ISTG(X86_TRAP_VC, asm_exc_vmm_communication, IST_INDEX_VC),
-#endif
-};
-
/**
* idt_setup_early_pf - Initialize the idt table with early pagefault handler
*
@@ -245,7 +237,7 @@ static const __initconst struct idt_data ist_idts[] = {
* after that.
*
* Note, that X86_64 cannot install the real #PF handler in
- * idt_setup_early_traps() because the memory intialization needs the #PF
+ * idt_setup_early_traps() because the memory initialization needs the #PF
* handler from the early_idt_handler_array to initialize the early page
* tables.
*/
@@ -254,14 +246,6 @@ void __init idt_setup_early_pf(void)
idt_setup_from_table(idt_table, early_pf_idts,
ARRAY_SIZE(early_pf_idts), true);
}
-
-/**
- * idt_setup_ist_traps - Initialize the idt table with traps using IST
- */
-void __init idt_setup_ist_traps(void)
-{
- idt_setup_from_table(idt_table, ist_idts, ARRAY_SIZE(ist_idts), true);
-}
#endif
static void __init idt_map_in_cea(void)
@@ -331,11 +315,10 @@ void __init idt_setup_early_handler(void)
/**
* idt_invalidate - Invalidate interrupt descriptor table
- * @addr: The virtual address of the 'invalid' IDT
*/
-void idt_invalidate(void *addr)
+void idt_invalidate(void)
{
- struct desc_ptr idt = { .address = (unsigned long) addr, .size = 0 };
+ static const struct desc_ptr idt = { .address = 0, .size = 0 };
load_idt(&idt);
}
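The ISTG()-collapses-to-INTG() trick above lets one def_idts[] table serve both 32-bit and 64-bit builds. A self-contained sketch of the same shape (gate layout and the config macro are invented for the demo):

    #include <stdio.h>

    struct gate_demo {
            int vector;
            const char *handler;
            int ist;                /* 0 = no dedicated stack */
    };

    #define INTG(v, h)      { (v), (h), 0 }
    #ifdef CONFIG_X86_64_DEMO
    #define ISTG(v, h, i)   { (v), (h), (i) + 1 }
    #else
    #define ISTG(v, h, i)   INTG(v, h)      /* 32-bit has no IST */
    #endif

    static const struct gate_demo def_idts[] = {
            INTG(0, "divide_error"),
            ISTG(2, "nmi", 3),
    };

    int main(void)
    {
            for (unsigned i = 0; i < sizeof(def_idts) / sizeof(def_idts[0]); i++)
                    printf("vec %d -> %s (ist %d)\n", def_idts[i].vector,
                           def_idts[i].handler, def_idts[i].ist);
            return 0;
    }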
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 58aa712973ac..e28f6a5d14f1 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -338,7 +338,7 @@ void fixup_irqs(void)
irq_migrate_all_off_this_cpu();
/*
- * We can remove mdelay() and then send spuriuous interrupts to
+ * We can remove mdelay() and then send spurious interrupts to
* new cpu targets for all the irqs that were handled previously by
* this cpu. While it works, I have seen spurious interrupt messages
* (nothing wrong but still...).
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 5ba8477c2cb7..674906fad43b 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -15,54 +15,75 @@
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>
+#include <asm/insn.h>
-static void bug_at(const void *ip, int line)
+int arch_jump_entry_size(struct jump_entry *entry)
{
- /*
- * The location is not an op that we were expecting.
- * Something went wrong. Crash the box, as something could be
- * corrupting the kernel.
- */
- pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", ip, ip, ip, line);
- BUG();
+ struct insn insn = {};
+
+ insn_decode_kernel(&insn, (void *)jump_entry_code(entry));
+ BUG_ON(insn.length != 2 && insn.length != 5);
+
+ return insn.length;
}
-static const void *
-__jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type, int init)
+struct jump_label_patch {
+ const void *code;
+ int size;
+};
+
+static struct jump_label_patch
+__jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
{
- const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
- const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
- const void *expect, *code;
+ const void *expect, *code, *nop;
const void *addr, *dest;
- int line;
+ int size;
addr = (void *)jump_entry_code(entry);
dest = (void *)jump_entry_target(entry);
- code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
+ size = arch_jump_entry_size(entry);
+ switch (size) {
+ case JMP8_INSN_SIZE:
+ code = text_gen_insn(JMP8_INSN_OPCODE, addr, dest);
+ nop = x86_nops[size];
+ break;
+
+ case JMP32_INSN_SIZE:
+ code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
+ nop = x86_nops[size];
+ break;
- if (init) {
- expect = default_nop; line = __LINE__;
- } else if (type == JUMP_LABEL_JMP) {
- expect = ideal_nop; line = __LINE__;
- } else {
- expect = code; line = __LINE__;
+ default: BUG();
}
- if (memcmp(addr, expect, JUMP_LABEL_NOP_SIZE))
- bug_at(addr, line);
+ if (type == JUMP_LABEL_JMP)
+ expect = nop;
+ else
+ expect = code;
+
+ if (memcmp(addr, expect, size)) {
+ /*
+ * The location is not an op that we were expecting.
+ * Something went wrong. Crash the box, as something could be
+ * corrupting the kernel.
+ */
+ pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph != %5ph)) size:%d type:%d\n",
+ addr, addr, addr, expect, size, type);
+ BUG();
+ }
if (type == JUMP_LABEL_NOP)
- code = ideal_nop;
+ code = nop;
- return code;
+ return (struct jump_label_patch){.code = code, .size = size};
}
static inline void __jump_label_transform(struct jump_entry *entry,
enum jump_label_type type,
int init)
{
- const void *opcode = __jump_label_set_jump_code(entry, type, init);
+ const struct jump_label_patch jlp = __jump_label_patch(entry, type);
/*
* As long as only a single processor is running and the code is still
@@ -76,12 +97,11 @@ static inline void __jump_label_transform(struct jump_entry *entry,
* always nop being the 'currently valid' instruction
*/
if (init || system_state == SYSTEM_BOOTING) {
- text_poke_early((void *)jump_entry_code(entry), opcode,
- JUMP_LABEL_NOP_SIZE);
+ text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
return;
}
- text_poke_bp((void *)jump_entry_code(entry), opcode, JUMP_LABEL_NOP_SIZE, NULL);
+ text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
}
static void __ref jump_label_transform(struct jump_entry *entry,
@@ -102,7 +122,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type)
{
- const void *opcode;
+ struct jump_label_patch jlp;
if (system_state == SYSTEM_BOOTING) {
/*
@@ -113,9 +133,8 @@ bool arch_jump_label_transform_queue(struct jump_entry *entry,
}
mutex_lock(&text_mutex);
- opcode = __jump_label_set_jump_code(entry, type, 0);
- text_poke_queue((void *)jump_entry_code(entry),
- opcode, JUMP_LABEL_NOP_SIZE, NULL);
+ jlp = __jump_label_patch(entry, type);
+ text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
mutex_unlock(&text_mutex);
return true;
}
@@ -136,22 +155,6 @@ static enum {
__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
enum jump_label_type type)
{
- /*
- * This function is called at boot up and when modules are
- * first loaded. Check if the default nop, the one that is
- * inserted at compile time, is the ideal nop. If it is, then
- * we do not need to update the nop, and we can leave it as is.
- * If it is not, then we need to update the nop to the ideal nop.
- */
- if (jlstate == JL_STATE_START) {
- const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
- const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
-
- if (memcmp(ideal_nop, default_nop, 5) != 0)
- jlstate = JL_STATE_UPDATE;
- else
- jlstate = JL_STATE_NO_UPDATE;
- }
if (jlstate == JL_STATE_UPDATE)
jump_label_transform(entry, type, 1);
}
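Since jump label sites can now be either 2 or 5 bytes, arch_jump_entry_size() decodes the live instruction rather than trusting a constant. Conceptually, a site can only be 2 bytes when the jump displacement fits in a signed byte; a sketch of that test (the assembler makes this choice at build time, the patcher merely observes it):

    #include <limits.h>
    #include <stdio.h>

    #define JMP8_INSN_SIZE  2
    #define JMP32_INSN_SIZE 5

    /* The displacement is relative to the end of the jump instruction. */
    static int jump_size(long addr, long dest)
    {
            long d8 = dest - (addr + JMP8_INSN_SIZE);

            return (d8 >= SCHAR_MIN && d8 <= SCHAR_MAX) ? JMP8_INSN_SIZE
                                                        : JMP32_INSN_SIZE;
    }

    int main(void)
    {
            printf("%d\n", jump_size(0x1000, 0x1010));      /* 2: short jmp */
            printf("%d\n", jump_size(0x1000, 0x9000));      /* 5: near jmp */
            return 0;
    }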
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index ce831f9448e7..170d0fd68b1f 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -75,7 +75,7 @@ static int setup_cmdline(struct kimage *image, struct boot_params *params,
if (image->type == KEXEC_TYPE_CRASH) {
len = sprintf(cmdline_ptr,
- "elfcorehdr=0x%lx ", image->arch.elf_load_addr);
+ "elfcorehdr=0x%lx ", image->elf_load_addr);
}
memcpy(cmdline_ptr + len, cmdline, cmdline_len);
cmdline_len += len;
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index ff7878df96b4..3a43a2dee658 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -17,7 +17,7 @@
* Updated by: Tom Rini <trini@kernel.crashing.org>
* Updated by: Jason Wessel <jason.wessel@windriver.com>
* Modified for 386 by Jim Kingdon, Cygnus Support.
- * Origianl kgdb, compatibility with 2.1.xx kernel by
+ * Original kgdb, compatibility with 2.1.xx kernel by
* David Grothe <dave@gcom.com>
* Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com>
* X86_64 changes from Andi Kleen's patch merged by Jim Houston
@@ -642,7 +642,7 @@ void kgdb_arch_late(void)
struct perf_event **pevent;
/*
- * Pre-allocate the hw breakpoint structions in the non-atomic
+ * Pre-allocate the hw breakpoint structures in the non-atomic
* portion of kgdb because this operation requires mutexs to
* complete.
*/
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index df776cdca327..b6e046e4b289 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -139,6 +139,8 @@ NOKPROBE_SYMBOL(synthesize_relcall);
int can_boost(struct insn *insn, void *addr)
{
kprobe_opcode_t opcode;
+ insn_byte_t prefix;
+ int i;
if (search_exception_tables((unsigned long)addr))
return 0; /* Page fault may occur on this address. */
@@ -151,35 +153,39 @@ int can_boost(struct insn *insn, void *addr)
if (insn->opcode.nbytes != 1)
return 0;
- /* Can't boost Address-size override prefix */
- if (unlikely(inat_is_address_size_prefix(insn->attr)))
- return 0;
+ for_each_insn_prefix(insn, i, prefix) {
+ insn_attr_t attr;
+
+ attr = inat_get_opcode_attribute(prefix);
+ /* Can't boost Address-size override prefix and CS override prefix */
+ if (prefix == 0x2e || inat_is_address_size_prefix(attr))
+ return 0;
+ }
opcode = insn->opcode.bytes[0];
- switch (opcode & 0xf0) {
- case 0x60:
- /* can't boost "bound" */
- return (opcode != 0x62);
- case 0x70:
- return 0; /* can't boost conditional jump */
- case 0x90:
- return opcode != 0x9a; /* can't boost call far */
- case 0xc0:
- /* can't boost software-interruptions */
- return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
- case 0xd0:
- /* can boost AA* and XLAT */
- return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
- case 0xe0:
- /* can boost in/out and absolute jmps */
- return ((opcode & 0x04) || opcode == 0xea);
- case 0xf0:
- /* clear and set flags are boostable */
- return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
+ switch (opcode) {
+ case 0x62: /* bound */
+ case 0x70 ... 0x7f: /* Conditional jumps */
+ case 0x9a: /* Call far */
+ case 0xc0 ... 0xc1: /* Grp2 */
+ case 0xcc ... 0xce: /* software exceptions */
+ case 0xd0 ... 0xd3: /* Grp2 */
+ case 0xd6: /* (UD) */
+ case 0xd8 ... 0xdf: /* ESC */
+ case 0xe0 ... 0xe3: /* LOOP*, JCXZ */
+ case 0xe8 ... 0xe9: /* near Call, JMP */
+ case 0xeb: /* Short JMP */
+ case 0xf0 ... 0xf4: /* LOCK/REP, HLT */
+ case 0xf6 ... 0xf7: /* Grp3 */
+ case 0xfe: /* Grp4 */
+ /* ... are not boostable */
+ return 0;
+ case 0xff: /* Grp5 */
+ /* Only indirect jmp is boostable */
+ return X86_MODRM_REG(insn->modrm.bytes[0]) == 4;
default:
- /* CS override prefix and call are not boostable */
- return (opcode != 0x2e && opcode != 0x9a);
+ return 1;
}
}
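The rewritten switch relies on case ranges (a GCC/Clang extension the kernel builds with) to list the non-boostable opcodes explicitly instead of masking by opcode row. The construct in miniature (only a few of the ranges, for flavor):

    #include <stdio.h>

    /* Returns 0 for opcodes that must not be boosted (a small subset). */
    static int demo_can_boost(unsigned char opcode)
    {
            switch (opcode) {
            case 0x70 ... 0x7f:     /* conditional jumps */
            case 0xcc ... 0xce:     /* software exceptions */
            case 0xf0 ... 0xf4:     /* LOCK/REP prefixes, HLT */
                    return 0;
            default:
                    return 1;
            }
    }

    int main(void)
    {
            printf("%d %d\n", demo_can_boost(0x74), demo_can_boost(0x89));
            return 0;
    }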
@@ -229,7 +235,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
return 0UL;
if (faddr)
- memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
+ memcpy(buf, x86_nops[5], 5);
else
buf[0] = kp->opcode;
return (unsigned long)buf;
@@ -265,6 +271,8 @@ static int can_probe(unsigned long paddr)
/* Decode instructions */
addr = paddr - offset;
while (addr < paddr) {
+ int ret;
+
/*
* Check if the instruction has been modified by another
* kprobe, in which case we replace the breakpoint by the
@@ -276,8 +284,10 @@ static int can_probe(unsigned long paddr)
__addr = recover_probed_instruction(buf, addr);
if (!__addr)
return 0;
- kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE);
- insn_get_length(&insn);
+
+ ret = insn_decode_kernel(&insn, (void *)__addr);
+ if (ret < 0)
+ return 0;
/*
* Another debugging subsystem might insert this breakpoint.
@@ -301,8 +311,8 @@ static int can_probe(unsigned long paddr)
int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
{
kprobe_opcode_t buf[MAX_INSN_SIZE];
- unsigned long recovered_insn =
- recover_probed_instruction(buf, (unsigned long)src);
+ unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src);
+ int ret;
if (!recovered_insn || !insn)
return 0;
@@ -312,8 +322,9 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
MAX_INSN_SIZE))
return 0;
- kernel_insn_init(insn, dest, MAX_INSN_SIZE);
- insn_get_length(insn);
+ ret = insn_decode_kernel(insn, dest);
+ if (ret < 0)
+ return 0;
/* We can not probe force emulate prefixed instruction */
if (insn_has_emulate_prefix(insn))
@@ -357,13 +368,14 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
return insn->length;
}
-/* Prepare reljump right after instruction to boost */
-static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
- struct insn *insn)
+/* Prepare reljump or int3 right after instruction */
+static int prepare_singlestep(kprobe_opcode_t *buf, struct kprobe *p,
+ struct insn *insn)
{
int len = insn->length;
- if (can_boost(insn, p->addr) &&
+ if (!IS_ENABLED(CONFIG_PREEMPTION) &&
+ !p->post_handler && can_boost(insn, p->addr) &&
MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
/*
* These instructions can be executed directly if it
@@ -374,7 +386,12 @@ static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
len += JMP32_INSN_SIZE;
p->ainsn.boostable = 1;
} else {
- p->ainsn.boostable = 0;
+ /* Otherwise, put an int3 for trapping singlestep */
+ if (MAX_INSN_SIZE - len < INT3_INSN_SIZE)
+ return -ENOSPC;
+
+ buf[len] = INT3_INSN_OPCODE;
+ len += INT3_INSN_SIZE;
}
return len;
@@ -405,92 +422,290 @@ void *alloc_insn_page(void)
return page;
}
-/* Recover page to RW mode before releasing it */
-void free_insn_page(void *page)
+/* Kprobe x86 instruction emulation - only regs->ip or IF flag modifiers */
+
+static void kprobe_emulate_ifmodifiers(struct kprobe *p, struct pt_regs *regs)
{
- module_memfree(page);
+ switch (p->ainsn.opcode) {
+ case 0xfa: /* cli */
+ regs->flags &= ~(X86_EFLAGS_IF);
+ break;
+ case 0xfb: /* sti */
+ regs->flags |= X86_EFLAGS_IF;
+ break;
+ case 0x9c: /* pushf */
+ int3_emulate_push(regs, regs->flags);
+ break;
+ case 0x9d: /* popf */
+ regs->flags = int3_emulate_pop(regs);
+ break;
+ }
+ regs->ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+}
+NOKPROBE_SYMBOL(kprobe_emulate_ifmodifiers);
+
+static void kprobe_emulate_ret(struct kprobe *p, struct pt_regs *regs)
+{
+ int3_emulate_ret(regs);
+}
+NOKPROBE_SYMBOL(kprobe_emulate_ret);
+
+static void kprobe_emulate_call(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long func = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+
+ func += p->ainsn.rel32;
+ int3_emulate_call(regs, func);
}
+NOKPROBE_SYMBOL(kprobe_emulate_call);
-static void set_resume_flags(struct kprobe *p, struct insn *insn)
+static nokprobe_inline
+void __kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs, bool cond)
+{
+ unsigned long ip = regs->ip - INT3_INSN_SIZE + p->ainsn.size;
+
+ if (cond)
+ ip += p->ainsn.rel32;
+ int3_emulate_jmp(regs, ip);
+}
+
+static void kprobe_emulate_jmp(struct kprobe *p, struct pt_regs *regs)
+{
+ __kprobe_emulate_jmp(p, regs, true);
+}
+NOKPROBE_SYMBOL(kprobe_emulate_jmp);
+
+static const unsigned long jcc_mask[6] = {
+ [0] = X86_EFLAGS_OF,
+ [1] = X86_EFLAGS_CF,
+ [2] = X86_EFLAGS_ZF,
+ [3] = X86_EFLAGS_CF | X86_EFLAGS_ZF,
+ [4] = X86_EFLAGS_SF,
+ [5] = X86_EFLAGS_PF,
+};
+
+static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
+{
+ bool invert = p->ainsn.jcc.type & 1;
+ bool match;
+
+ if (p->ainsn.jcc.type < 0xc) {
+ match = regs->flags & jcc_mask[p->ainsn.jcc.type >> 1];
+ } else {
+ match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
+ ((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
+ if (p->ainsn.jcc.type >= 0xe)
+ match = match || (regs->flags & X86_EFLAGS_ZF);
+ }
+ __kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
+}
+NOKPROBE_SYMBOL(kprobe_emulate_jcc);
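The jcc_mask table plus the low invert bit covers all sixteen Jcc condition codes: even codes test one flag combination, odd codes negate it, codes 0xc-0xf derive from SF^OF, and 0xe-0xf additionally fold in ZF (JLE is taken when ZF is set or SF != OF, hence the logical-or). A standalone rendering of that decode, with EFLAGS bit positions per the x86 layout:

    #include <assert.h>
    #include <stdbool.h>

    #define F_CF (1u << 0)
    #define F_PF (1u << 2)
    #define F_ZF (1u << 6)
    #define F_SF (1u << 7)
    #define F_OF (1u << 11)

    static const unsigned jcc_mask_demo[6] = {
            F_OF, F_CF, F_ZF, F_CF | F_ZF, F_SF, F_PF,
    };

    static bool jcc_taken(unsigned type, unsigned flags)
    {
            bool invert = type & 1;
            bool match;

            if (type < 0xc) {
                    match = flags & jcc_mask_demo[type >> 1];
            } else {
                    /* JL/JGE family: SF != OF; JLE/JG also check ZF. */
                    match = !!(flags & F_SF) ^ !!(flags & F_OF);
                    if (type >= 0xe)
                            match = match || (flags & F_ZF);
            }
            return match ^ invert;
    }

    int main(void)
    {
            assert(jcc_taken(0x4, F_ZF));   /* JE taken when ZF set */
            assert(!jcc_taken(0x5, F_ZF));  /* JNE not taken */
            assert(jcc_taken(0xc, F_SF));   /* JL taken when SF != OF */
            assert(jcc_taken(0xe, F_ZF));   /* JLE taken on ZF alone */
            return 0;
    }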
+
+static void kprobe_emulate_loop(struct kprobe *p, struct pt_regs *regs)
+{
+ bool match;
+
+ if (p->ainsn.loop.type != 3) { /* LOOP* */
+ if (p->ainsn.loop.asize == 32)
+ match = ((*(u32 *)&regs->cx)--) != 0;
+#ifdef CONFIG_X86_64
+ else if (p->ainsn.loop.asize == 64)
+ match = ((*(u64 *)&regs->cx)--) != 0;
+#endif
+ else
+ match = ((*(u16 *)&regs->cx)--) != 0;
+ } else { /* JCXZ */
+ if (p->ainsn.loop.asize == 32)
+ match = *(u32 *)(&regs->cx) == 0;
+#ifdef CONFIG_X86_64
+ else if (p->ainsn.loop.asize == 64)
+ match = *(u64 *)(&regs->cx) == 0;
+#endif
+ else
+ match = *(u16 *)(&regs->cx) == 0;
+ }
+
+ if (p->ainsn.loop.type == 0) /* LOOPNE */
+ match = match && !(regs->flags & X86_EFLAGS_ZF);
+ else if (p->ainsn.loop.type == 1) /* LOOPE */
+ match = match && (regs->flags & X86_EFLAGS_ZF);
+
+ __kprobe_emulate_jmp(p, regs, match);
+}
+NOKPROBE_SYMBOL(kprobe_emulate_loop);
+
+static const int addrmode_regoffs[] = {
+ offsetof(struct pt_regs, ax),
+ offsetof(struct pt_regs, cx),
+ offsetof(struct pt_regs, dx),
+ offsetof(struct pt_regs, bx),
+ offsetof(struct pt_regs, sp),
+ offsetof(struct pt_regs, bp),
+ offsetof(struct pt_regs, si),
+ offsetof(struct pt_regs, di),
+#ifdef CONFIG_X86_64
+ offsetof(struct pt_regs, r8),
+ offsetof(struct pt_regs, r9),
+ offsetof(struct pt_regs, r10),
+ offsetof(struct pt_regs, r11),
+ offsetof(struct pt_regs, r12),
+ offsetof(struct pt_regs, r13),
+ offsetof(struct pt_regs, r14),
+ offsetof(struct pt_regs, r15),
+#endif
+};
+
+static void kprobe_emulate_call_indirect(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];
+
+ int3_emulate_call(regs, regs_get_register(regs, offs));
+}
+NOKPROBE_SYMBOL(kprobe_emulate_call_indirect);
+
+static void kprobe_emulate_jmp_indirect(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long offs = addrmode_regoffs[p->ainsn.indirect.reg];
+
+ int3_emulate_jmp(regs, regs_get_register(regs, offs));
+}
+NOKPROBE_SYMBOL(kprobe_emulate_jmp_indirect);
+
+static int prepare_emulation(struct kprobe *p, struct insn *insn)
{
insn_byte_t opcode = insn->opcode.bytes[0];
switch (opcode) {
case 0xfa: /* cli */
case 0xfb: /* sti */
+ case 0x9c: /* pushfl */
case 0x9d: /* popf/popfd */
- /* Check whether the instruction modifies Interrupt Flag or not */
- p->ainsn.if_modifier = 1;
- break;
- case 0x9c: /* pushfl */
- p->ainsn.is_pushf = 1;
+ /*
+ * IF modifiers must be emulated since they would enable interrupts
+ * while int3 single-stepping.
+ */
+ p->ainsn.emulate_op = kprobe_emulate_ifmodifiers;
+ p->ainsn.opcode = opcode;
break;
- case 0xcf: /* iret */
- p->ainsn.if_modifier = 1;
- fallthrough;
case 0xc2: /* ret/lret */
case 0xc3:
case 0xca:
case 0xcb:
- case 0xea: /* jmp absolute -- ip is correct */
- /* ip is already adjusted, no more changes required */
- p->ainsn.is_abs_ip = 1;
- /* Without resume jump, this is boostable */
- p->ainsn.boostable = 1;
+ p->ainsn.emulate_op = kprobe_emulate_ret;
break;
- case 0xe8: /* call relative - Fix return addr */
- p->ainsn.is_call = 1;
+ case 0x9a: /* far call absolute -- segment is not supported */
+ case 0xea: /* far jmp absolute -- segment is not supported */
+ case 0xcc: /* int3 */
+ case 0xcf: /* iret -- in-kernel IRET is not supported */
+ return -EOPNOTSUPP;
break;
-#ifdef CONFIG_X86_32
- case 0x9a: /* call absolute -- same as call absolute, indirect */
- p->ainsn.is_call = 1;
- p->ainsn.is_abs_ip = 1;
+ case 0xe8: /* near call relative */
+ p->ainsn.emulate_op = kprobe_emulate_call;
+ if (insn->immediate.nbytes == 2)
+ p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
+ else
+ p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
break;
-#endif
- case 0xff:
+ case 0xeb: /* short jump relative */
+ case 0xe9: /* near jump relative */
+ p->ainsn.emulate_op = kprobe_emulate_jmp;
+ if (insn->immediate.nbytes == 1)
+ p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
+ else if (insn->immediate.nbytes == 2)
+ p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
+ else
+ p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
+ break;
+ case 0x70 ... 0x7f:
+ /* 1 byte conditional jump */
+ p->ainsn.emulate_op = kprobe_emulate_jcc;
+ p->ainsn.jcc.type = opcode & 0xf;
+ p->ainsn.rel32 = *(char *)insn->immediate.bytes;
+ break;
+ case 0x0f:
opcode = insn->opcode.bytes[1];
+ if ((opcode & 0xf0) == 0x80) {
+ /* 2 bytes Conditional Jump */
+ p->ainsn.emulate_op = kprobe_emulate_jcc;
+ p->ainsn.jcc.type = opcode & 0xf;
+ if (insn->immediate.nbytes == 2)
+ p->ainsn.rel32 = *(s16 *)&insn->immediate.value;
+ else
+ p->ainsn.rel32 = *(s32 *)&insn->immediate.value;
+ } else if (opcode == 0x01 &&
+ X86_MODRM_REG(insn->modrm.bytes[0]) == 0 &&
+ X86_MODRM_MOD(insn->modrm.bytes[0]) == 3) {
+ /* VM extensions - not supported */
+ return -EOPNOTSUPP;
+ }
+ break;
+ case 0xe0: /* Loop NZ */
+ case 0xe1: /* Loop */
+ case 0xe2: /* Loop */
+ case 0xe3: /* J*CXZ */
+ p->ainsn.emulate_op = kprobe_emulate_loop;
+ p->ainsn.loop.type = opcode & 0x3;
+ p->ainsn.loop.asize = insn->addr_bytes * 8;
+ p->ainsn.rel32 = *(s8 *)&insn->immediate.value;
+ break;
+ case 0xff:
+ /*
+ * Since the 0xff is an extended group opcode, the instruction
+ * is determined by the MOD/RM byte.
+ */
+ opcode = insn->modrm.bytes[0];
if ((opcode & 0x30) == 0x10) {
- /*
- * call absolute, indirect
- * Fix return addr; ip is correct.
- * But this is not boostable
- */
- p->ainsn.is_call = 1;
- p->ainsn.is_abs_ip = 1;
+ if ((opcode & 0x8) == 0x8)
+ return -EOPNOTSUPP; /* far call */
+ /* call absolute, indirect */
+ p->ainsn.emulate_op = kprobe_emulate_call_indirect;
+ } else if ((opcode & 0x30) == 0x20) {
+ if ((opcode & 0x8) == 0x8)
+ return -EOPNOTSUPP; /* far jmp */
+ /* jmp near absolute indirect */
+ p->ainsn.emulate_op = kprobe_emulate_jmp_indirect;
+ } else
break;
- } else if (((opcode & 0x31) == 0x20) ||
- ((opcode & 0x31) == 0x21)) {
- /*
- * jmp near and far, absolute indirect
- * ip is correct.
- */
- p->ainsn.is_abs_ip = 1;
- /* Without resume jump, this is boostable */
- p->ainsn.boostable = 1;
- }
+
+ if (insn->addr_bytes != sizeof(unsigned long))
+ return -EOPNOTSUPP; /* Don't support different size */
+ if (X86_MODRM_MOD(opcode) != 3)
+ return -EOPNOTSUPP; /* TODO: support memory addressing */
+
+ p->ainsn.indirect.reg = X86_MODRM_RM(opcode);
+#ifdef CONFIG_X86_64
+ if (X86_REX_B(insn->rex_prefix.value))
+ p->ainsn.indirect.reg += 8;
+#endif
+ break;
+ default:
break;
}
+ p->ainsn.size = insn->length;
+
+ return 0;
}
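The 0xff group decode above keys entirely off the ModRM byte: bits 7-6 are mod, bits 5-3 are reg (the group sub-opcode), bits 2-0 are rm. A tiny decoder using the same field macros:

    #include <stdio.h>

    #define X86_MODRM_MOD(m) (((m) >> 6) & 0x3)
    #define X86_MODRM_REG(m) (((m) >> 3) & 0x7)
    #define X86_MODRM_RM(m)  ((m) & 0x7)

    int main(void)
    {
            unsigned char modrm = 0xd0;     /* ff /2: call *%rax */

            printf("mod=%d reg=%d rm=%d\n", X86_MODRM_MOD(modrm),
                   X86_MODRM_REG(modrm), X86_MODRM_RM(modrm));
            return 0;
    }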
static int arch_copy_kprobe(struct kprobe *p)
{
struct insn insn;
kprobe_opcode_t buf[MAX_INSN_SIZE];
- int len;
+ int ret, len;
/* Copy an instruction with recovering if other optprobe modifies it.*/
len = __copy_instruction(buf, p->addr, p->ainsn.insn, &insn);
if (!len)
return -EINVAL;
- /*
- * __copy_instruction can modify the displacement of the instruction,
- * but it doesn't affect boostable check.
- */
- len = prepare_boost(buf, p, &insn);
+ /* Analyze the opcode and setup emulate functions */
+ ret = prepare_emulation(p, &insn);
+ if (ret < 0)
+ return ret;
- /* Analyze the opcode and set resume flags */
- set_resume_flags(p, &insn);
+ /* Add int3 for single-step or booster jmp */
+ len = prepare_singlestep(buf, p, &insn);
+ if (len < 0)
+ return len;
/* Also, displacement change doesn't affect the first byte */
p->opcode = buf[0];
@@ -583,29 +798,7 @@ set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
{
__this_cpu_write(current_kprobe, p);
kcb->kprobe_saved_flags = kcb->kprobe_old_flags
- = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
- if (p->ainsn.if_modifier)
- kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
-}
-
-static nokprobe_inline void clear_btf(void)
-{
- if (test_thread_flag(TIF_BLOCKSTEP)) {
- unsigned long debugctl = get_debugctlmsr();
-
- debugctl &= ~DEBUGCTLMSR_BTF;
- update_debugctlmsr(debugctl);
- }
-}
-
-static nokprobe_inline void restore_btf(void)
-{
- if (test_thread_flag(TIF_BLOCKSTEP)) {
- unsigned long debugctl = get_debugctlmsr();
-
- debugctl |= DEBUGCTLMSR_BTF;
- update_debugctlmsr(debugctl);
- }
+ = (regs->flags & X86_EFLAGS_IF);
}
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
@@ -620,6 +813,22 @@ void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
+static void kprobe_post_process(struct kprobe *cur, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ /* Restore back the original saved kprobes variables and continue. */
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+}
+NOKPROBE_SYMBOL(kprobe_post_process);
+
static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb, int reenter)
{
@@ -627,7 +836,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
return;
#if !defined(CONFIG_PREEMPTION)
- if (p->ainsn.boostable && !p->post_handler) {
+ if (p->ainsn.boostable) {
/* Boost up -- we can execute copied instructions directly */
if (!reenter)
reset_current_kprobe();
@@ -646,19 +855,51 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
kcb->kprobe_status = KPROBE_REENTER;
} else
kcb->kprobe_status = KPROBE_HIT_SS;
- /* Prepare real single stepping */
- clear_btf();
- regs->flags |= X86_EFLAGS_TF;
+
+ if (p->ainsn.emulate_op) {
+ p->ainsn.emulate_op(p, regs);
+ kprobe_post_process(p, regs, kcb);
+ return;
+ }
+
+ /* Disable interrupts, and point the ip register at the trampoline */
regs->flags &= ~X86_EFLAGS_IF;
- /* single step inline if the instruction is an int3 */
- if (p->opcode == INT3_INSN_OPCODE)
- regs->ip = (unsigned long)p->addr;
- else
- regs->ip = (unsigned long)p->ainsn.insn;
+ regs->ip = (unsigned long)p->ainsn.insn;
}
NOKPROBE_SYMBOL(setup_singlestep);
/*
+ * Called after single-stepping. p->addr is the address of the
+ * instruction whose first byte has been replaced by the "int3"
+ * instruction. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction. The address of this
+ * copy is p->ainsn.insn. We also doesn't use trap, but "int3" again
+ * right after the copied instruction.
+ * Different from the trap single-step, "int3" single-step can not
+ * handle the instruction which changes the ip register, e.g. jmp,
+ * call, conditional jmp, and the instructions which changes the IF
+ * flags because interrupt must be disabled around the single-stepping.
+ * Such instructions are software emulated, but others are single-stepped
+ * using "int3".
+ *
+ * When the 2nd "int3" handled, the regs->ip and regs->flags needs to
+ * be adjusted, so that we can resume execution on correct code.
+ */
+static void resume_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ unsigned long copy_ip = (unsigned long)p->ainsn.insn;
+ unsigned long orig_ip = (unsigned long)p->addr;
+
+ /* Restore saved interrupt flag and ip register */
+ regs->flags |= kcb->kprobe_saved_flags;
+ /* Note that regs->ip is executed int3 so must be a step back */
+ regs->ip += (orig_ip - copy_ip) - INT3_INSN_SIZE;
+}
+NOKPROBE_SYMBOL(resume_singlestep);
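The ip fixup in resume_singlestep() is plain relocation arithmetic: the trap leaves regs->ip just past the int3 that follows the copied instruction, and the adjustment maps it back to just past the original. Worked through with concrete numbers:

    #include <assert.h>

    #define INT3_INSN_SIZE 1

    int main(void)
    {
            unsigned long orig_ip = 0x1000; /* probed instruction (p->addr) */
            unsigned long copy_ip = 0x9000; /* copied instruction (p->ainsn.insn) */
            unsigned long insn_len = 3;     /* length of the copied instruction */

            /* After the trailing int3 traps: ip = copy + len + int3. */
            unsigned long ip = copy_ip + insn_len + INT3_INSN_SIZE;

            ip += (orig_ip - copy_ip) - INT3_INSN_SIZE;
            assert(ip == orig_ip + insn_len);       /* resume after the original */
            return 0;
    }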
+
+/*
* We have reentered the kprobe_handler(), since another probe was hit while
* within the handler. We save the original kprobes variables and just single
* step on the instruction of the new probe without calling any user handlers.
@@ -693,6 +934,12 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
}
NOKPROBE_SYMBOL(reenter_kprobe);
+static nokprobe_inline int kprobe_is_ss(struct kprobe_ctlblk *kcb)
+{
+ return (kcb->kprobe_status == KPROBE_HIT_SS ||
+ kcb->kprobe_status == KPROBE_REENTER);
+}
+
/*
* Interrupts are disabled on entry as trap3 is an interrupt gate and they
* remain disabled throughout this function.
@@ -737,7 +984,18 @@ int kprobe_int3_handler(struct pt_regs *regs)
reset_current_kprobe();
return 1;
}
- } else if (*addr != INT3_INSN_OPCODE) {
+ } else if (kprobe_is_ss(kcb)) {
+ p = kprobe_running();
+ if ((unsigned long)p->ainsn.insn < regs->ip &&
+ (unsigned long)p->ainsn.insn + MAX_INSN_SIZE > regs->ip) {
+ /* Most probably this is the second int3 for single-step */
+ resume_singlestep(p, regs, kcb);
+ kprobe_post_process(p, regs, kcb);
+ return 1;
+ }
+ }
+
+ if (*addr != INT3_INSN_OPCODE) {
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
@@ -810,91 +1068,6 @@ __used __visible void *trampoline_handler(struct pt_regs *regs)
}
NOKPROBE_SYMBOL(trampoline_handler);
-/*
- * Called after single-stepping. p->addr is the address of the
- * instruction whose first byte has been replaced by the "int 3"
- * instruction. To avoid the SMP problems that can occur when we
- * temporarily put back the original opcode to single-step, we
- * single-stepped a copy of the instruction. The address of this
- * copy is p->ainsn.insn.
- *
- * This function prepares to return from the post-single-step
- * interrupt. We have to fix up the stack as follows:
- *
- * 0) Except in the case of absolute or indirect jump or call instructions,
- * the new ip is relative to the copied instruction. We need to make
- * it relative to the original instruction.
- *
- * 1) If the single-stepped instruction was pushfl, then the TF and IF
- * flags are set in the just-pushed flags, and may need to be cleared.
- *
- * 2) If the single-stepped instruction was a call, the return address
- * that is atop the stack is the address following the copied instruction.
- * We need to make it the address following the original instruction.
- */
-static void resume_execution(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
-{
- unsigned long *tos = stack_addr(regs);
- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
- unsigned long orig_ip = (unsigned long)p->addr;
-
- regs->flags &= ~X86_EFLAGS_TF;
-
- /* Fixup the contents of top of stack */
- if (p->ainsn.is_pushf) {
- *tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
- *tos |= kcb->kprobe_old_flags;
- } else if (p->ainsn.is_call) {
- *tos = orig_ip + (*tos - copy_ip);
- }
-
- if (!p->ainsn.is_abs_ip)
- regs->ip += orig_ip - copy_ip;
-
- restore_btf();
-}
-NOKPROBE_SYMBOL(resume_execution);
-
-/*
- * Interrupts are disabled on entry as trap1 is an interrupt gate and they
- * remain disabled throughout this function.
- */
-int kprobe_debug_handler(struct pt_regs *regs)
-{
- struct kprobe *cur = kprobe_running();
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (!cur)
- return 0;
-
- resume_execution(cur, regs, kcb);
- regs->flags |= kcb->kprobe_saved_flags;
-
- if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- cur->post_handler(cur, regs, 0);
- }
-
- /* Restore back the original saved kprobes variables and continue. */
- if (kcb->kprobe_status == KPROBE_REENTER) {
- restore_previous_kprobe(kcb);
- goto out;
- }
- reset_current_kprobe();
-out:
- /*
- * if somebody else is singlestepping across a probe point, flags
- * will have TF set, in which case, continue the remaining processing
- * of do_debug, as if this is not a probe hit.
- */
- if (regs->flags & X86_EFLAGS_TF)
- return 0;
-
- return 1;
-}
-NOKPROBE_SYMBOL(kprobe_debug_handler);
-
int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
@@ -912,20 +1085,9 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* normal page fault.
*/
regs->ip = (unsigned long)cur->addr;
- /*
- * Trap flag (TF) has been set here because this fault
- * happened where the single stepping will be done.
- * So clear it by resetting the current kprobe:
- */
- regs->flags &= ~X86_EFLAGS_TF;
- /*
- * Since the single step (trap) has been cancelled,
- * we need to restore BTF here.
- */
- restore_btf();
/*
- * If the TF flag was set before the kprobe hit,
+ * If the IF flag was set before the kprobe hit,
* don't touch it:
*/
regs->flags |= kcb->kprobe_old_flags;
@@ -934,24 +1096,6 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
- } else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
- kcb->kprobe_status == KPROBE_HIT_SSDONE) {
- /*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
}
return 0;
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 373e5fa3ce1f..596de2f6d3a5 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -12,7 +12,7 @@
#include "common.h"
-/* Ftrace callback handler for kprobes -- called under preepmt disabed */
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 08eb23074f92..71425ebba98a 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -312,6 +312,8 @@ static int can_optimize(unsigned long paddr)
addr = paddr - offset;
while (addr < paddr - offset + size) { /* Decode until function end */
unsigned long recovered_insn;
+ int ret;
+
if (search_exception_tables(addr))
/*
* Since some fixup code will jumps into this function,
@@ -321,8 +323,11 @@ static int can_optimize(unsigned long paddr)
recovered_insn = recover_probed_instruction(buf, addr);
if (!recovered_insn)
return 0;
- kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
- insn_get_length(&insn);
+
+ ret = insn_decode_kernel(&insn, (void *)recovered_insn);
+ if (ret < 0)
+ return 0;
+
/*
* In the case of detecting unknown breakpoint, this could be
* a padding INT3 between functions. Let's check that all the
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 5e78e01ca3b4..a26643dc6bd6 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -26,6 +26,7 @@
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
+#include <linux/syscore_ops.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
@@ -37,6 +38,7 @@
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
#include <asm/ptrace.h>
+#include <asm/reboot.h>
#include <asm/svm.h>
DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
@@ -345,7 +347,7 @@ static void kvm_guest_cpu_init(void)
wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
__this_cpu_write(apf_reason.enabled, 1);
- pr_info("KVM setup async PF for cpu %d\n", smp_processor_id());
+ pr_info("setup async PF for cpu %d\n", smp_processor_id());
}
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
@@ -371,34 +373,17 @@ static void kvm_pv_disable_apf(void)
wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
__this_cpu_write(apf_reason.enabled, 0);
- pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
+ pr_info("disable async PF for cpu %d\n", smp_processor_id());
}
-static void kvm_pv_guest_cpu_reboot(void *unused)
+static void kvm_disable_steal_time(void)
{
- /*
- * We disable PV EOI before we load a new kernel by kexec,
- * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
- * New kernel can re-enable when it boots.
- */
- if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
- wrmsrl(MSR_KVM_PV_EOI_EN, 0);
- kvm_pv_disable_apf();
- kvm_disable_steal_time();
-}
+ if (!has_steal_clock)
+ return;
-static int kvm_pv_reboot_notify(struct notifier_block *nb,
- unsigned long code, void *unused)
-{
- if (code == SYS_RESTART)
- on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
- return NOTIFY_DONE;
+ wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}
-static struct notifier_block kvm_pv_reboot_nb = {
- .notifier_call = kvm_pv_reboot_notify,
-};
-
static u64 kvm_steal_clock(int cpu)
{
u64 steal;
@@ -416,14 +401,6 @@ static u64 kvm_steal_clock(int cpu)
return steal;
}
-void kvm_disable_steal_time(void)
-{
- if (!has_steal_clock)
- return;
-
- wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
-}
-
static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
early_set_memory_decrypted((unsigned long) ptr, size);
@@ -451,6 +428,31 @@ static void __init sev_map_percpu_data(void)
}
}
+static void kvm_guest_cpu_offline(bool shutdown)
+{
+ kvm_disable_steal_time();
+ if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+ wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+ kvm_pv_disable_apf();
+ if (!shutdown)
+ apf_task_wake_all();
+ kvmclock_disable();
+}
+
+static int kvm_cpu_online(unsigned int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ kvm_guest_cpu_init();
+ local_irq_restore(flags);
+ return 0;
+}
+
+#ifdef CONFIG_SMP
+
+static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+
static bool pv_tlb_flush_supported(void)
{
return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
@@ -458,10 +460,6 @@ static bool pv_tlb_flush_supported(void)
kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
}
-static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
-
-#ifdef CONFIG_SMP
-
static bool pv_ipi_supported(void)
{
return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
@@ -574,6 +572,54 @@ static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
}
}
+static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
+ const struct flush_tlb_info *info)
+{
+ u8 state;
+ int cpu;
+ struct kvm_steal_time *src;
+ struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
+
+ cpumask_copy(flushmask, cpumask);
+ /*
+	 * We have to call flush only on online vCPUs, and
+	 * queue flush_on_enter for preempted vCPUs.
+ */
+ for_each_cpu(cpu, flushmask) {
+ /*
+ * The local vCPU is never preempted, so we do not explicitly
+		 * skip the check for the local vCPU - it will never be cleared from
+ * flushmask.
+ */
+ src = &per_cpu(steal_time, cpu);
+ state = READ_ONCE(src->preempted);
+ if ((state & KVM_VCPU_PREEMPTED)) {
+ if (try_cmpxchg(&src->preempted, &state,
+ state | KVM_VCPU_FLUSH_TLB))
+ __cpumask_clear_cpu(cpu, flushmask);
+ }
+ }
+
+ native_flush_tlb_multi(flushmask, info);
+}
+
+static __init int kvm_alloc_cpumask(void)
+{
+ int cpu;
+
+ if (!kvm_para_available() || nopv)
+ return 0;
+
+ if (pv_tlb_flush_supported() || pv_ipi_supported())
+ for_each_possible_cpu(cpu) {
+ zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
+ GFP_KERNEL, cpu_to_node(cpu));
+ }
+
+ return 0;
+}
+arch_initcall(kvm_alloc_cpumask);
+
static void __init kvm_smp_prepare_boot_cpu(void)
{
/*
@@ -587,57 +633,65 @@ static void __init kvm_smp_prepare_boot_cpu(void)
kvm_spinlock_init();
}
-static void kvm_guest_cpu_offline(void)
+static int kvm_cpu_down_prepare(unsigned int cpu)
{
- kvm_disable_steal_time();
- if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
- wrmsrl(MSR_KVM_PV_EOI_EN, 0);
- kvm_pv_disable_apf();
- apf_task_wake_all();
+ unsigned long flags;
+
+ local_irq_save(flags);
+ kvm_guest_cpu_offline(false);
+ local_irq_restore(flags);
+ return 0;
}
-static int kvm_cpu_online(unsigned int cpu)
+#endif
+
+static int kvm_suspend(void)
{
- local_irq_disable();
- kvm_guest_cpu_init();
- local_irq_enable();
+ kvm_guest_cpu_offline(false);
+
return 0;
}
-static int kvm_cpu_down_prepare(unsigned int cpu)
+static void kvm_resume(void)
{
- local_irq_disable();
- kvm_guest_cpu_offline();
- local_irq_enable();
- return 0;
+ kvm_cpu_online(raw_smp_processor_id());
}
-#endif
-static void kvm_flush_tlb_others(const struct cpumask *cpumask,
- const struct flush_tlb_info *info)
+static struct syscore_ops kvm_syscore_ops = {
+ .suspend = kvm_suspend,
+ .resume = kvm_resume,
+};
+
+static void kvm_pv_guest_cpu_reboot(void *unused)
{
- u8 state;
- int cpu;
- struct kvm_steal_time *src;
- struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
+ kvm_guest_cpu_offline(true);
+}
- cpumask_copy(flushmask, cpumask);
- /*
- * We have to call flush only on online vCPUs. And
- * queue flush_on_enter for pre-empted vCPUs
- */
- for_each_cpu(cpu, flushmask) {
- src = &per_cpu(steal_time, cpu);
- state = READ_ONCE(src->preempted);
- if ((state & KVM_VCPU_PREEMPTED)) {
- if (try_cmpxchg(&src->preempted, &state,
- state | KVM_VCPU_FLUSH_TLB))
- __cpumask_clear_cpu(cpu, flushmask);
- }
- }
+static int kvm_pv_reboot_notify(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ if (code == SYS_RESTART)
+ on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
+ return NOTIFY_DONE;
+}
- native_flush_tlb_others(flushmask, info);
+static struct notifier_block kvm_pv_reboot_nb = {
+ .notifier_call = kvm_pv_reboot_notify,
+};
+
+/*
+ * After a PV feature is registered, the host will keep writing to the
+ * registered memory location. If the guest happens to shut down, this memory
+ * won't be valid. In cases like kexec, in which a new kernel is installed, this
+ * means a random memory location will keep being written to.
+ */
+#ifdef CONFIG_KEXEC_CORE
+static void kvm_crash_shutdown(struct pt_regs *regs)
+{
+ kvm_guest_cpu_offline(true);
+ native_machine_crash_shutdown(regs);
}
+#endif
static void __init kvm_guest_init(void)
{
@@ -650,13 +704,7 @@ static void __init kvm_guest_init(void)
if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
has_steal_clock = 1;
- pv_ops.time.steal_clock = kvm_steal_clock;
- }
-
- if (pv_tlb_flush_supported()) {
- pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
- pv_ops.mmu.tlb_remove_table = tlb_remove_table;
- pr_info("KVM setup pv remote TLB flush\n");
+ static_call_update(pv_steal_clock, kvm_steal_clock);
}
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -668,6 +716,12 @@ static void __init kvm_guest_init(void)
}
#ifdef CONFIG_SMP
+ if (pv_tlb_flush_supported()) {
+ pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
+ pv_ops.mmu.tlb_remove_table = tlb_remove_table;
+ pr_info("KVM setup pv remote TLB flush\n");
+ }
+
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
if (pv_sched_yield_supported()) {
smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
@@ -681,6 +735,12 @@ static void __init kvm_guest_init(void)
kvm_guest_cpu_init();
#endif
+#ifdef CONFIG_KEXEC_CORE
+ machine_ops.crash_shutdown = kvm_crash_shutdown;
+#endif
+
+ register_syscore_ops(&kvm_syscore_ops);
+
/*
* Hard lockup detection is enabled by default. Disable it, as guests
* can get false positives too easily, for example if the host is
@@ -734,7 +794,7 @@ static uint32_t __init kvm_detect(void)
static void __init kvm_apic_init(void)
{
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
if (pv_ipi_supported())
kvm_setup_pv_ipi();
#endif
@@ -794,32 +854,6 @@ static __init int activate_jump_labels(void)
}
arch_initcall(activate_jump_labels);
-static __init int kvm_alloc_cpumask(void)
-{
- int cpu;
- bool alloc = false;
-
- if (!kvm_para_available() || nopv)
- return 0;
-
- if (pv_tlb_flush_supported())
- alloc = true;
-
-#if defined(CONFIG_SMP)
- if (pv_ipi_supported())
- alloc = true;
-#endif
-
- if (alloc)
- for_each_possible_cpu(cpu) {
- zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
- GFP_KERNEL, cpu_to_node(cpu));
- }
-
- return 0;
-}
-arch_initcall(kvm_alloc_cpumask);
-
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
@@ -836,28 +870,25 @@ static void kvm_kick_cpu(int cpu)
static void kvm_wait(u8 *ptr, u8 val)
{
- unsigned long flags;
-
if (in_nmi())
return;
- local_irq_save(flags);
-
- if (READ_ONCE(*ptr) != val)
- goto out;
-
/*
 * halt until it's our turn and we are kicked. Note that we do a safe halt
 * in the irq-enabled case to avoid hanging when lock info is overwritten
 * in the irq spinlock slowpath and no spurious interrupt occurs to save us.
*/
- if (arch_irqs_disabled_flags(flags))
- halt();
- else
- safe_halt();
+ if (irqs_disabled()) {
+ if (READ_ONCE(*ptr) == val)
+ halt();
+ } else {
+ local_irq_disable();
-out:
- local_irq_restore(flags);
+ if (READ_ONCE(*ptr) == val)
+ safe_halt();
+
+ local_irq_enable();
+ }
}
#ifdef CONFIG_X86_32
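
The teardown that used to hang off a reboot notifier alone is now shared between CPU hotplug, reboot, kexec crash, and suspend; the suspend leg goes through syscore_ops, registered later in kvm_guest_init(). A minimal sketch of that pattern (the my_pv_* helpers are hypothetical):

	#include <linux/syscore_ops.h>

	static int my_pv_suspend(void)
	{
		/* Late in suspend: one CPU left, IRQs off -- tear down shared PV state. */
		my_pv_teardown();		/* hypothetical helper */
		return 0;
	}

	static void my_pv_resume(void)
	{
		my_pv_setup();			/* hypothetical helper */
	}

	static struct syscore_ops my_pv_syscore_ops = {
		.suspend = my_pv_suspend,
		.resume  = my_pv_resume,
	};

	static int __init my_pv_init(void)
	{
		register_syscore_ops(&my_pv_syscore_ops);
		return 0;
	}
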
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index aa593743acf6..ad273e5861c1 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -20,7 +20,6 @@
#include <asm/hypervisor.h>
#include <asm/mem_encrypt.h>
#include <asm/x86_init.h>
-#include <asm/reboot.h>
#include <asm/kvmclock.h>
static int kvmclock __initdata = 1;
@@ -106,7 +105,7 @@ static inline void kvm_sched_clock_init(bool stable)
if (!stable)
clear_sched_clock_stable();
kvm_sched_clock_offset = kvm_clock_read();
- pv_ops.time.sched_clock = kvm_sched_clock_read;
+ paravirt_set_sched_clock(kvm_sched_clock_read);
pr_info("kvm-clock: using sched offset of %llu cycles",
kvm_sched_clock_offset);
@@ -203,28 +202,9 @@ static void kvm_setup_secondary_clock(void)
}
#endif
-/*
- * After the clock is registered, the host will keep writing to the
- * registered memory location. If the guest happens to shutdown, this memory
- * won't be valid. In cases like kexec, in which you install a new kernel, this
- * means a random memory location will be kept being written. So before any
- * kind of shutdown from our side, we unregister the clock by writing anything
- * that does not have the 'enable' bit set in the msr
- */
-#ifdef CONFIG_KEXEC_CORE
-static void kvm_crash_shutdown(struct pt_regs *regs)
+void kvmclock_disable(void)
{
native_write_msr(msr_kvm_system_time, 0, 0);
- kvm_disable_steal_time();
- native_machine_crash_shutdown(regs);
-}
-#endif
-
-static void kvm_shutdown(void)
-{
- native_write_msr(msr_kvm_system_time, 0, 0);
- kvm_disable_steal_time();
- native_machine_shutdown();
}
static void __init kvmclock_init_mem(void)
@@ -268,21 +248,20 @@ static void __init kvmclock_init_mem(void)
static int __init kvm_setup_vsyscall_timeinfo(void)
{
-#ifdef CONFIG_X86_64
- u8 flags;
+ kvmclock_init_mem();
- if (!per_cpu(hv_clock_per_cpu, 0) || !kvmclock_vsyscall)
- return 0;
+#ifdef CONFIG_X86_64
+ if (per_cpu(hv_clock_per_cpu, 0) && kvmclock_vsyscall) {
+ u8 flags;
- flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
- if (!(flags & PVCLOCK_TSC_STABLE_BIT))
- return 0;
+ flags = pvclock_read_flags(&hv_clock_boot[0].pvti);
+ if (!(flags & PVCLOCK_TSC_STABLE_BIT))
+ return 0;
- kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
+ kvm_clock.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK;
+ }
#endif
- kvmclock_init_mem();
-
return 0;
}
early_initcall(kvm_setup_vsyscall_timeinfo);
@@ -352,10 +331,6 @@ void __init kvmclock_init(void)
#endif
x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
- machine_ops.shutdown = kvm_shutdown;
-#ifdef CONFIG_KEXEC_CORE
- machine_ops.crash_shutdown = kvm_crash_shutdown;
-#endif
kvm_get_preset_lpj();
/*
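
kvmclock now installs its sched_clock through paravirt_set_sched_clock() (defined in the paravirt.c hunk below) instead of writing pv_ops.time.sched_clock directly. Another clocksource would hook in the same way; a sketch with a hypothetical reader and offset:

	static u64 my_sched_clock_read(void)
	{
		/* Hypothetical: nanoseconds since boot from a paravirt clock page. */
		return my_read_ns() - my_boot_offset_ns;
	}

	static void __init my_clock_init(void)
	{
		paravirt_set_sched_clock(my_sched_clock_read);
	}
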
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 64b00b0d7fe8..1b373d79cedc 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -23,17 +23,6 @@
#include <asm/set_memory.h>
#include <asm/debugreg.h>
-static void set_gdt(void *newgdt, __u16 limit)
-{
- struct desc_ptr curgdt;
-
- /* ia32 supports unaligned loads & stores */
- curgdt.size = limit;
- curgdt.address = (unsigned long)newgdt;
-
- load_gdt(&curgdt);
-}
-
static void load_segments(void)
{
#define __STR(X) #X
@@ -232,8 +221,8 @@ void machine_kexec(struct kimage *image)
* The gdt & idt are now invalid.
* If you want to load them you must set up your own idt & gdt.
*/
- idt_invalidate(phys_to_virt(0));
- set_gdt(phys_to_virt(0), 0);
+ native_idt_invalidate();
+ native_gdt_invalidate();
/* now call it */
image->start = relocate_kernel_ptr((unsigned long)image->head,
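
The removed set_gdt() is replaced by a shared helper; presumably native_gdt_invalidate() just loads a zero-limit descriptor table pointer, along these lines (a sketch, not the upstream definition):

	#include <asm/desc.h>

	static inline void my_gdt_invalidate(void)
	{
		/* Zero base and limit: any later descriptor fetch will fault. */
		const struct desc_ptr gdt = { .address = 0, .size = 0 };

		load_gdt(&gdt);
	}
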
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index a29a44a98e5b..131f30fdcfbd 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -256,35 +256,6 @@ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
return init_transition_pgtable(image, level4p);
}
-static void set_idt(void *newidt, u16 limit)
-{
- struct desc_ptr curidt;
-
- /* x86-64 supports unaliged loads & stores */
- curidt.size = limit;
- curidt.address = (unsigned long)newidt;
-
- __asm__ __volatile__ (
- "lidtq %0\n"
- : : "m" (curidt)
- );
-};
-
-
-static void set_gdt(void *newgdt, u16 limit)
-{
- struct desc_ptr curgdt;
-
- /* x86-64 supports unaligned loads & stores */
- curgdt.size = limit;
- curgdt.address = (unsigned long)newgdt;
-
- __asm__ __volatile__ (
- "lgdtq %0\n"
- : : "m" (curgdt)
- );
-};
-
static void load_segments(void)
{
__asm__ __volatile__ (
@@ -379,8 +350,8 @@ void machine_kexec(struct kimage *image)
* The gdt & idt are now invalid.
* If you want to load them you must set up your own idt & gdt.
*/
- set_gdt(phys_to_virt(0), 0);
- set_idt(phys_to_virt(0), 0);
+ native_idt_invalidate();
+ native_gdt_invalidate();
/* now call it */
image->start = relocate_kernel((unsigned long)image->head,
@@ -402,8 +373,8 @@ void machine_kexec(struct kimage *image)
#ifdef CONFIG_KEXEC_FILE
void *arch_kexec_kernel_image_load(struct kimage *image)
{
- vfree(image->arch.elf_headers);
- image->arch.elf_headers = NULL;
+ vfree(image->elf_headers);
+ image->elf_headers = NULL;
if (!image->fops || !image->fops->load)
return ERR_PTR(-ENOEXEC);
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index b5cb49e57df8..c94dec6a1834 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -95,7 +95,7 @@ static void get_fam10h_pci_mmconf_base(void)
return;
/* SYS_CFG */
- address = MSR_K8_SYSCFG;
+ address = MSR_AMD64_SYSCFG;
rdmsrl(address, val);
/* TOP_MEM2 is not enabled? */
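
The rename from MSR_K8_SYSCFG to MSR_AMD64_SYSCFG is mechanical; reads are unchanged. For reference (the bit position is illustrative, not taken from this hunk):

	static bool tom2_enabled(void)
	{
		u64 val;

		rdmsrl(MSR_AMD64_SYSCFG, val);	/* same register, new name */
		return val & BIT(21);		/* Tom2Enable -- illustrative bit */
	}
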
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index bf250a339655..4bce802d25fb 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -33,7 +33,7 @@
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
@@ -524,6 +524,16 @@ nmi_restart:
mds_user_clear_cpu_buffers();
}
+#if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
+DEFINE_IDTENTRY_RAW(exc_nmi_noist)
+{
+ exc_nmi(regs);
+}
+#endif
+#if IS_MODULE(CONFIG_KVM_INTEL)
+EXPORT_SYMBOL_GPL(asm_exc_nmi_noist);
+#endif
+
void stop_nmi(void)
{
ignore_nmis++;
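
The export above is guarded so it only exists when KVM can actually be built as a module. The general IS_ENABLED()/IS_MODULE() split looks like this (CONFIG_FOO and my_helper are hypothetical):

	#if IS_ENABLED(CONFIG_FOO)		/* consumer is built-in or modular */
	void my_helper(void)
	{
		/* ... */
	}
	#endif

	#if IS_MODULE(CONFIG_FOO)		/* a modular consumer needs the export */
	EXPORT_SYMBOL_GPL(my_helper);
	#endif
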
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 4f75d0cf6305..9e1ea99ad9df 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -32,3 +32,12 @@ bool pv_is_native_vcpu_is_preempted(void)
return pv_ops.lock.vcpu_is_preempted.func ==
__raw_callee_save___native_vcpu_is_preempted;
}
+
+void __init paravirt_set_cap(void)
+{
+ if (!pv_is_native_spin_unlock())
+ setup_force_cpu_cap(X86_FEATURE_PVUNLOCK);
+
+ if (!pv_is_native_vcpu_is_preempted())
+ setup_force_cpu_cap(X86_FEATURE_VCPUPREEMPT);
+}
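
paravirt_set_cap() converts the "op is still native" checks into synthetic CPU feature bits so later code can test them cheaply. A sketch of a consumer (the function name is hypothetical; the feature bit is the one set above):

	static bool my_use_pv_unlock(void)
	{
		/* Compiles down to a patched flag test once alternatives have run. */
		return cpu_feature_enabled(X86_FEATURE_PVUNLOCK);
	}
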
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c60222ab8ab9..04cafc057bed 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -14,6 +14,7 @@
#include <linux/highmem.h>
#include <linux/kprobes.h>
#include <linux/pgtable.h>
+#include <linux/static_call.h>
#include <asm/bug.h>
#include <asm/paravirt.h>
@@ -52,7 +53,10 @@ void __init default_banner(void)
}
/* Undefined instruction for dealing with missing ops pointers. */
-static const unsigned char ud2a[] = { 0x0f, 0x0b };
+static void paravirt_BUG(void)
+{
+ BUG();
+}
struct branch {
unsigned char opcode;
@@ -85,25 +89,6 @@ u64 notrace _paravirt_ident_64(u64 x)
{
return x;
}
-
-static unsigned paravirt_patch_jmp(void *insn_buff, const void *target,
- unsigned long addr, unsigned len)
-{
- struct branch *b = insn_buff;
- unsigned long delta = (unsigned long)target - (addr+5);
-
- if (len < 5) {
-#ifdef CONFIG_RETPOLINE
- WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
-#endif
- return len; /* call too long for patch site */
- }
-
- b->opcode = 0xe9; /* jmp */
- b->delta = delta;
-
- return 5;
-}
#endif
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
@@ -114,8 +99,8 @@ void __init native_pv_lock_init(void)
static_branch_disable(&virt_spin_lock_key);
}
-unsigned paravirt_patch_default(u8 type, void *insn_buff,
- unsigned long addr, unsigned len)
+unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr,
+ unsigned int len)
{
/*
* Neat trick to map patch type back to the call within the
@@ -125,20 +110,10 @@ unsigned paravirt_patch_default(u8 type, void *insn_buff,
unsigned ret;
if (opfunc == NULL)
- /* If there's no function, patch it with a ud2a (BUG) */
- ret = paravirt_patch_insns(insn_buff, len, ud2a, ud2a+sizeof(ud2a));
+ /* If there's no function, patch it with paravirt_BUG() */
+ ret = paravirt_patch_call(insn_buff, paravirt_BUG, addr, len);
else if (opfunc == _paravirt_nop)
ret = 0;
-
-#ifdef CONFIG_PARAVIRT_XXL
- /* identity functions just return their single argument */
- else if (opfunc == _paravirt_ident_64)
- ret = paravirt_patch_ident_64(insn_buff, len);
-
- else if (type == PARAVIRT_PATCH(cpu.iret))
- /* If operation requires a jmp, then jmp */
- ret = paravirt_patch_jmp(insn_buff, opfunc, addr, len);
-#endif
else
/* Otherwise call the function. */
ret = paravirt_patch_call(insn_buff, opfunc, addr, len);
@@ -146,19 +121,6 @@ unsigned paravirt_patch_default(u8 type, void *insn_buff,
return ret;
}
-unsigned paravirt_patch_insns(void *insn_buff, unsigned len,
- const char *start, const char *end)
-{
- unsigned insn_len = end - start;
-
- /* Alternative instruction is too large for the patch site and we cannot continue: */
- BUG_ON(insn_len > len || start == NULL);
-
- memcpy(insn_buff, start, insn_len);
-
- return insn_len;
-}
-
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
@@ -167,6 +129,14 @@ static u64 native_steal_clock(int cpu)
return 0;
}
+DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
+DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);
+
+void paravirt_set_sched_clock(u64 (*func)(void))
+{
+ static_call_update(pv_sched_clock, func);
+}
+
/* These are in entry.S */
extern void native_iret(void);
@@ -269,13 +239,6 @@ struct pv_info pv_info = {
#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
struct paravirt_patch_template pv_ops = {
- /* Init ops. */
- .init.patch = native_patch,
-
- /* Time ops. */
- .time.sched_clock = native_sched_clock,
- .time.steal_clock = native_steal_clock,
-
/* Cpu ops. */
.cpu.io_delay = native_io_delay,
@@ -308,8 +271,6 @@ struct paravirt_patch_template pv_ops = {
.cpu.load_sp0 = native_load_sp0,
- .cpu.iret = native_iret,
-
#ifdef CONFIG_X86_IOPL_IOPERM
.cpu.invalidate_io_bitmap = native_tss_invalidate_io_bitmap,
.cpu.update_io_bitmap = native_tss_update_io_bitmap,
@@ -330,7 +291,7 @@ struct paravirt_patch_template pv_ops = {
.mmu.flush_tlb_user = native_flush_tlb_local,
.mmu.flush_tlb_kernel = native_flush_tlb_global,
.mmu.flush_tlb_one_user = native_flush_tlb_one_user,
- .mmu.flush_tlb_others = native_flush_tlb_others,
+ .mmu.flush_tlb_multi = native_flush_tlb_multi,
.mmu.tlb_remove_table =
(void (*)(struct mmu_gather *, void *))tlb_remove_page,
@@ -414,6 +375,8 @@ struct paravirt_patch_template pv_ops = {
NOKPROBE_SYMBOL(native_get_debugreg);
NOKPROBE_SYMBOL(native_set_debugreg);
NOKPROBE_SYMBOL(native_load_idt);
+
+void (*paravirt_iret)(void) = native_iret;
#endif
EXPORT_SYMBOL(pv_ops);
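
sched_clock and steal_clock move from pv_ops indirect calls to static calls, which are emitted as patchable direct calls (no retpoline on the fast path). A self-contained sketch of the mechanism with illustrative names:

	#include <linux/static_call.h>

	static u64 default_clock(void)
	{
		return 0;
	}

	DEFINE_STATIC_CALL(my_clock, default_clock);

	static u64 read_clock(void)
	{
		/* A direct call; the target is rewritten in place on update. */
		return static_call(my_clock)();
	}

	static void install_clock(u64 (*fn)(void))
	{
		static_call_update(my_clock, fn);
	}
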
diff --git a/arch/x86/kernel/paravirt_patch.c b/arch/x86/kernel/paravirt_patch.c
deleted file mode 100644
index abd27ec67397..000000000000
--- a/arch/x86/kernel/paravirt_patch.c
+++ /dev/null
@@ -1,99 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/stringify.h>
-
-#include <asm/paravirt.h>
-#include <asm/asm-offsets.h>
-
-#define PSTART(d, m) \
- patch_data_##d.m
-
-#define PEND(d, m) \
- (PSTART(d, m) + sizeof(patch_data_##d.m))
-
-#define PATCH(d, m, insn_buff, len) \
- paravirt_patch_insns(insn_buff, len, PSTART(d, m), PEND(d, m))
-
-#define PATCH_CASE(ops, m, data, insn_buff, len) \
- case PARAVIRT_PATCH(ops.m): \
- return PATCH(data, ops##_##m, insn_buff, len)
-
-#ifdef CONFIG_PARAVIRT_XXL
-struct patch_xxl {
- const unsigned char irq_irq_disable[1];
- const unsigned char irq_irq_enable[1];
- const unsigned char irq_save_fl[2];
- const unsigned char mmu_read_cr2[3];
- const unsigned char mmu_read_cr3[3];
- const unsigned char mmu_write_cr3[3];
- const unsigned char cpu_wbinvd[2];
- const unsigned char mov64[3];
-};
-
-static const struct patch_xxl patch_data_xxl = {
- .irq_irq_disable = { 0xfa }, // cli
- .irq_irq_enable = { 0xfb }, // sti
- .irq_save_fl = { 0x9c, 0x58 }, // pushf; pop %[re]ax
- .mmu_read_cr2 = { 0x0f, 0x20, 0xd0 }, // mov %cr2, %[re]ax
- .mmu_read_cr3 = { 0x0f, 0x20, 0xd8 }, // mov %cr3, %[re]ax
- .mmu_write_cr3 = { 0x0f, 0x22, 0xdf }, // mov %rdi, %cr3
- .cpu_wbinvd = { 0x0f, 0x09 }, // wbinvd
- .mov64 = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax
-};
-
-unsigned int paravirt_patch_ident_64(void *insn_buff, unsigned int len)
-{
- return PATCH(xxl, mov64, insn_buff, len);
-}
-# endif /* CONFIG_PARAVIRT_XXL */
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-struct patch_lock {
- unsigned char queued_spin_unlock[3];
- unsigned char vcpu_is_preempted[2];
-};
-
-static const struct patch_lock patch_data_lock = {
- .vcpu_is_preempted = { 0x31, 0xc0 }, // xor %eax, %eax
-
-# ifdef CONFIG_X86_64
- .queued_spin_unlock = { 0xc6, 0x07, 0x00 }, // movb $0, (%rdi)
-# else
- .queued_spin_unlock = { 0xc6, 0x00, 0x00 }, // movb $0, (%eax)
-# endif
-};
-#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-
-unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
- unsigned int len)
-{
- switch (type) {
-
-#ifdef CONFIG_PARAVIRT_XXL
- PATCH_CASE(irq, save_fl, xxl, insn_buff, len);
- PATCH_CASE(irq, irq_enable, xxl, insn_buff, len);
- PATCH_CASE(irq, irq_disable, xxl, insn_buff, len);
-
- PATCH_CASE(mmu, read_cr2, xxl, insn_buff, len);
- PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
- PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);
-
- PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
-#endif
-
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
- case PARAVIRT_PATCH(lock.queued_spin_unlock):
- if (pv_is_native_spin_unlock())
- return PATCH(lock, queued_spin_unlock, insn_buff, len);
- break;
-
- case PARAVIRT_PATCH(lock.vcpu_is_preempted):
- if (pv_is_native_vcpu_is_preempted())
- return PATCH(lock, vcpu_is_preempted, insn_buff, len);
- break;
-#endif
- default:
- break;
- }
-
- return paravirt_patch_default(type, insn_buff, addr, len);
-}
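
With these byte templates deleted, every remaining patch site is rewritten by paravirt_patch_call() into a 5-byte direct call. A condensed sketch of that style of patching, reusing the struct branch layout shown in the paravirt.c hunk (details simplified):

	struct branch {
		unsigned char opcode;
		u32 delta;
	} __packed;

	static unsigned int patch_call(void *insn_buff, const void *target,
				       unsigned long addr, unsigned int len)
	{
		struct branch *b = insn_buff;

		if (len < 5)
			return len;	/* site too small for a call rel32 */

		b->opcode = 0xe8;	/* call */
		b->delta  = (unsigned long)target - (addr + 5);
		return 5;
	}
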
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9c214d7085a4..e52b208b4641 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -63,14 +63,9 @@ __visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
*/
.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
- /*
- * .sp1 is cpu_current_top_of_stack. The init task never
- * runs user code, but cpu_current_top_of_stack should still
- * be well defined before the first context switch.
- */
+#ifdef CONFIG_X86_32
.sp1 = TOP_OF_INIT_STACK,
-#ifdef CONFIG_X86_32
.ss0 = __KERNEL_DS,
.ss1 = __KERNEL_CS,
#endif
@@ -161,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
#endif
/* Kernel thread ? */
- if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ if (unlikely(p->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
kthread_frame_init(frame, sp, arg);
return 0;
@@ -177,6 +172,23 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
task_user_gs(p) = get_user_gs(current_pt_regs());
#endif
+ if (unlikely(p->flags & PF_IO_WORKER)) {
+ /*
+ * An IO thread is a user space thread, but it doesn't
+ * return to ret_after_fork().
+ *
+ * In order to indicate that to tools like gdb,
+ * we reset the stack and instruction pointers.
+ *
+	 * It does the same kernel-frame setup that a kernel thread
+	 * does in order to return to a kernel function.
+ */
+ childregs->sp = 0;
+ childregs->ip = 0;
+ kthread_frame_init(frame, sp, arg);
+ return 0;
+ }
+
/* Set a new TLS for the child thread? */
if (clone_flags & CLONE_SETTLS)
ret = set_new_tls(p, tls);
@@ -451,7 +463,7 @@ void speculative_store_bypass_ht_init(void)
* First HT sibling to come up on the core. Link shared state of
* the first HT sibling to itself. The siblings on the same core
* which come up later will see the shared state pointer and link
- * themself to the state of this CPU.
+ * themselves to the state of this CPU.
*/
st->shared_state = st;
}
@@ -919,7 +931,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long start, bottom, top, sp, fp, ip, ret = 0;
int count = 0;
- if (p == current || p->state == TASK_RUNNING)
+ if (p == current || task_is_running(p))
return 0;
if (!try_get_task_stack(p))
@@ -963,7 +975,7 @@ unsigned long get_wchan(struct task_struct *p)
goto out;
}
fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
- } while (count++ < 16 && p->state != TASK_RUNNING);
+ } while (count++ < 16 && !task_is_running(p));
out:
put_task_stack(p);
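
task_is_running() replaces the open-coded p->state comparison; presumably it is a single READ_ONCE() wrapper, roughly (a sketch, not the upstream definition):

	static inline bool my_task_is_running(struct task_struct *p)
	{
		/* One torn-free read of the scheduler state. */
		return READ_ONCE(p->state) == TASK_RUNNING;
	}
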
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 87a4143aa7d7..4c208ea3bd9f 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -911,7 +911,7 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 value)
* syscall with TS_COMPAT still set.
*/
regs->orig_ax = value;
- if (syscall_get_nr(child, regs) >= 0)
+ if (syscall_get_nr(child, regs) != -1)
child->thread_info.status |= TS_I386_REGS_POKED;
break;
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 11065dc03f5b..eda37df016f0 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -89,7 +89,7 @@ u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
/*
* Assumption here is that last_value, a global accumulator, always goes
* forward. If we are less than that, we should not be much smaller.
- * We assume there is an error marging we're inside, and then the correction
+ * We assume there is an error margin we're inside, and then the correction
* does not sacrifice accuracy.
*
* For reads: global may have changed between test and return,
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index b29657b76e3f..ebfb91108232 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -669,7 +669,7 @@ static void native_machine_emergency_restart(void)
break;
case BOOT_TRIPLE:
- idt_invalidate(NULL);
+ idt_invalidate();
__asm__ __volatile__("int3");
/* We're probably dead after this, but... */
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index 94b33885f8d2..f469153eca8a 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -107,7 +107,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
* - Write protect disabled
* - No task switch
* - Don't do FP software emulation.
- * - Proctected mode enabled
+ * - Protected mode enabled
*/
movl %cr0, %eax
andl $~(X86_CR0_PG | X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %eax
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index a4d9a261425b..c53271aebb64 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -121,7 +121,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
* - Write protect disabled
* - No task switch
* - Don't do FP software emulation.
- * - Proctected mode enabled
+ * - Protected mode enabled
*/
movq %cr0, %rax
andq $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d883176ef2ce..9f1d9215a9fb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -14,6 +14,7 @@
#include <linux/initrd.h>
#include <linux/iscsi_ibft.h>
#include <linux/memblock.h>
+#include <linux/panic_notifier.h>
#include <linux/pci.h>
#include <linux/root_dev.h>
#include <linux/hugetlb.h>
@@ -44,6 +45,7 @@
#include <asm/pci-direct.h>
#include <asm/prom.h>
#include <asm/proto.h>
+#include <asm/thermal.h>
#include <asm/unwind.h>
#include <asm/vsyscall.h>
#include <linux/vmalloc.h>
@@ -65,7 +67,7 @@ RESERVE_BRK(dmi_alloc, 65536);
/*
* Range of the BSS area. The size of the BSS area is determined
- * at link time, with RESERVE_BRK*() facility reserving additional
+ * at link time, with RESERVE_BRK() facility reserving additional
* chunks.
*/
unsigned long _brk_start = (unsigned long)__brk_base;
@@ -633,11 +635,16 @@ static void __init trim_snb_memory(void)
printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
/*
- * Reserve all memory below the 1 MB mark that has not
- * already been reserved.
+ * SandyBridge integrated graphics devices have a bug that prevents
+ * them from accessing certain memory ranges, namely anything below
+ * 1M and in the pages listed in bad_pages[] above.
+ *
+	 * To avoid these pages ever being accessed by SNB gfx devices, reserve
+	 * the bad_pages that have not already been reserved at boot time.
+ * All memory below the 1 MB mark is anyway reserved later during
+ * setup_arch(), so there is no need to reserve it here.
*/
- memblock_reserve(0, 1<<20);
-
+
for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
if (memblock_reserve(bad_pages[i], PAGE_SIZE))
printk(KERN_WARNING "failed to reserve 0x%08lx\n",
@@ -645,18 +652,6 @@ static void __init trim_snb_memory(void)
}
}
-/*
- * Here we put platform-specific memory range workarounds, i.e.
- * memory known to be corrupt or otherwise in need to be reserved on
- * specific platforms.
- *
- * If this gets used more widely it could use a real dispatch mechanism.
- */
-static void __init trim_platform_memory_ranges(void)
-{
- trim_snb_memory();
-}
-
static void __init trim_bios_range(void)
{
/*
@@ -701,35 +696,42 @@ static void __init e820_add_kernel_range(void)
e820__range_add(start, size, E820_TYPE_RAM);
}
-static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
-
-static int __init parse_reservelow(char *p)
+static void __init early_reserve_memory(void)
{
- unsigned long long size;
-
- if (!p)
- return -EINVAL;
+ /*
+ * Reserve the memory occupied by the kernel between _text and
+ * __end_of_kernel_reserve symbols. Any kernel sections after the
+ * __end_of_kernel_reserve symbol must be explicitly reserved with a
+ * separate memblock_reserve() or they will be discarded.
+ */
+ memblock_reserve(__pa_symbol(_text),
+ (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);
- size = memparse(p, &p);
+ /*
+	 * The first 4 KB of memory is a BIOS-owned area, but generally it is
+ * not listed as such in the E820 table.
+ *
+ * Reserve the first 64K of memory since some BIOSes are known to
+ * corrupt low memory. After the real mode trampoline is allocated the
+ * rest of the memory below 640k is reserved.
+ *
+ * In addition, make sure page 0 is always reserved because on
+ * systems with L1TF its contents can be leaked to user processes.
+ */
+ memblock_reserve(0, SZ_64K);
- if (size < 4096)
- size = 4096;
+ early_reserve_initrd();
- if (size > 640*1024)
- size = 640*1024;
+ if (efi_enabled(EFI_BOOT))
+ efi_memblock_x86_reserve_range();
- reserve_low = size;
+ memblock_x86_reserve_range_setup_data();
- return 0;
+ reserve_ibft_region();
+ reserve_bios_regions();
+ trim_snb_memory();
}
-early_param("reservelow", parse_reservelow);
-
-static void __init trim_low_memory_range(void)
-{
- memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
-}
-
/*
* Dump out kernel offset information on panic.
*/
@@ -764,29 +766,6 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
void __init setup_arch(char **cmdline_p)
{
- /*
- * Reserve the memory occupied by the kernel between _text and
- * __end_of_kernel_reserve symbols. Any kernel sections after the
- * __end_of_kernel_reserve symbol must be explicitly reserved with a
- * separate memblock_reserve() or they will be discarded.
- */
- memblock_reserve(__pa_symbol(_text),
- (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);
-
- /*
- * Make sure page 0 is always reserved because on systems with
- * L1TF its contents can be leaked to user processes.
- */
- memblock_reserve(0, PAGE_SIZE);
-
- early_reserve_initrd();
-
- /*
- * At this point everything still needed from the boot loader
- * or BIOS or kernel text should be early reserved or marked not
- * RAM in e820. All other memory is free game.
- */
-
#ifdef CONFIG_X86_32
memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
@@ -822,7 +801,6 @@ void __init setup_arch(char **cmdline_p)
idt_setup_early_traps();
early_cpu_init();
- arch_init_ideal_nops();
jump_label_init();
static_call_init();
early_ioremap_init();
@@ -910,8 +888,18 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
- if (efi_enabled(EFI_BOOT))
- efi_memblock_x86_reserve_range();
+ /*
+ * Do some memory reservations *before* memory is added to
+ * memblock, so memblock allocations won't overwrite it.
+	 * Do it after early param parsing, so an (unlikely) panic can still
+	 * reach the serial console.
+ *
+ * After this point everything still needed from the boot loader or
+ * firmware or kernel text should be early reserved or marked not
+ * RAM in e820. All other memory is free game.
+ */
+ early_reserve_memory();
+
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Memory used by the kernel cannot be hot-removed because Linux
@@ -938,9 +926,6 @@ void __init setup_arch(char **cmdline_p)
x86_report_nx();
- /* after early param, so could get panic from serial */
- memblock_x86_reserve_range_setup_data();
-
if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
disable_apic = 1;
@@ -1032,14 +1017,12 @@ void __init setup_arch(char **cmdline_p)
*/
find_smp_config();
- reserve_ibft_region();
-
early_alloc_pgt_buf();
/*
* Need to conclude brk, before e820__memblock_setup()
- * it could use memblock_find_in_range, could overlap with
- * brk area.
+ * it could use memblock_find_in_range, could overlap with
+ * brk area.
*/
reserve_brk();
@@ -1054,8 +1037,6 @@ void __init setup_arch(char **cmdline_p)
*/
sev_setup_arch();
- reserve_bios_regions();
-
efi_fake_memmap();
efi_find_mirror();
efi_esrt_init();
@@ -1079,11 +1060,22 @@ void __init setup_arch(char **cmdline_p)
(max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif
+ /*
+ * Find free memory for the real mode trampoline and place it there. If
+ * there is not enough free memory under 1M, on EFI-enabled systems
+	 * there will be an additional attempt to reclaim the memory for the real
+ * mode trampoline at efi_free_boot_services().
+ *
+ * Unconditionally reserve the entire first 1M of RAM because BIOSes
+ * are known to corrupt low memory and several hundred kilobytes are not
+	 * worth the complexity of detecting what memory gets clobbered. Windows does the
+ * same thing for very similar reasons.
+ *
+ * Moreover, on machines with SandyBridge graphics or in setups that use
+ * crashkernel the entire 1M is reserved anyway.
+ */
reserve_real_mode();
- trim_platform_memory_ranges();
- trim_low_memory_range();
-
init_mem_mapping();
idt_setup_early_pf();
@@ -1129,6 +1121,8 @@ void __init setup_arch(char **cmdline_p)
reserve_initrd();
acpi_table_upgrade();
+ /* Look for ACPI tables and reserve memory occupied by them. */
+ acpi_boot_table_init();
vsmp_init();
@@ -1136,11 +1130,6 @@ void __init setup_arch(char **cmdline_p)
early_platform_quirks();
- /*
- * Parse the ACPI tables for possible boot-time SMP configuration.
- */
- acpi_boot_table_init();
-
early_acpi_boot_init();
initmem_init();
@@ -1223,6 +1212,14 @@ void __init setup_arch(char **cmdline_p)
x86_init.timers.wallclock_init();
+ /*
+ * This needs to run before setup_local_APIC() which soft-disables the
+ * local APIC temporarily and that masks the thermal LVT interrupt,
+ * leading to softlockups on machines which have configured SMI
+ * interrupt delivery.
+ */
+ therm_lvt_init();
+
mcheck_init();
register_refined_jiffies(CLOCK_TICK_RATE);
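
The core rule behind early_reserve_memory() is ordering: every firmware- or kernel-owned range must be handed to memblock_reserve() before the first memblock allocation can be satisfied from it. A minimal sketch (ranges and names are illustrative):

	void __init my_early_reserve(void)
	{
		/* Reserve owned ranges first ... */
		memblock_reserve(0, SZ_64K);
		memblock_reserve(__pa_symbol(_text),
				 (unsigned long)_end - (unsigned long)_text);
	}

	void __init my_setup(void)
	{
		void *buf;

		my_early_reserve();
		/* ... only then let memblock hand out memory. */
		buf = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	}
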
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index fd945ce78554..78a32b956e81 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -66,7 +66,7 @@ EXPORT_SYMBOL(__per_cpu_offset);
*/
static bool __init pcpu_need_numa(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
pg_data_t *last = NULL;
unsigned int cpu;
@@ -101,7 +101,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
unsigned long align)
{
const unsigned long goal = __pa(MAX_DMA_ADDRESS);
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
int node = early_cpu_to_node(cpu);
void *ptr;
@@ -140,7 +140,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
if (early_cpu_to_node(from) == early_cpu_to_node(to))
return LOCAL_DISTANCE;
else
@@ -224,7 +224,6 @@ void __init setup_per_cpu_areas(void)
per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
per_cpu(cpu_number, cpu) = cpu;
setup_percpu_segment(cpu);
- setup_stack_canary_segment(cpu);
/*
* Copy data used in early init routines from the
* initial arrays to the per cpu data areas. These
diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-shared.c
index cdc04d091242..9f90f460a28c 100644
--- a/arch/x86/kernel/sev-es-shared.c
+++ b/arch/x86/kernel/sev-shared.c
@@ -24,15 +24,15 @@ static bool __init sev_es_check_cpu_features(void)
return true;
}
-static void sev_es_terminate(unsigned int reason)
+static void __noreturn sev_es_terminate(unsigned int reason)
{
- u64 val = GHCB_SEV_TERMINATE;
+ u64 val = GHCB_MSR_TERM_REQ;
/*
* Tell the hypervisor what went wrong - only reason-set 0 is
* currently supported.
*/
- val |= GHCB_SEV_TERMINATE_REASON(0, reason);
+ val |= GHCB_SEV_TERM_REASON(0, reason);
/* Request Guest Termination from Hypervisor */
sev_es_wr_ghcb_msr(val);
@@ -47,15 +47,15 @@ static bool sev_es_negotiate_protocol(void)
u64 val;
/* Do the GHCB protocol version negotiation */
- sev_es_wr_ghcb_msr(GHCB_SEV_INFO_REQ);
+ sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_INFO(val) != GHCB_SEV_INFO)
+ if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
return false;
- if (GHCB_PROTO_MAX(val) < GHCB_PROTO_OUR ||
- GHCB_PROTO_MIN(val) > GHCB_PROTO_OUR)
+ if (GHCB_MSR_PROTO_MAX(val) < GHCB_PROTO_OUR ||
+ GHCB_MSR_PROTO_MIN(val) > GHCB_PROTO_OUR)
return false;
return true;
@@ -63,6 +63,7 @@ static bool sev_es_negotiate_protocol(void)
static __always_inline void vc_ghcb_invalidate(struct ghcb *ghcb)
{
+ ghcb->save.sw_exit_code = 0;
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
@@ -153,28 +154,28 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EAX));
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
goto fail;
regs->ax = val >> 32;
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EBX));
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
goto fail;
regs->bx = val >> 32;
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_ECX));
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
goto fail;
regs->cx = val >> 32;
sev_es_wr_ghcb_msr(GHCB_CPUID_REQ(fn, GHCB_CPUID_REQ_EDX));
VMGEXIT();
val = sev_es_rd_ghcb_msr();
- if (GHCB_SEV_GHCB_RESP_CODE(val) != GHCB_SEV_CPUID_RESP)
+ if (GHCB_RESP_CODE(val) != GHCB_MSR_CPUID_RESP)
goto fail;
regs->dx = val >> 32;
@@ -186,7 +187,6 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
* make it accessible to the hypervisor.
*
* In particular, check for:
- * - Hypervisor CPUID bit
* - Availability of CPUID leaf 0x8000001f
* - SEV CPUID bit.
*
@@ -194,10 +194,7 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
* can't be checked here.
*/
- if ((fn == 1 && !(regs->cx & BIT(31))))
- /* Hypervisor bit */
- goto fail;
- else if (fn == 0x80000000 && (regs->ax < 0x8000001f))
+ if (fn == 0x80000000 && (regs->ax < 0x8000001f))
/* SEV leaf check */
goto fail;
else if ((fn == 0x8000001f && !(regs->ax & BIT(1))))
@@ -210,12 +207,8 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code)
return;
fail:
- sev_es_wr_ghcb_msr(GHCB_SEV_TERMINATE);
- VMGEXIT();
-
- /* Shouldn't get here - if we do halt the machine */
- while (true)
- asm volatile("hlt\n");
+ /* Terminate the guest */
+ sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
}
static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
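
All the renamed GHCB_MSR_* constants serve the same MSR-protocol shape: write a request into the GHCB MSR, VMGEXIT, then read back and validate the response. Condensed, using the names from this patch:

	u64 val;

	sev_es_wr_ghcb_msr(GHCB_MSR_SEV_INFO_REQ);	/* request */
	VMGEXIT();					/* exit to the hypervisor */
	val = sev_es_rd_ghcb_msr();			/* response */

	if (GHCB_MSR_INFO(val) != GHCB_MSR_SEV_INFO_RESP)
		sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
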
diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev.c
index 84c1821819af..a6895e440bc3 100644
--- a/arch/x86/kernel/sev-es.c
+++ b/arch/x86/kernel/sev.c
@@ -7,12 +7,11 @@
* Author: Joerg Roedel <jroedel@suse.de>
*/
-#define pr_fmt(fmt) "SEV-ES: " fmt
+#define pr_fmt(fmt) "SEV: " fmt
#include <linux/sched/debug.h> /* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/mem_encrypt.h>
-#include <linux/lockdep.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
@@ -22,7 +21,7 @@
#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
#include <asm/insn-eval.h>
#include <asm/fpu/internal.h>
#include <asm/processor.h>
@@ -121,35 +120,57 @@ static void __init setup_vc_stacks(int cpu)
cea_set_pte((void *)vaddr, pa, PAGE_KERNEL);
}
-static __always_inline bool on_vc_stack(unsigned long sp)
+static __always_inline bool on_vc_stack(struct pt_regs *regs)
{
+ unsigned long sp = regs->sp;
+
+ /* User-mode RSP is not trusted */
+ if (user_mode(regs))
+ return false;
+
+ /* SYSCALL gap still has user-mode RSP */
+ if (ip_within_syscall_gap(regs))
+ return false;
+
return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
}
/*
- * This function handles the case when an NMI is raised in the #VC exception
- * handler entry code. In this case, the IST entry for #VC must be adjusted, so
- * that any subsequent #VC exception will not overwrite the stack contents of the
- * interrupted #VC handler.
+ * This function handles the case when an NMI is raised in the #VC
+ * exception handler entry code, before the #VC handler has switched off
+ * its IST stack. In this case, the IST entry for #VC must be adjusted,
+ * so that any nested #VC exception will not overwrite the stack
+ * contents of the interrupted #VC handler.
*
 * The IST entry is adjusted unconditionally so that it can also be
- * unconditionally adjusted back in sev_es_ist_exit(). Otherwise a nested
- * sev_es_ist_exit() call may adjust back the IST entry too early.
+ * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
+ * nested sev_es_ist_exit() call may adjust back the IST entry too
+ * early.
+ *
+ * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
+ * on the NMI IST stack, as they are only called from NMI handling code
+ * right now.
*/
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{
unsigned long old_ist, new_ist;
/* Read old IST entry */
- old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+ new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
- /* Make room on the IST stack */
- if (on_vc_stack(regs->sp))
- new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
- else
- new_ist = old_ist - sizeof(old_ist);
+ /*
+ * If NMI happened while on the #VC IST stack, set the new IST
+ * value below regs->sp, so that the interrupted stack frame is
+ * not overwritten by subsequent #VC exceptions.
+ */
+ if (on_vc_stack(regs))
+ new_ist = regs->sp;
- /* Store old IST entry */
+ /*
+	 * Reserve an additional 8 bytes and store the old IST value so this
+ * adjustment can be unrolled in __sev_es_ist_exit().
+ */
+ new_ist -= sizeof(old_ist);
*(unsigned long *)new_ist = old_ist;
/* Set new IST entry */
@@ -170,19 +191,39 @@ void noinstr __sev_es_ist_exit(void)
this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}
-static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
+/*
+ * Nothing shall interrupt this code path while holding the per-CPU
+ * GHCB. The backup GHCB is only for NMIs interrupting this path.
+ *
+ * Callers must disable local interrupts around it.
+ */
+static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
struct sev_es_runtime_data *data;
struct ghcb *ghcb;
+ WARN_ON(!irqs_disabled());
+
data = this_cpu_read(runtime_data);
ghcb = &data->ghcb_page;
if (unlikely(data->ghcb_active)) {
/* GHCB is already in use - save its contents */
- if (unlikely(data->backup_ghcb_active))
- return NULL;
+ if (unlikely(data->backup_ghcb_active)) {
+ /*
+ * Backup-GHCB is also already in use. There is no way
+ * to continue here so just kill the machine. To make
+ * panic() work, mark GHCBs inactive so that messages
+ * can be printed out.
+ */
+ data->ghcb_active = false;
+ data->backup_ghcb_active = false;
+
+ instrumentation_begin();
+ panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
+ instrumentation_end();
+ }
/* Mark backup_ghcb active before writing to it */
data->backup_ghcb_active = true;
@@ -199,24 +240,6 @@ static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
return ghcb;
}
-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
-{
- struct sev_es_runtime_data *data;
- struct ghcb *ghcb;
-
- data = this_cpu_read(runtime_data);
- ghcb = &data->ghcb_page;
-
- if (state->ghcb) {
- /* Restore GHCB from Backup */
- *ghcb = *state->ghcb;
- data->backup_ghcb_active = false;
- state->ghcb = NULL;
- } else {
- data->ghcb_active = false;
- }
-}
-
/* Needed in vc_early_forward_exception */
void do_early_exception(struct pt_regs *regs, int trapnr);
@@ -241,39 +264,61 @@ static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
}
-static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
+static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
{
char buffer[MAX_INSN_SIZE];
- enum es_result ret;
- int res;
-
- if (user_mode(ctxt->regs)) {
- res = insn_fetch_from_user(ctxt->regs, buffer);
- if (!res) {
- ctxt->fi.vector = X86_TRAP_PF;
- ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
- ctxt->fi.cr2 = ctxt->regs->ip;
- return ES_EXCEPTION;
- }
+ int insn_bytes;
- if (!insn_decode(&ctxt->insn, ctxt->regs, buffer, res))
- return ES_DECODE_FAILED;
- } else {
- res = vc_fetch_insn_kernel(ctxt, buffer);
- if (res) {
- ctxt->fi.vector = X86_TRAP_PF;
- ctxt->fi.error_code = X86_PF_INSTR;
- ctxt->fi.cr2 = ctxt->regs->ip;
- return ES_EXCEPTION;
- }
+ insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
+ if (insn_bytes == 0) {
+ /* Nothing could be copied */
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
+ ctxt->fi.cr2 = ctxt->regs->ip;
+ return ES_EXCEPTION;
+ } else if (insn_bytes == -EINVAL) {
+ /* Effective RIP could not be calculated */
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+ ctxt->fi.cr2 = 0;
+ return ES_EXCEPTION;
+ }
- insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE - res, 1);
- insn_get_length(&ctxt->insn);
+ if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
+ return ES_DECODE_FAILED;
+
+ if (ctxt->insn.immediate.got)
+ return ES_OK;
+ else
+ return ES_DECODE_FAILED;
+}
+
+static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
+{
+ char buffer[MAX_INSN_SIZE];
+ int res, ret;
+
+ res = vc_fetch_insn_kernel(ctxt, buffer);
+ if (res) {
+ ctxt->fi.vector = X86_TRAP_PF;
+ ctxt->fi.error_code = X86_PF_INSTR;
+ ctxt->fi.cr2 = ctxt->regs->ip;
+ return ES_EXCEPTION;
}
- ret = ctxt->insn.immediate.got ? ES_OK : ES_DECODE_FAILED;
+ ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
+ if (ret < 0)
+ return ES_DECODE_FAILED;
+ else
+ return ES_OK;
+}
- return ret;
+static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
+{
+ if (user_mode(ctxt->regs))
+ return __vc_decode_user_insn(ctxt);
+ else
+ return __vc_decode_kern_insn(ctxt);
}
static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
@@ -286,31 +331,44 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
u16 d2;
u8 d1;
- /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
- if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
- memcpy(dst, buf, size);
- return ES_OK;
- }
-
+ /*
+ * This function uses __put_user() independent of whether kernel or user
+ * memory is accessed. This works fine because __put_user() does no
+	 * sanity checks of the pointer being accessed. All it does is
+	 * report when the access fails.
+ *
+ * Also, this function runs in atomic context, so __put_user() is not
+ * allowed to sleep. The page-fault handler detects that it is running
+ * in atomic context and will not try to take mmap_sem and handle the
+ * fault, so additional pagefault_enable()/disable() calls are not
+ * needed.
+ *
+ * The access can't be done via copy_to_user() here because
+ * vc_write_mem() must not use string instructions to access unsafe
+ * memory. The reason is that MOVS is emulated by the #VC handler by
+ * splitting the move up into a read and a write and taking a nested #VC
+	 * exception on whichever of them is the MMIO access. Using string
+ * instructions here would cause infinite nesting.
+ */
switch (size) {
case 1:
memcpy(&d1, buf, 1);
- if (put_user(d1, target))
+ if (__put_user(d1, target))
goto fault;
break;
case 2:
memcpy(&d2, buf, 2);
- if (put_user(d2, target))
+ if (__put_user(d2, target))
goto fault;
break;
case 4:
memcpy(&d4, buf, 4);
- if (put_user(d4, target))
+ if (__put_user(d4, target))
goto fault;
break;
case 8:
memcpy(&d8, buf, 8);
- if (put_user(d8, target))
+ if (__put_user(d8, target))
goto fault;
break;
default:
@@ -341,30 +399,43 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
u16 d2;
u8 d1;
- /* If instruction ran in kernel mode and the I/O buffer is in kernel space */
- if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
- memcpy(buf, src, size);
- return ES_OK;
- }
-
+ /*
+ * This function uses __get_user() independent of whether kernel or user
+ * memory is accessed. This works fine because __get_user() does no
+	 * sanity checks of the pointer being accessed. All it does is
+	 * report when the access fails.
+ *
+ * Also, this function runs in atomic context, so __get_user() is not
+ * allowed to sleep. The page-fault handler detects that it is running
+ * in atomic context and will not try to take mmap_sem and handle the
+ * fault, so additional pagefault_enable()/disable() calls are not
+ * needed.
+ *
+ * The access can't be done via copy_from_user() here because
+ * vc_read_mem() must not use string instructions to access unsafe
+ * memory. The reason is that MOVS is emulated by the #VC handler by
+ * splitting the move up into a read and a write and taking a nested #VC
+	 * exception on whichever of them is the MMIO access. Using string
+ * instructions here would cause infinite nesting.
+ */
switch (size) {
case 1:
- if (get_user(d1, s))
+ if (__get_user(d1, s))
goto fault;
memcpy(buf, &d1, 1);
break;
case 2:
- if (get_user(d2, s))
+ if (__get_user(d2, s))
goto fault;
memcpy(buf, &d2, 2);
break;
case 4:
- if (get_user(d4, s))
+ if (__get_user(d4, s))
goto fault;
memcpy(buf, &d4, 4);
break;
case 8:
- if (get_user(d8, s))
+ if (__get_user(d8, s))
goto fault;
memcpy(buf, &d8, 8);
break;
@@ -422,14 +493,39 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
}
/* Include code shared with pre-decompression boot stage */
-#include "sev-es-shared.c"
+#include "sev-shared.c"
+
+static noinstr void __sev_put_ghcb(struct ghcb_state *state)
+{
+ struct sev_es_runtime_data *data;
+ struct ghcb *ghcb;
+
+ WARN_ON(!irqs_disabled());
+
+ data = this_cpu_read(runtime_data);
+ ghcb = &data->ghcb_page;
+
+ if (state->ghcb) {
+ /* Restore GHCB from Backup */
+ *ghcb = *state->ghcb;
+ data->backup_ghcb_active = false;
+ state->ghcb = NULL;
+ } else {
+ /*
+ * Invalidate the GHCB so a VMGEXIT instruction issued
+ * from userspace won't appear to be valid.
+ */
+ vc_ghcb_invalidate(ghcb);
+ data->ghcb_active = false;
+ }
+}
void noinstr __sev_es_nmi_complete(void)
{
struct ghcb_state state;
struct ghcb *ghcb;
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = __sev_get_ghcb(&state);
vc_ghcb_invalidate(ghcb);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
@@ -439,7 +535,7 @@ void noinstr __sev_es_nmi_complete(void)
sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
VMGEXIT();
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
}
static u64 get_jump_table_addr(void)
@@ -451,7 +547,7 @@ static u64 get_jump_table_addr(void)
local_irq_save(flags);
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = __sev_get_ghcb(&state);
vc_ghcb_invalidate(ghcb);
ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
@@ -465,7 +561,7 @@ static u64 get_jump_table_addr(void)
ghcb_sw_exit_info_2_is_valid(ghcb))
ret = ghcb->save.sw_exit_info_2;
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
local_irq_restore(flags);
@@ -590,7 +686,7 @@ static void sev_es_ap_hlt_loop(void)
struct ghcb_state state;
struct ghcb *ghcb;
- ghcb = sev_es_get_ghcb(&state);
+ ghcb = __sev_get_ghcb(&state);
while (true) {
vc_ghcb_invalidate(ghcb);
@@ -607,7 +703,7 @@ static void sev_es_ap_hlt_loop(void)
break;
}
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
}
/*
@@ -697,7 +793,7 @@ void __init sev_es_init_vc_handling(void)
sev_es_setup_play_dead();
/* Secondary CPUs use the runtime #VC handler */
- initial_vc_handler = (unsigned long)safe_stack_exc_vmm_communication;
+ initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
}
static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
@@ -1135,14 +1231,6 @@ static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
return ES_EXCEPTION;
}
-static __always_inline void vc_handle_trap_db(struct pt_regs *regs)
-{
- if (user_mode(regs))
- noist_exc_debug(regs);
- else
- exc_debug(regs);
-}
-
static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
struct ghcb *ghcb,
unsigned long exit_code)
@@ -1218,6 +1306,10 @@ static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
case X86_TRAP_UD:
exc_invalid_op(ctxt->regs);
break;
+ case X86_TRAP_PF:
+ write_cr2(ctxt->fi.cr2);
+ exc_page_fault(ctxt->regs, error_code);
+ break;
case X86_TRAP_AC:
exc_alignment_check(ctxt->regs, error_code);
break;
@@ -1234,54 +1326,15 @@ static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
}
-/*
- * Main #VC exception handler. It is called when the entry code was able to
- * switch off the IST to a safe kernel stack.
- *
- * With the current implementation it is always possible to switch to a safe
- * stack because #VC exceptions only happen at known places, like intercepted
- * instructions or accesses to MMIO areas/IO ports. They can also happen with
- * code instrumentation when the hypervisor intercepts #DB, but the critical
- * paths are forbidden to be instrumented, so #DB exceptions currently also
- * only happen in safe places.
- */
-DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
+static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
{
- struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
struct ghcb_state state;
struct es_em_ctxt ctxt;
enum es_result result;
struct ghcb *ghcb;
+ bool ret = true;
- lockdep_assert_irqs_disabled();
-
- /*
- * Handle #DB before calling into !noinstr code to avoid recursive #DB.
- */
- if (error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB) {
- vc_handle_trap_db(regs);
- return;
- }
-
- instrumentation_begin();
-
- /*
- * This is invoked through an interrupt gate, so IRQs are disabled. The
- * code below might walk page-tables for user or kernel addresses, so
- * keep the IRQs disabled to protect us against concurrent TLB flushes.
- */
-
- ghcb = sev_es_get_ghcb(&state);
- if (!ghcb) {
- /*
- * Mark GHCBs inactive so that panic() is able to print the
- * message.
- */
- data->ghcb_active = false;
- data->backup_ghcb_active = false;
-
- panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
- }
+ ghcb = __sev_get_ghcb(&state);
vc_ghcb_invalidate(ghcb);
result = vc_init_em_ctxt(&ctxt, regs, error_code);
@@ -1289,7 +1342,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
if (result == ES_OK)
result = vc_handle_exitcode(&ctxt, ghcb, error_code);
- sev_es_put_ghcb(&state);
+ __sev_put_ghcb(&state);
/* Done - now check the result */
switch (result) {
@@ -1297,17 +1350,20 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
vc_finish_insn(&ctxt);
break;
case ES_UNSUPPORTED:
- pr_err_ratelimited("Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
+ pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
error_code, regs->ip);
- goto fail;
+ ret = false;
+ break;
case ES_VMM_ERROR:
pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
error_code, regs->ip);
- goto fail;
+ ret = false;
+ break;
case ES_DECODE_FAILED:
pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
error_code, regs->ip);
- goto fail;
+ ret = false;
+ break;
case ES_EXCEPTION:
vc_forward_exception(&ctxt);
break;
@@ -1323,23 +1379,52 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
BUG();
}
-out:
- instrumentation_end();
+ return ret;
+}
- return;
+static __always_inline bool vc_is_db(unsigned long error_code)
+{
+ return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
+}
-fail:
- if (user_mode(regs)) {
- /*
- * Do not kill the machine if user-space triggered the
- * exception. Send SIGBUS instead and let user-space deal with
- * it.
- */
- force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
- } else {
- pr_emerg("PANIC: Unhandled #VC exception in kernel space (result=%d)\n",
- result);
+/*
+ * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
+ * and will panic when an error happens.
+ */
+DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
+{
+ irqentry_state_t irq_state;
+
+ /*
+ * With the current implementation it is always possible to switch to a
+ * safe stack because #VC exceptions only happen at known places, like
+ * intercepted instructions or accesses to MMIO areas/IO ports. They can
+ * also happen with code instrumentation when the hypervisor intercepts
+ * #DB, but the critical paths are forbidden to be instrumented, so #DB
+ * exceptions currently also only happen in safe places.
+ *
+ * But keep this here in case the noinstr annotations are violated due
+ * to a bug elsewhere.
+ */
+ if (unlikely(on_vc_fallback_stack(regs))) {
+ instrumentation_begin();
+ panic("Can't handle #VC exception from unsupported context\n");
+ instrumentation_end();
+ }
+
+ /*
+ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+ */
+ if (vc_is_db(error_code)) {
+ exc_debug(regs);
+ return;
+ }
+
+ irq_state = irqentry_nmi_enter(regs);
+
+ instrumentation_begin();
+ if (!vc_raw_handle_exception(regs, error_code)) {
/* Show some debug info */
show_regs(regs);
@@ -1350,23 +1435,38 @@ fail:
panic("Returned from Terminate-Request to Hypervisor\n");
}
- goto out;
+ instrumentation_end();
+ irqentry_nmi_exit(regs, irq_state);
}
-/* This handler runs on the #VC fall-back stack. It can cause further #VC exceptions */
-DEFINE_IDTENTRY_VC_IST(exc_vmm_communication)
+/*
+ * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
+ * and will kill the current task with SIGBUS when an error happens.
+ */
+DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
{
+ /*
+ * Handle #DB before calling into !noinstr code to avoid recursive #DB.
+ */
+ if (vc_is_db(error_code)) {
+ noist_exc_debug(regs);
+ return;
+ }
+
+ irqentry_enter_from_user_mode(regs);
instrumentation_begin();
- panic("Can't handle #VC exception from unsupported context\n");
- instrumentation_end();
-}
-DEFINE_IDTENTRY_VC(exc_vmm_communication)
-{
- if (likely(!on_vc_fallback_stack(regs)))
- safe_stack_exc_vmm_communication(regs, error_code);
- else
- ist_exc_vmm_communication(regs, error_code);
+ if (!vc_raw_handle_exception(regs, error_code)) {
+ /*
+ * Do not kill the machine if user-space triggered the
+ * exception. Send SIGBUS instead and let user-space deal with
+ * it.
+ */
+ force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
+ }
+
+ instrumentation_end();
+ irqentry_exit_to_user_mode(regs);
}
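
Taken together, the kernel- and user-mode entry points above replace the old single handler; conceptually, the dispatch performed by the IDT entry machinery looks like the following sketch (the function and callee names here are illustrative, not the actual macro expansion):

	/* Sketch: mode-based dispatch implied by DEFINE_IDTENTRY_VC_KERNEL/USER. */
	static void vc_dispatch(struct pt_regs *regs, unsigned long error_code)
	{
		if (user_mode(regs))
			vc_user(regs, error_code);	/* IRQ mode, SIGBUS on error */
		else
			vc_kernel(regs, error_code);	/* NMI mode, panic on error */
	}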
bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index ea794a083c44..e12779a2714d 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -492,7 +492,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
* SS descriptor, but we do need SS to be valid. It's possible
* that the old SS is entirely bogus -- this can happen if the
* signal we're trying to deliver is #GP or #SS caused by a bad
- * SS value. We also have a compatbility issue here: DOSEMU
+ * SS value. We also have a compatibility issue here: DOSEMU
* relies on the contents of the SS register indicating the
* SS value at the time of the signal, even though that code in
* DOSEMU predates sigreturn's ability to restore SS. (DOSEMU
@@ -713,7 +713,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
save_v86_state((struct kernel_vm86_regs *) regs, VM86_SIGNAL);
/* Are we from a system call? */
- if (syscall_get_nr(current, regs) >= 0) {
+ if (syscall_get_nr(current, regs) != -1) {
/* If so, check system call restarting.. */
switch (syscall_get_error(current, regs)) {
case -ERESTART_RESTARTBLOCK:
@@ -766,30 +766,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
{
- /*
- * This function is fundamentally broken as currently
- * implemented.
- *
- * The idea is that we want to trigger a call to the
- * restart_block() syscall and that we want in_ia32_syscall(),
- * in_x32_syscall(), etc. to match whatever they were in the
- * syscall being restarted. We assume that the syscall
- * instruction at (regs->ip - 2) matches whatever syscall
- * instruction we used to enter in the first place.
- *
- * The problem is that we can get here when ptrace pokes
- * syscall-like values into regs even if we're not in a syscall
- * at all.
- *
- * For now, we maintain historical behavior and guess based on
- * stored state. We could do better by saving the actual
- * syscall arch in restart_block or (with caveats on x32) by
- * checking if regs->ip points to 'int $0x80'. The current
- * behavior is incorrect if a tracer has a different bitness
- * than the tracee.
- */
#ifdef CONFIG_IA32_EMULATION
- if (current_thread_info()->status & (TS_COMPAT|TS_I386_REGS_POKED))
+ if (current->restart_block.arch_data & TS_COMPAT)
return __NR_ia32_restart_syscall;
#endif
#ifdef CONFIG_X86_X32_ABI
@@ -815,7 +793,7 @@ void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
}
/* Did we come from a system call? */
- if (syscall_get_nr(current, regs) >= 0) {
+ if (syscall_get_nr(current, regs) != -1) {
/* Restart the system call - no handlers present */
switch (syscall_get_error(current, regs)) {
case -ERESTARTNOHAND:
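
Both hunks above tighten the "did we come from a system call?" test: -1 is the sentinel syscall_get_nr() uses for "no syscall in progress", so checking the sign instead would misclassify other negative values a tracer can poke into orig_ax. A sketch of the predicate (the helper is hypothetical):

	/* Sketch: only -1 means "not in a syscall"; other negative values
	 * still count as syscall numbers for restart handling. */
	static bool came_from_syscall(struct pt_regs *regs)
	{
		return syscall_get_nr(current, regs) != -1;
	}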
diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c
index a5330ff498f0..06743ec054d2 100644
--- a/arch/x86/kernel/signal_compat.c
+++ b/arch/x86/kernel/signal_compat.c
@@ -29,7 +29,7 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(NSIGFPE != 15);
BUILD_BUG_ON(NSIGSEGV != 9);
BUILD_BUG_ON(NSIGBUS != 5);
- BUILD_BUG_ON(NSIGTRAP != 5);
+ BUILD_BUG_ON(NSIGTRAP != 6);
BUILD_BUG_ON(NSIGCHLD != 6);
BUILD_BUG_ON(NSIGSYS != 2);
@@ -127,6 +127,9 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_trapno) != 0x18);
+ BUILD_BUG_ON(offsetof(compat_siginfo_t, si_trapno) != 0x10);
+
BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10);
@@ -138,6 +141,11 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_data) != 0x18);
+ BUILD_BUG_ON(offsetof(siginfo_t, si_perf_type) != 0x20);
+ BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_data) != 0x10);
+ BUILD_BUG_ON(offsetof(compat_siginfo_t, si_perf_type) != 0x14);
+
CHECK_CSI_OFFSET(_sigpoll);
CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int));
CHECK_SI_SIZE (_sigpoll, 4*sizeof(int));
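
The offsetof checks added above pin the siginfo ABI at compile time; the mechanism in miniature, with a hypothetical struct and offset (the real checks target siginfo_t/compat_siginfo_t):

	#include <stddef.h>

	struct example_abi {
		int	kind;		/* bytes 0-3 */
		long	payload;	/* expected at offset 8 on x86-64 */
	};

	/* Build fails (negative array size) when the layout drifts. */
	#define ABI_CHECK(cond)	((void)sizeof(char[1 - 2 * !!(cond)]))

	static inline void example_abi_checks(void)
	{
		ABI_CHECK(offsetof(struct example_abi, payload) != 8);
	}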
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index eff4ce3b10da..06db901fabe8 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -67,7 +67,7 @@
* 5AP. symmetric IO mode (normal Linux operation) not affected.
* 'noapic' mode has vector 0xf filled out properly.
* 6AP. 'noapic' mode might be affected - fixed in later steppings
- * 7AP. We do not assume writes to the LVT deassering IRQs
+ * 7AP. We do not assume writes to the LVT deasserting IRQs
* 8AP. We do not enable low power mode (deep sleep) during MP bootup
* 9AP. We do not use mixed mode
*
@@ -204,7 +204,7 @@ static void native_stop_other_cpus(int wait)
}
/*
* Don't wait longer than 10 ms if the caller didn't
- * reqeust it. If wait is true, the machine hangs here if
+ * request it. If wait is true, the machine hangs here if
* one or more CPUs do not reach shutdown state.
*/
timeout = USEC_PER_MSEC * 10;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 02813a7f3a7c..9320285a5e29 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -232,11 +232,9 @@ static void notrace start_secondary(void *unused)
load_cr3(swapper_pg_dir);
__flush_tlb_all();
#endif
- cpu_init_exception_handling();
- cpu_init();
+ cpu_init_secondary();
rcu_cpu_starting(raw_smp_processor_id());
x86_cpuinit.early_percpu_clock_init();
- preempt_disable();
smp_callin();
enable_start_cpu0 = 0;
@@ -458,29 +456,52 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
return false;
}
+static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+{
+ if (c->phys_proc_id == o->phys_proc_id &&
+ c->cpu_die_id == o->cpu_die_id)
+ return true;
+ return false;
+}
+
/*
- * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
+ * Unlike the other levels, we do not enforce keeping a
+ * multicore group inside a NUMA node. If this happens, we will
+ * discard the MC level of the topology later.
+ */
+static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+{
+ if (c->phys_proc_id == o->phys_proc_id)
+ return true;
+ return false;
+}
+
+/*
+ * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
*
- * These are Intel CPUs that enumerate an LLC that is shared by
- * multiple NUMA nodes. The LLC on these systems is shared for
- * off-package data access but private to the NUMA node (half
- * of the package) for on-package access.
+ * Any Intel CPU that has multiple nodes per package and does not
+ * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
*
- * CPUID (the source of the information about the LLC) can only
- * enumerate the cache as being shared *or* unshared, but not
- * this particular configuration. The CPU in this case enumerates
- * the cache to be shared across the entire package (spanning both
- * NUMA nodes).
+ * When in SNC mode, these CPUs enumerate an LLC that is shared
+ * by multiple NUMA nodes. The LLC is shared for off-package data
+ * access but private to the NUMA node (half of the package) for
+ * on-package access. CPUID (the source of the information about
+ * the LLC) can only enumerate the cache as shared or unshared,
+ * but not this particular configuration.
*/
-static const struct x86_cpu_id snc_cpu[] = {
- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
+static const struct x86_cpu_id intel_cod_cpu[] = {
+ X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0), /* COD */
+ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0), /* COD */
+ X86_MATCH_INTEL_FAM6_MODEL(ANY, 1), /* SNC */
{}
};
static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
+ const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
+ bool intel_snc = id && id->driver_data;
/* Do not match if we do not have a valid APICID for cpu: */
if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
@@ -495,32 +516,12 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
* means 'c' does not share the LLC of 'o'. This will be
* reflected to userspace.
*/
- if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
+ if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc)
return false;
return topology_sane(c, o, "llc");
}
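
The driver_data field of intel_cod_cpu[] doubles as a COD/SNC flag; a sketch of how a caller would consume it (the helper below is hypothetical, the table and flag meaning are from the hunk above):

	/* Sketch: 0 in driver_data means Cluster-on-Die, 1 means Sub-NUMA
	 * Cluster; no match means neither. */
	static bool cpu_is_snc(void)
	{
		const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);

		return id && id->driver_data;
	}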
-/*
- * Unlike the other levels, we do not enforce keeping a
- * multicore group inside a NUMA node. If this happens, we will
- * discard the MC level of the topology later.
- */
-static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
-{
- if (c->phys_proc_id == o->phys_proc_id)
- return true;
- return false;
-}
-
-static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
-{
- if ((c->phys_proc_id == o->phys_proc_id) &&
- (c->cpu_die_id == o->cpu_die_id))
- return true;
- return false;
-}
-
#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
static inline int x86_sched_itmt_flags(void)
@@ -592,14 +593,23 @@ void set_cpu_sibling_map(int cpu)
for_each_cpu(i, cpu_sibling_setup_mask) {
o = &cpu_data(i);
+ if (match_pkg(c, o) && !topology_same_node(c, o))
+ x86_has_numa_in_package = true;
+
if ((i == cpu) || (has_smt && match_smt(c, o)))
link_mask(topology_sibling_cpumask, cpu, i);
if ((i == cpu) || (has_mp && match_llc(c, o)))
link_mask(cpu_llc_shared_mask, cpu, i);
+ if ((i == cpu) || (has_mp && match_die(c, o)))
+ link_mask(topology_die_cpumask, cpu, i);
}
+ threads = cpumask_weight(topology_sibling_cpumask(cpu));
+ if (threads > __max_smt_threads)
+ __max_smt_threads = threads;
+
/*
* This needs a separate iteration over the cpus because we rely on all
* topology_sibling_cpumask links to be set-up.
@@ -613,8 +623,7 @@ void set_cpu_sibling_map(int cpu)
/*
* Does this new cpu bringup a new core?
*/
- if (cpumask_weight(
- topology_sibling_cpumask(cpu)) == 1) {
+ if (threads == 1) {
/*
* for each core in package, increment
* the booted_cores for this new cpu
@@ -631,16 +640,7 @@ void set_cpu_sibling_map(int cpu)
} else if (i != cpu && !c->booted_cores)
c->booted_cores = cpu_data(i).booted_cores;
}
- if (match_pkg(c, o) && !topology_same_node(c, o))
- x86_has_numa_in_package = true;
-
- if ((i == cpu) || (has_mp && match_die(c, o)))
- link_mask(topology_die_cpumask, cpu, i);
}
-
- threads = cpumask_weight(topology_sibling_cpumask(cpu));
- if (threads > __max_smt_threads)
- __max_smt_threads = threads;
}
/* maps the cpu to the sched domain representing multi-core */
@@ -1407,7 +1407,7 @@ void __init calculate_max_logical_packages(void)
int ncpus;
/*
- * Today neither Intel nor AMD support heterogenous systems so
+ * Today neither Intel nor AMD support heterogeneous systems so
* extrapolate the boot cpu's data to all packages.
*/
ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
@@ -1659,13 +1659,17 @@ void play_dead_common(void)
local_irq_disable();
}
-static bool wakeup_cpu0(void)
+/**
+ * cond_wakeup_cpu0 - Wake up CPU0 if needed.
+ *
+ * If NMI wants to wake up CPU0, start CPU0.
+ */
+void cond_wakeup_cpu0(void)
{
if (smp_processor_id() == 0 && enable_start_cpu0)
- return true;
-
- return false;
+ start_cpu0();
}
+EXPORT_SYMBOL_GPL(cond_wakeup_cpu0);
/*
* We need to flush the caches before going to sleep, lest we have
@@ -1734,11 +1738,8 @@ static inline void mwait_play_dead(void)
__monitor(mwait_ptr, 0, 0);
mb();
__mwait(eax, 0);
- /*
- * If NMI wants to wake up CPU0, start CPU0.
- */
- if (wakeup_cpu0())
- start_cpu0();
+
+ cond_wakeup_cpu0();
}
}
@@ -1749,11 +1750,8 @@ void hlt_play_dead(void)
while (1) {
native_halt();
- /*
- * If NMI wants to wake up CPU0, start CPU0.
- */
- if (wakeup_cpu0())
- start_cpu0();
+
+ cond_wakeup_cpu0();
}
}
@@ -1865,9 +1863,6 @@ static bool slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
return true;
}
-#include <asm/cpu_device_id.h>
-#include <asm/intel-family.h>
-
#define X86_MATCH(model) \
X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \
INTEL_FAM6_##model, X86_FEATURE_APERFMPERF, NULL)
@@ -2046,7 +2041,7 @@ static bool amd_set_max_freq_ratio(void)
return false;
}
- highest_perf = perf_caps.highest_perf;
+ highest_perf = amd_get_highest_perf();
nominal_perf = perf_caps.nominal_perf;
if (!highest_perf || !nominal_perf) {
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 8627fda8d993..15b058eefc4e 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -29,12 +29,6 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
}
}
-/*
- * This function returns an error if it detects any unreliable features of the
- * stack. Otherwise it guarantees that the stack trace is reliable.
- *
- * If the task is not 'current', the caller *must* ensure the task is inactive.
- */
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
void *cookie, struct task_struct *task)
{
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index 9442c4136c38..ea028e736831 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -34,7 +34,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, void
break;
case NOP:
- code = ideal_nops[NOP_ATOMIC5];
+ code = x86_nops[5];
break;
case JMP:
@@ -66,7 +66,7 @@ static void __static_call_validate(void *insn, bool tail)
return;
} else {
if (opcode == CALL_INSN_OPCODE ||
- !memcmp(insn, ideal_nops[NOP_ATOMIC5], 5) ||
+ !memcmp(insn, x86_nops[5], 5) ||
!memcmp(insn, xor5rax, 5))
return;
}
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
index 653b7f617b61..8a56a6d80098 100644
--- a/arch/x86/kernel/sysfb_efi.c
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -10,7 +10,7 @@
* EFI Quirks
* Several EFI systems do not correctly advertise their boot framebuffers.
* Hence, we use this static table of known broken machines and fix up the
- * information so framebuffer drivers can load corectly.
+ * information so framebuffer drivers can load correctly.
*/
#include <linux/dmi.h>
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 4c09ba110204..f9af561c3cd4 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -49,6 +49,30 @@ bool tboot_enabled(void)
return tboot != NULL;
}
+/* noinline to prevent gcc from warning about dereferencing constant fixaddr */
+static noinline __init bool check_tboot_version(void)
+{
+ if (memcmp(&tboot_uuid, &tboot->uuid, sizeof(tboot->uuid))) {
+ pr_warn("tboot at 0x%llx is invalid\n", boot_params.tboot_addr);
+ return false;
+ }
+
+ if (tboot->version < 5) {
+ pr_warn("tboot version is invalid: %u\n", tboot->version);
+ return false;
+ }
+
+ pr_info("found shared page at phys addr 0x%llx:\n",
+ boot_params.tboot_addr);
+ pr_debug("version: %d\n", tboot->version);
+ pr_debug("log_addr: 0x%08x\n", tboot->log_addr);
+ pr_debug("shutdown_entry: 0x%x\n", tboot->shutdown_entry);
+ pr_debug("tboot_base: 0x%08x\n", tboot->tboot_base);
+ pr_debug("tboot_size: 0x%x\n", tboot->tboot_size);
+
+ return true;
+}
+
void __init tboot_probe(void)
{
/* Look for valid page-aligned address for shared page. */
@@ -66,25 +90,9 @@ void __init tboot_probe(void)
/* Map and check for tboot UUID. */
set_fixmap(FIX_TBOOT_BASE, boot_params.tboot_addr);
- tboot = (struct tboot *)fix_to_virt(FIX_TBOOT_BASE);
- if (memcmp(&tboot_uuid, &tboot->uuid, sizeof(tboot->uuid))) {
- pr_warn("tboot at 0x%llx is invalid\n", boot_params.tboot_addr);
+ tboot = (void *)fix_to_virt(FIX_TBOOT_BASE);
+ if (!check_tboot_version())
tboot = NULL;
- return;
- }
- if (tboot->version < 5) {
- pr_warn("tboot version is invalid: %u\n", tboot->version);
- tboot = NULL;
- return;
- }
-
- pr_info("found shared page at phys addr 0x%llx:\n",
- boot_params.tboot_addr);
- pr_debug("version: %d\n", tboot->version);
- pr_debug("log_addr: 0x%08x\n", tboot->log_addr);
- pr_debug("shutdown_entry: 0x%x\n", tboot->shutdown_entry);
- pr_debug("tboot_base: 0x%08x\n", tboot->tboot_base);
- pr_debug("tboot_size: 0x%x\n", tboot->tboot_size);
}
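
The refactor above moves the fixmap dereferences into a noinline helper so the constant-address accesses are opaque to the caller's optimizer; the same pattern in miniature (the magic value and helper are hypothetical):

	/* Sketch: keep dereferences of a compile-time-constant pointer in a
	 * noinline function so gcc cannot fold them and warn. */
	static noinline __init bool check_magic(const u32 *shared)
	{
		return *shared == 0xdecafbad;
	}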
static pgd_t *tboot_pg_dir;
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 64a496a0687f..3c883e064242 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -164,17 +164,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
savesegment(fs, sel);
if (sel == modified_sel)
loadsegment(fs, sel);
-
- savesegment(gs, sel);
- if (sel == modified_sel)
- load_gs_index(sel);
#endif
-#ifdef CONFIG_X86_32_LAZY_GS
savesegment(gs, sel);
if (sel == modified_sel)
- loadsegment(gs, sel);
-#endif
+ load_gs_index(sel);
} else {
#ifdef CONFIG_X86_64
if (p->thread.fsindex == modified_sel)
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index f5477eab5692..bd83748e2bde 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -113,7 +113,7 @@ int arch_register_cpu(int num)
* Two known BSP/CPU0 dependencies: Resume from suspend/hibernate
* depends on BSP. PIC interrupts depend on BSP.
*
- * If the BSP depencies are under control, one can tell kernel to
+ * If the BSP dependencies are under control, one can tell kernel to
* enable BSP hotplug. This basically adds a control file and
* one can attempt to offline BSP.
*/
diff --git a/arch/x86/kernel/trace.c b/arch/x86/kernel/trace.c
new file mode 100644
index 000000000000..6b73b6f92ad3
--- /dev/null
+++ b/arch/x86/kernel/trace.c
@@ -0,0 +1,234 @@
+#include <asm/trace/irq_vectors.h>
+#include <linux/trace.h>
+
+#if defined(CONFIG_OSNOISE_TRACER) && defined(CONFIG_X86_LOCAL_APIC)
+/*
+ * trace_intel_irq_entry - record intel specific IRQ entry
+ */
+static void trace_intel_irq_entry(void *data, int vector)
+{
+ osnoise_trace_irq_entry(vector);
+}
+
+/*
+ * trace_intel_irq_exit - record intel specific IRQ exit
+ */
+static void trace_intel_irq_exit(void *data, int vector)
+{
+ char *vector_desc = (char *) data;
+
+ osnoise_trace_irq_exit(vector, vector_desc);
+}
+
+/*
+ * register_intel_irq_tp - Register intel specific IRQ entry tracepoints
+ */
+int osnoise_arch_register(void)
+{
+ int ret;
+
+ ret = register_trace_local_timer_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = register_trace_local_timer_exit(trace_intel_irq_exit, "local_timer");
+ if (ret)
+ goto out_timer_entry;
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+ ret = register_trace_thermal_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_timer_exit;
+
+ ret = register_trace_thermal_apic_exit(trace_intel_irq_exit, "thermal_apic");
+ if (ret)
+ goto out_thermal_entry;
+#endif /* CONFIG_X86_THERMAL_VECTOR */
+
+#ifdef CONFIG_X86_MCE_AMD
+ ret = register_trace_deferred_error_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_thermal_exit;
+
+ ret = register_trace_deferred_error_apic_exit(trace_intel_irq_exit, "deferred_error");
+ if (ret)
+ goto out_deferred_entry;
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+ ret = register_trace_threshold_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_deferred_exit;
+
+ ret = register_trace_threshold_apic_exit(trace_intel_irq_exit, "threshold_apic");
+ if (ret)
+ goto out_threshold_entry;
+#endif /* CONFIG_X86_MCE_THRESHOLD */
+
+#ifdef CONFIG_SMP
+ ret = register_trace_call_function_single_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_threshold_exit;
+
+ ret = register_trace_call_function_single_exit(trace_intel_irq_exit,
+ "call_function_single");
+ if (ret)
+ goto out_call_function_single_entry;
+
+ ret = register_trace_call_function_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_call_function_single_exit;
+
+ ret = register_trace_call_function_exit(trace_intel_irq_exit, "call_function");
+ if (ret)
+ goto out_call_function_entry;
+
+ ret = register_trace_reschedule_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_call_function_exit;
+
+ ret = register_trace_reschedule_exit(trace_intel_irq_exit, "reschedule");
+ if (ret)
+ goto out_reschedule_entry;
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_IRQ_WORK
+ ret = register_trace_irq_work_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_reschedule_exit;
+
+ ret = register_trace_irq_work_exit(trace_intel_irq_exit, "irq_work");
+ if (ret)
+ goto out_irq_work_entry;
+#endif
+
+ ret = register_trace_x86_platform_ipi_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_irq_work_exit;
+
+ ret = register_trace_x86_platform_ipi_exit(trace_intel_irq_exit, "x86_platform_ipi");
+ if (ret)
+ goto out_x86_ipi_entry;
+
+ ret = register_trace_error_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_x86_ipi_exit;
+
+ ret = register_trace_error_apic_exit(trace_intel_irq_exit, "error_apic");
+ if (ret)
+ goto out_error_apic_entry;
+
+ ret = register_trace_spurious_apic_entry(trace_intel_irq_entry, NULL);
+ if (ret)
+ goto out_error_apic_exit;
+
+ ret = register_trace_spurious_apic_exit(trace_intel_irq_exit, "spurious_apic");
+ if (ret)
+ goto out_spurious_apic_entry;
+
+ return 0;
+
+out_spurious_apic_entry:
+ unregister_trace_spurious_apic_entry(trace_intel_irq_entry, NULL);
+out_error_apic_exit:
+ unregister_trace_error_apic_exit(trace_intel_irq_exit, "error_apic");
+out_error_apic_entry:
+ unregister_trace_error_apic_entry(trace_intel_irq_entry, NULL);
+out_x86_ipi_exit:
+ unregister_trace_x86_platform_ipi_exit(trace_intel_irq_exit, "x86_platform_ipi");
+out_x86_ipi_entry:
+ unregister_trace_x86_platform_ipi_entry(trace_intel_irq_entry, NULL);
+out_irq_work_exit:
+
+#ifdef CONFIG_IRQ_WORK
+ unregister_trace_irq_work_exit(trace_intel_irq_exit, "irq_work");
+out_irq_work_entry:
+ unregister_trace_irq_work_entry(trace_intel_irq_entry, NULL);
+out_reschedule_exit:
+#endif
+
+#ifdef CONFIG_SMP
+ unregister_trace_reschedule_exit(trace_intel_irq_exit, "reschedule");
+out_reschedule_entry:
+ unregister_trace_reschedule_entry(trace_intel_irq_entry, NULL);
+out_call_function_exit:
+ unregister_trace_call_function_exit(trace_intel_irq_exit, "call_function");
+out_call_function_entry:
+ unregister_trace_call_function_entry(trace_intel_irq_entry, NULL);
+out_call_function_single_exit:
+ unregister_trace_call_function_single_exit(trace_intel_irq_exit, "call_function_single");
+out_call_function_single_entry:
+ unregister_trace_call_function_single_entry(trace_intel_irq_entry, NULL);
+out_threshold_exit:
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+ unregister_trace_threshold_apic_exit(trace_intel_irq_exit, "threshold_apic");
+out_threshold_entry:
+ unregister_trace_threshold_apic_entry(trace_intel_irq_entry, NULL);
+out_deferred_exit:
+#endif
+
+#ifdef CONFIG_X86_MCE_AMD
+ unregister_trace_deferred_error_apic_exit(trace_intel_irq_exit, "deferred_error");
+out_deferred_entry:
+ unregister_trace_deferred_error_apic_entry(trace_intel_irq_entry, NULL);
+out_thermal_exit:
+#endif /* CONFIG_X86_MCE_AMD */
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+ unregister_trace_thermal_apic_exit(trace_intel_irq_exit, "thermal_apic");
+out_thermal_entry:
+ unregister_trace_thermal_apic_entry(trace_intel_irq_entry, NULL);
+out_timer_exit:
+#endif /* CONFIG_X86_THERMAL_VECTOR */
+
+ unregister_trace_local_timer_exit(trace_intel_irq_exit, "local_timer");
+out_timer_entry:
+ unregister_trace_local_timer_entry(trace_intel_irq_entry, NULL);
+out_err:
+ return -EINVAL;
+}
+
+void osnoise_arch_unregister(void)
+{
+ unregister_trace_spurious_apic_exit(trace_intel_irq_exit, "spurious_apic");
+ unregister_trace_spurious_apic_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_error_apic_exit(trace_intel_irq_exit, "error_apic");
+ unregister_trace_error_apic_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_x86_platform_ipi_exit(trace_intel_irq_exit, "x86_platform_ipi");
+ unregister_trace_x86_platform_ipi_entry(trace_intel_irq_entry, NULL);
+
+#ifdef CONFIG_IRQ_WORK
+ unregister_trace_irq_work_exit(trace_intel_irq_exit, "irq_work");
+ unregister_trace_irq_work_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_SMP
+ unregister_trace_reschedule_exit(trace_intel_irq_exit, "reschedule");
+ unregister_trace_reschedule_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_call_function_exit(trace_intel_irq_exit, "call_function");
+ unregister_trace_call_function_entry(trace_intel_irq_entry, NULL);
+ unregister_trace_call_function_single_exit(trace_intel_irq_exit, "call_function_single");
+ unregister_trace_call_function_single_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+ unregister_trace_threshold_apic_exit(trace_intel_irq_exit, "threshold_apic");
+ unregister_trace_threshold_apic_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_X86_MCE_AMD
+ unregister_trace_deferred_error_apic_exit(trace_intel_irq_exit, "deferred_error");
+ unregister_trace_deferred_error_apic_entry(trace_intel_irq_entry, NULL);
+#endif
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+ unregister_trace_thermal_apic_exit(trace_intel_irq_exit, "thermal_apic");
+ unregister_trace_thermal_apic_entry(trace_intel_irq_entry, NULL);
+#endif /* CONFIG_X86_THERMAL_VECTOR */
+
+ unregister_trace_local_timer_exit(trace_intel_irq_exit, "local_timer");
+ unregister_trace_local_timer_entry(trace_intel_irq_entry, NULL);
+}
+#endif /* CONFIG_OSNOISE_TRACER && CONFIG_X86_LOCAL_APIC */
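
The error unwinding in osnoise_arch_register() above is the usual register-in-order/unregister-in-reverse ladder; reduced to two tracepoints it looks like this (the function name is hypothetical, the tracepoint calls are the ones used above):

	static int example_register(void)
	{
		int ret;

		ret = register_trace_local_timer_entry(trace_intel_irq_entry, NULL);
		if (ret)
			return ret;

		ret = register_trace_local_timer_exit(trace_intel_irq_exit, "local_timer");
		if (ret) {
			/* unwind everything registered so far, newest first */
			unregister_trace_local_timer_entry(trace_intel_irq_entry, NULL);
			return ret;
		}
		return 0;
	}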
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 7f5aec758f0e..ed540e09a399 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -395,7 +395,7 @@ DEFINE_IDTENTRY_DF(exc_double_fault)
/*
* Adjust our frame so that we return straight to the #GP
* vector with the expected RSP value. This is safe because
- * we won't enable interupts or schedule before we invoke
+ * we won't enable interrupts or schedule before we invoke
* general_protection, so nothing will clobber the stack
* frame we just set up.
*
@@ -498,14 +498,15 @@ static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
{
u8 insn_buf[MAX_INSN_SIZE];
struct insn insn;
+ int ret;
if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
MAX_INSN_SIZE))
return GP_NO_HINT;
- kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE);
- insn_get_modrm(&insn);
- insn_get_sib(&insn);
+ ret = insn_decode_kernel(&insn, insn_buf);
+ if (ret < 0)
+ return GP_NO_HINT;
*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
if (*addr == -1UL)
@@ -556,7 +557,7 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
tsk->thread.trap_nr = X86_TRAP_GP;
if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
- return;
+ goto exit;
show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
force_sig(SIGSEGV);
@@ -694,8 +695,7 @@ asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *r
* In the SYSCALL entry path the RSP value comes from user-space - don't
* trust it and switch to the current kernel stack
*/
- if (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
- regs->ip < (unsigned long)entry_SYSCALL_64_safe_stack) {
+ if (ip_within_syscall_gap(regs)) {
sp = this_cpu_read(cpu_current_top_of_stack);
goto sync;
}
@@ -890,9 +890,6 @@ static __always_inline void exc_debug_kernel(struct pt_regs *regs,
if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
dr6 &= ~DR_STEP;
- if (kprobe_debug_handler(regs))
- goto out;
-
/*
* The kernel doesn't use INT1
*/
@@ -979,6 +976,10 @@ static __always_inline void exc_debug_user(struct pt_regs *regs,
goto out_irq;
}
+ /* #DB for bus lock can only be triggered from userspace. */
+ if (dr6 & DR_BUS_LOCK)
+ handle_bus_lock(regs);
+
/* Add the virtual_dr6 bits for signals. */
dr6 |= current->thread.virtual_dr6;
if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
@@ -1058,7 +1059,7 @@ static void math_error(struct pt_regs *regs, int trapnr)
goto exit;
if (fixup_vdso_exception(regs, trapnr, 0, 0))
- return;
+ goto exit;
force_sig_fault(SIGFPE, si_code,
(void __user *)uprobe_get_trap_addr(regs));
@@ -1159,12 +1160,9 @@ void __init trap_init(void)
/* Init GHCB memory pages when running as an SEV-ES guest */
sev_es_init_vc_handling();
+ /* Initialize TSS before setting up traps so ISTs work */
+ cpu_init_exception_handling();
+ /* Setup traps as cpu_init() might #GP */
idt_setup_traps();
-
- /*
- * Should be a barrier for any external CPU state:
- */
cpu_init();
-
- idt_setup_ist_traps();
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index f70dffc2771f..2e076a459a0c 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -14,6 +14,7 @@
#include <linux/percpu.h>
#include <linux/timex.h>
#include <linux/static_key.h>
+#include <linux/static_call.h>
#include <asm/hpet.h>
#include <asm/timer.h>
@@ -254,7 +255,7 @@ unsigned long long sched_clock(void)
bool using_native_sched_clock(void)
{
- return pv_ops.time.sched_clock == native_sched_clock;
+ return static_call_query(pv_sched_clock) == native_sched_clock;
}
#else
unsigned long long
@@ -739,7 +740,7 @@ static unsigned long pit_hpet_ptimer_calibrate_cpu(void)
* 2) Reference counter. If available we use the HPET or the
* PMTIMER as a reference to check the sanity of that value.
* We use separate TSC readouts and check inside of the
- * reference read for any possible disturbance. We dicard
+ * reference read for any possible disturbance. We discard
* disturbed values here as well. We do that around the PIT
* calibration delay loop as we have to wait for a certain
* amount of time anyway.
@@ -1079,7 +1080,7 @@ static void tsc_resume(struct clocksource *cs)
* very small window right after one CPU updated cycle_last under
* xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
* is smaller than the cycle_last reference value due to a TSC which
- * is slighty behind. This delta is nowhere else observable, but in
+ * is slightly behind. This delta is nowhere else observable, but in
* that case it results in a forward time jump in the range of hours
* due to the unsigned delta calculation of the time keeping core
* code, which is necessary to support wrapping clocksources like pm
@@ -1127,6 +1128,7 @@ static int tsc_cs_enable(struct clocksource *cs)
static struct clocksource clocksource_tsc_early = {
.name = "tsc-early",
.rating = 299,
+ .uncertainty_margin = 32 * NSEC_PER_MSEC,
.read = read_tsc,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
@@ -1151,7 +1153,8 @@ static struct clocksource clocksource_tsc = {
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS |
CLOCK_SOURCE_VALID_FOR_HRES |
- CLOCK_SOURCE_MUST_VERIFY,
+ CLOCK_SOURCE_MUST_VERIFY |
+ CLOCK_SOURCE_VERIFY_PERCPU,
.vdso_clock_mode = VDSO_CLOCKMODE_TSC,
.enable = tsc_cs_enable,
.resume = tsc_resume,
@@ -1264,7 +1267,7 @@ EXPORT_SYMBOL(convert_art_to_tsc);
* corresponding clocksource
* @cycles: System counter value
* @cs: Clocksource corresponding to system counter value. Used
- * by timekeeping code to verify comparibility of two cycle
+ * by timekeeping code to verify comparability of two cycle
* values.
*/
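
The clocksource hunks above add a per-CPU verification flag and a wide early-boot uncertainty margin; a driver opting in would look roughly like this sketch (all names and values below are illustrative):

	#include <linux/clocksource.h>

	static u64 example_read(struct clocksource *cs)
	{
		return 0;	/* placeholder: read the hardware counter */
	}

	static struct clocksource example_cs = {
		.name			= "example",
		.rating			= 100,
		.uncertainty_margin	= 32 * NSEC_PER_MSEC,
		.read			= example_read,
		.mask			= CLOCKSOURCE_MASK(64),
		.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
					  CLOCK_SOURCE_MUST_VERIFY |
					  CLOCK_SOURCE_VERIFY_PERCPU,
	};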
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 3d3c761eb74a..50a4515fe0ad 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -472,7 +472,7 @@ retry:
/*
* Add the result to the previous adjustment value.
*
- * The adjustement value is slightly off by the overhead of the
+ * The adjustment value is slightly off by the overhead of the
* sync mechanism (observed values are ~200 TSC cycles), but this
* really depends on CPU, node distance and frequency. So
* compensating for this is hard to get right. Experiments show
diff --git a/arch/x86/kernel/umip.c b/arch/x86/kernel/umip.c
index f6225bf22c02..576b47e7523d 100644
--- a/arch/x86/kernel/umip.c
+++ b/arch/x86/kernel/umip.c
@@ -272,7 +272,7 @@ static int emulate_umip_insn(struct insn *insn, int umip_inst,
* by whether the operand is a register or a memory location.
* If operand is a register, return as many bytes as the operand
* size. If operand is memory, return only the two least
- * siginificant bytes.
+ * significant bytes.
*/
if (X86_MODRM_MOD(insn->modrm.value) == 3)
*data_size = insn->opnd_bytes;
@@ -346,17 +346,15 @@ bool fixup_umip_exception(struct pt_regs *regs)
if (!regs)
return false;
- nr_copied = insn_fetch_from_user(regs, buf);
-
/*
- * The insn_fetch_from_user above could have failed if user code
- * is protected by a memory protection key. Give up on emulation
- * in such a case. Should we issue a page fault?
+ * Give up on emulation if fetching the instruction failed. Should a
+ * page fault or a #GP be issued?
*/
- if (!nr_copied)
+ nr_copied = insn_fetch_from_user(regs, buf);
+ if (nr_copied <= 0)
return false;
- if (!insn_decode(&insn, regs, buf, nr_copied))
+ if (!insn_decode_from_regs(&insn, regs, buf, nr_copied))
return false;
umip_inst = identify_insn(&insn);
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
index 2a1d47f47eee..a1202536fc57 100644
--- a/arch/x86/kernel/unwind_orc.c
+++ b/arch/x86/kernel/unwind_orc.c
@@ -13,7 +13,7 @@
#define orc_warn_current(args...) \
({ \
- if (state->task == current) \
+ if (state->task == current && !state->error) \
orc_warn(args); \
})
@@ -367,8 +367,8 @@ static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
return false;
- *ip = regs->ip;
- *sp = regs->sp;
+ *ip = READ_ONCE_NOCHECK(regs->ip);
+ *sp = READ_ONCE_NOCHECK(regs->sp);
return true;
}
@@ -380,8 +380,8 @@ static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr
if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
return false;
- *ip = regs->ip;
- *sp = regs->sp;
+ *ip = READ_ONCE_NOCHECK(regs->ip);
+ *sp = READ_ONCE_NOCHECK(regs->sp);
return true;
}
@@ -402,12 +402,12 @@ static bool get_reg(struct unwind_state *state, unsigned int reg_off,
return false;
if (state->full_regs) {
- *val = ((unsigned long *)state->regs)[reg];
+ *val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
return true;
}
if (state->prev_regs) {
- *val = ((unsigned long *)state->prev_regs)[reg];
+ *val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
return true;
}
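
READ_ONCE_NOCHECK() is used above because the unwinder may be walking another task's live stack: each load should be a single access the compiler cannot refetch, and it must not trip KASAN on memory the unwinder does not own. The idiom in isolation (the helper name is hypothetical):

	/* Sketch: tolerate racy, possibly-poisoned memory with one plain load. */
	static unsigned long peek_saved_ip(struct pt_regs *regs)
	{
		return READ_ONCE_NOCHECK(regs->ip);
	}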
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index a2b413394917..b63cf8f7745e 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -276,12 +276,12 @@ static bool is_prefix_bad(struct insn *insn)
static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
{
+ enum insn_mode m = x86_64 ? INSN_MODE_64 : INSN_MODE_32;
u32 volatile *good_insns;
+ int ret;
- insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
- /* has the side-effect of processing the entire instruction */
- insn_get_length(insn);
- if (!insn_complete(insn))
+ ret = insn_decode(insn, auprobe->insn, sizeof(auprobe->insn), m);
+ if (ret < 0)
return -ENOEXEC;
if (is_prefix_bad(insn))
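
insn_decode() replaces the old insn_init() + insn_get_length() + insn_complete() dance with a single call that returns a negative value on failure; a standalone sketch of the new API (the buffer handling is illustrative):

	#include <asm/insn.h>

	static int example_decode(const u8 *buf, int buf_len, bool x86_64)
	{
		enum insn_mode m = x86_64 ? INSN_MODE_64 : INSN_MODE_32;
		struct insn insn;
		int ret;

		ret = insn_decode(&insn, buf, buf_len, m);
		if (ret < 0)
			return ret;	/* undecodable byte stream */

		return insn.length;
	}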
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index a788d5120d4d..ac69894eab88 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -22,8 +22,6 @@ config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM
depends on HIGH_RES_TIMERS
- # for TASKSTATS/TASK_DELAY_ACCT:
- depends on NET && MULTIUSER
depends on X86_LOCAL_APIC
select PREEMPT_NOTIFIERS
select MMU_NOTIFIER
@@ -36,8 +34,7 @@ config KVM
select KVM_ASYNC_PF
select USER_RETURN_NOTIFIER
select KVM_MMIO
- select TASKSTATS
- select TASK_DELAY_ACCT
+ select SCHED_INFO
select PERF_EVENTS
select HAVE_KVM_MSI
select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -46,6 +43,7 @@ config KVM
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_VFIO
select SRCU
+ select HAVE_KVM_PM_NOTIFIER if PM
help
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
@@ -84,6 +82,18 @@ config KVM_INTEL
To compile this as a module, choose M here: the module
will be called kvm-intel.
+config X86_SGX_KVM
+ bool "Software Guard eXtensions (SGX) Virtualization"
+ depends on X86_SGX && KVM_INTEL
+ help
+ Enables KVM guests to create SGX enclaves.
+
+ This includes support to expose "raw" unreclaimable enclave memory to
+ guests via a device node, e.g. /dev/sgx_vepc.
+
+ If unsure, say N.
+
config KVM_AMD
tristate "KVM for AMD processors support"
depends on KVM
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 1b4766fe1de2..75dfd27b6e8a 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y += -Iarch/x86/kvm
+ccflags-y += -I $(srctree)/arch/x86/kvm
ccflags-$(CONFIG_KVM_WERROR) += -Werror
ifeq ($(CONFIG_FRAME_POINTER),y)
@@ -11,20 +11,31 @@ KVM := ../../../virt/kvm
kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
$(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o \
- $(KVM)/dirty_ring.o
+ $(KVM)/dirty_ring.o $(KVM)/binary_stats.o
kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \
mmu/spte.o
+
+ifdef CONFIG_HYPERV
+kvm-y += kvm_onhyperv.o
+endif
+
kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
kvm-$(CONFIG_KVM_XEN) += xen.o
kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
+kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
+
kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
+ifdef CONFIG_HYPERV
+kvm-amd-y += svm/svm_onhyperv.o
+endif
+
obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
obj-$(CONFIG_KVM_AMD) += kvm-amd.o
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 6bd2f8b830e4..c42613cfb5ba 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -18,6 +18,7 @@
#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
+#include <asm/sgx.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
@@ -28,7 +29,7 @@
* Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
* aligned to sizeof(unsigned long) because it's not accessed via bitops.
*/
-u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
+u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
EXPORT_SYMBOL_GPL(kvm_cpu_caps);
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
@@ -53,6 +54,7 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted)
}
#define F feature_bit
+#define SF(name) (boot_cpu_has(X86_FEATURE_##name) ? F(name) : 0)
static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
struct kvm_cpuid_entry2 *entries, int nent, u32 function, u32 index)
@@ -170,6 +172,21 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vcpu->arch.guest_supported_xcr0 =
(best->eax | ((u64)best->edx << 32)) & supported_xcr0;
+ /*
+ * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
+ * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
+ * requested XCR0 value. The enclave's XFRM must be a subset of XCR0
+ * at the time of EENTER, thus adjust the allowed XFRM by the guest's
+ * supported XCR0. Similar to XCR0 handling, FP and SSE are forced to
+ * '1' even on CPUs that don't support XSAVE.
+ */
+ best = kvm_find_cpuid_entry(vcpu, 0x12, 0x1);
+ if (best) {
+ best->ecx &= vcpu->arch.guest_supported_xcr0 & 0xffffffff;
+ best->edx &= vcpu->arch.guest_supported_xcr0 >> 32;
+ best->ecx |= XFEATURE_MASK_FPSSE;
+ }
+
kvm_update_pv_runtime(vcpu);
vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
@@ -185,10 +202,10 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
/*
- * Except for the MMU, which needs to be reset after any vendor
- * specific adjustments to the reserved GPA bits.
+ * Except for the MMU, which needs to do its thing after any vendor
+ * specific adjustments to the reserved GPA bits.
*/
- kvm_mmu_reset_context(vcpu);
+ kvm_mmu_after_set_cpuid(vcpu);
}
static int is_efer_nx(void)
@@ -347,13 +364,13 @@ out:
return r;
}
-static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
+/* Mask kvm_cpu_caps for @leaf with the raw CPUID capabilities of this CPU. */
+static __always_inline void __kvm_cpu_cap_mask(unsigned int leaf)
{
const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
struct kvm_cpuid_entry2 entry;
reverse_cpuid_check(leaf);
- kvm_cpu_caps[leaf] &= mask;
cpuid_count(cpuid.function, cpuid.index,
&entry.eax, &entry.ebx, &entry.ecx, &entry.edx);
@@ -361,6 +378,27 @@ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
}
+static __always_inline
+void kvm_cpu_cap_init_scattered(enum kvm_only_cpuid_leafs leaf, u32 mask)
+{
+ /* Use kvm_cpu_cap_mask for non-scattered leafs. */
+ BUILD_BUG_ON(leaf < NCAPINTS);
+
+ kvm_cpu_caps[leaf] = mask;
+
+ __kvm_cpu_cap_mask(leaf);
+}
+
+static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
+{
+ /* Use kvm_cpu_cap_init_scattered for scattered leafs. */
+ BUILD_BUG_ON(leaf >= NCAPINTS);
+
+ kvm_cpu_caps[leaf] &= mask;
+
+ __kvm_cpu_cap_mask(leaf);
+}
+
void kvm_set_cpu_caps(void)
{
unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
@@ -371,12 +409,13 @@ void kvm_set_cpu_caps(void)
unsigned int f_gbpages = 0;
unsigned int f_lm = 0;
#endif
+ memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));
- BUILD_BUG_ON(sizeof(kvm_cpu_caps) >
+ BUILD_BUG_ON(sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)) >
sizeof(boot_cpu_data.x86_capability));
memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
- sizeof(kvm_cpu_caps));
+ sizeof(kvm_cpu_caps) - (NKVMCAPINTS * sizeof(*kvm_cpu_caps)));
kvm_cpu_cap_mask(CPUID_1_ECX,
/*
@@ -407,7 +446,7 @@ void kvm_set_cpu_caps(void)
);
kvm_cpu_cap_mask(CPUID_7_0_EBX,
- F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
+ F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
F(BMI2) | F(ERMS) | F(INVPCID) | F(RTM) | 0 /*MPX*/ | F(RDSEED) |
F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
@@ -418,7 +457,8 @@ void kvm_set_cpu_caps(void)
F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
- F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/
+ F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/ |
+ F(SGX_LC) | F(BUS_LOCK_DETECT)
);
/* Set LA57 based on hardware capability. */
if (cpuid_ecx(7) & F(LA57))
@@ -457,6 +497,10 @@ void kvm_set_cpu_caps(void)
F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES)
);
+ kvm_cpu_cap_init_scattered(CPUID_12_EAX,
+ SF(SGX1) | SF(SGX2)
+ );
+
kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
@@ -514,11 +558,30 @@ void kvm_set_cpu_caps(void)
*/
kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);
+ kvm_cpu_cap_mask(CPUID_8000_001F_EAX,
+ 0 /* SME */ | F(SEV) | 0 /* VM_PAGE_FLUSH */ | F(SEV_ES) |
+ F(SME_COHERENT));
+
kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
F(PMM) | F(PMM_EN)
);
+
+ /*
+ * Hide RDTSCP and RDPID if either feature is reported as supported but
+ * probing MSR_TSC_AUX failed. This is purely a sanity check and
+ * should never happen, but the guest will likely crash if RDTSCP or
+ * RDPID is misreported, and KVM has botched MSR_TSC_AUX emulation in
+ * the past. For example, the sanity check may fire if this instance of
+ * KVM is running as L1 on top of an older, broken KVM.
+ */
+ if (WARN_ON((kvm_cpu_cap_has(X86_FEATURE_RDTSCP) ||
+ kvm_cpu_cap_has(X86_FEATURE_RDPID)) &&
+ !kvm_is_supported_user_return_msr(MSR_TSC_AUX))) {
+ kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
+ kvm_cpu_cap_clear(X86_FEATURE_RDPID);
+ }
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);
@@ -589,8 +652,10 @@ static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
case 7:
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
entry->eax = 0;
- entry->ecx = F(RDPID);
+ if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
+ entry->ecx = F(RDPID);
++array->nent;
+ break;
default:
break;
}
@@ -778,6 +843,38 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->edx = 0;
}
break;
+ case 0x12:
+ /* Intel SGX */
+ if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
+ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+ break;
+ }
+
+ /*
+ * Index 0: Sub-features, MISCSELECT (a.k.a extended features)
+ * and max enclave sizes. The SGX sub-features and MISCSELECT
+ * are restricted by kernel and KVM capabilities (like most
+ * feature flags), while enclave size is unrestricted.
+ */
+ cpuid_entry_override(entry, CPUID_12_EAX);
+ entry->ebx &= SGX_MISC_EXINFO;
+
+ entry = do_host_cpuid(array, function, 1);
+ if (!entry)
+ goto out;
+
+ /*
+ * Index 1: SECS.ATTRIBUTES. ATTRIBUTES are restricted a la
+ * feature flags. Advertise all supported flags, including
+ * privileged attributes that require explicit opt-in from
+ * userspace. ATTRIBUTES.XFRM is not adjusted as userspace is
+ * expected to derive it from supported XCR0.
+ */
+ entry->eax &= SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT |
+ SGX_ATTR_PROVISIONKEY | SGX_ATTR_EINITTOKENKEY |
+ SGX_ATTR_KSS;
+ entry->ebx &= 0;
+ break;
/* Intel PT */
case 0x14:
if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
@@ -869,8 +966,10 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
break;
/* Support memory encryption cpuid if host supports it */
case 0x8000001F:
- if (!boot_cpu_has(X86_FEATURE_SEV))
+ if (!kvm_cpu_cap_has(X86_FEATURE_SEV))
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+ else
+ cpuid_entry_override(entry, CPUID_8000_001F_EAX);
break;
/*Add support for Centaur's CPUID instruction*/
case 0xC0000000:
@@ -1033,7 +1132,7 @@ EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
* - Centaur: 0xc0000000 - 0xcfffffff
*
* The Hypervisor class is further subdivided into sub-classes that each act as
- * their own indepdent class associated with a 0x100 byte range. E.g. if Qemu
+ * their own independent class associated with a 0x100 byte range. E.g. if Qemu
* is advertising support for both HyperV and KVM, the resulting Hypervisor
* CPUID sub-classes are:
*
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 2a0c5064497f..c99edfff7f82 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -3,11 +3,12 @@
#define ARCH_X86_KVM_CPUID_H
#include "x86.h"
+#include "reverse_cpuid.h"
#include <asm/cpu.h>
#include <asm/processor.h>
#include <uapi/asm/kvm_para.h>
-extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
+extern u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
void kvm_set_cpu_caps(void);
void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
@@ -58,144 +59,8 @@ static inline bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
return kvm_vcpu_is_legal_aligned_gpa(vcpu, gpa, PAGE_SIZE);
}
-struct cpuid_reg {
- u32 function;
- u32 index;
- int reg;
-};
-
-static const struct cpuid_reg reverse_cpuid[] = {
- [CPUID_1_EDX] = { 1, 0, CPUID_EDX},
- [CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
- [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
- [CPUID_1_ECX] = { 1, 0, CPUID_ECX},
- [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
- [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
- [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX},
- [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX},
- [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
- [CPUID_6_EAX] = { 6, 0, CPUID_EAX},
- [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
- [CPUID_7_ECX] = { 7, 0, CPUID_ECX},
- [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
- [CPUID_7_EDX] = { 7, 0, CPUID_EDX},
- [CPUID_7_1_EAX] = { 7, 1, CPUID_EAX},
-};
-
-/*
- * Reverse CPUID and its derivatives can only be used for hardware-defined
- * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
- * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
- * is nonsensical as the bit number/mask is an arbitrary software-defined value
- * and can't be used by KVM to query/control guest capabilities. And obviously
- * the leaf being queried must have an entry in the lookup table.
- */
-static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
-{
- BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
- BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
- BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
- BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
- BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
- BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
-}
-
-/*
- * Retrieve the bit mask from an X86_FEATURE_* definition. Features contain
- * the hardware defined bit number (stored in bits 4:0) and a software defined
- * "word" (stored in bits 31:5). The word is used to index into arrays of
- * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
- */
-static __always_inline u32 __feature_bit(int x86_feature)
-{
- reverse_cpuid_check(x86_feature / 32);
- return 1 << (x86_feature & 31);
-}
-
-#define feature_bit(name) __feature_bit(X86_FEATURE_##name)
-
-static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
-{
- unsigned int x86_leaf = x86_feature / 32;
-
- reverse_cpuid_check(x86_leaf);
- return reverse_cpuid[x86_leaf];
-}
-
-static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
- u32 reg)
-{
- switch (reg) {
- case CPUID_EAX:
- return &entry->eax;
- case CPUID_EBX:
- return &entry->ebx;
- case CPUID_ECX:
- return &entry->ecx;
- case CPUID_EDX:
- return &entry->edx;
- default:
- BUILD_BUG();
- return NULL;
- }
-}
-
-static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
- unsigned int x86_feature)
-{
- const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
-
- return __cpuid_entry_get_reg(entry, cpuid.reg);
-}
-
-static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
- unsigned int x86_feature)
-{
- u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
-
- return *reg & __feature_bit(x86_feature);
-}
-
-static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
- unsigned int x86_feature)
-{
- return cpuid_entry_get(entry, x86_feature);
-}
-
-static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
- unsigned int x86_feature)
-{
- u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
-
- *reg &= ~__feature_bit(x86_feature);
-}
-
-static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
- unsigned int x86_feature)
-{
- u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
-
- *reg |= __feature_bit(x86_feature);
-}
-
-static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
- unsigned int x86_feature,
- bool set)
-{
- u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
-
- /*
- * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
- * compiler into using CMOV instead of Jcc when possible.
- */
- if (set)
- *reg |= __feature_bit(x86_feature);
- else
- *reg &= ~__feature_bit(x86_feature);
-}
-
static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
- enum cpuid_leafs leaf)
+ unsigned int leaf)
{
u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);
@@ -248,6 +113,14 @@ static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
}
+static inline bool guest_cpuid_is_intel(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0, 0);
+ return best && is_guest_vendor_intel(best->ebx, best->ecx, best->edx);
+}
+
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -308,7 +181,7 @@ static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
{
- unsigned int x86_leaf = x86_feature / 32;
+ unsigned int x86_leaf = __feature_leaf(x86_feature);
reverse_cpuid_check(x86_leaf);
kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
@@ -316,7 +189,7 @@ static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
{
- unsigned int x86_leaf = x86_feature / 32;
+ unsigned int x86_leaf = __feature_leaf(x86_feature);
reverse_cpuid_check(x86_leaf);
kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
@@ -324,7 +197,7 @@ static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
{
- unsigned int x86_leaf = x86_feature / 32;
+ unsigned int x86_leaf = __feature_leaf(x86_feature);
reverse_cpuid_check(x86_leaf);
return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
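
For reference, a minimal sketch of the word/bit decomposition these helpers
rely on. __feature_leaf() itself is not visible in this hunk, so its
equivalence to the plain division is an assumption here:

	/*
	 * Illustrative only: X86_FEATURE_* values pack a bit number
	 * (bits 4:0) and a word index (bits 31:5), per the comment
	 * removed above.
	 */
	static __always_inline unsigned int example_feature_word(unsigned int x86_feature)
	{
		return x86_feature / 32;	/* same as x86_feature >> 5 */
	}

	static __always_inline u32 example_feature_mask(unsigned int x86_feature)
	{
		return 1u << (x86_feature & 31);	/* bit within its word */
	}
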
diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c
index 7e818d64bb4d..95a98413dc32 100644
--- a/arch/x86/kvm/debugfs.c
+++ b/arch/x86/kvm/debugfs.c
@@ -17,6 +17,15 @@ static int vcpu_get_timer_advance_ns(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(vcpu_timer_advance_ns_fops, vcpu_get_timer_advance_ns, NULL, "%llu\n");
+static int vcpu_get_guest_mode(void *data, u64 *val)
+{
+ struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
+ *val = vcpu->stat.guest_mode;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vcpu_guest_mode_fops, vcpu_get_guest_mode, NULL, "%lld\n");
+
static int vcpu_get_tsc_offset(void *data, u64 *val)
{
struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
@@ -45,6 +54,8 @@ DEFINE_SIMPLE_ATTRIBUTE(vcpu_tsc_scaling_frac_fops, vcpu_get_tsc_scaling_frac_bi
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
+ debugfs_create_file("guest_mode", 0444, debugfs_dentry, vcpu,
+ &vcpu_guest_mode_fops);
debugfs_create_file("tsc-offset", 0444, debugfs_dentry, vcpu,
&vcpu_tsc_offset_fops);
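
The new guest_mode file follows the same read-only pattern as the existing
tsc-offset attribute. A hedged sketch of wiring up one more such counter
(the "example" names are hypothetical; vcpu->stat.exits is assumed to be an
available u64 stat):

	static int vcpu_get_example_stat(void *data, u64 *val)
	{
		struct kvm_vcpu *vcpu = data;

		*val = vcpu->stat.exits;	/* any u64 vCPU stat */
		return 0;
	}
	DEFINE_SIMPLE_ATTRIBUTE(vcpu_example_stat_fops, vcpu_get_example_stat,
				NULL, "%llu\n");

	/* in kvm_arch_create_vcpu_debugfs(): */
	debugfs_create_file("example-stat", 0444, debugfs_dentry, vcpu,
			    &vcpu_example_stat_fops);
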
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index f7970ba6219f..2837110e66ed 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -22,7 +22,6 @@
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
#include <linux/stringify.h>
-#include <asm/fpu/api.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
@@ -1081,116 +1080,14 @@ static void fetch_register_operand(struct operand *op)
}
}
-static void emulator_get_fpu(void)
-{
- fpregs_lock();
-
- fpregs_assert_state_consistent();
- if (test_thread_flag(TIF_NEED_FPU_LOAD))
- switch_fpu_return();
-}
-
-static void emulator_put_fpu(void)
-{
- fpregs_unlock();
-}
-
-static void read_sse_reg(sse128_t *data, int reg)
-{
- emulator_get_fpu();
- switch (reg) {
- case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
- case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
- case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
- case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
- case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
- case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
- case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
- case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
-#ifdef CONFIG_X86_64
- case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
- case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
- case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
- case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
- case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
- case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
- case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
- case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
-#endif
- default: BUG();
- }
- emulator_put_fpu();
-}
-
-static void write_sse_reg(sse128_t *data, int reg)
-{
- emulator_get_fpu();
- switch (reg) {
- case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
- case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
- case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
- case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
- case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
- case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
- case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
- case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
-#ifdef CONFIG_X86_64
- case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
- case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
- case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
- case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
- case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
- case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
- case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
- case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
-#endif
- default: BUG();
- }
- emulator_put_fpu();
-}
-
-static void read_mmx_reg(u64 *data, int reg)
-{
- emulator_get_fpu();
- switch (reg) {
- case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
- case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
- case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
- case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
- case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
- case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
- case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
- case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
- default: BUG();
- }
- emulator_put_fpu();
-}
-
-static void write_mmx_reg(u64 *data, int reg)
-{
- emulator_get_fpu();
- switch (reg) {
- case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
- case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
- case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
- case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
- case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
- case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
- case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
- case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
- default: BUG();
- }
- emulator_put_fpu();
-}
-
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
- emulator_get_fpu();
+ kvm_fpu_get();
asm volatile("fninit");
- emulator_put_fpu();
+ kvm_fpu_put();
return X86EMUL_CONTINUE;
}
@@ -1201,9 +1098,9 @@ static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
- emulator_get_fpu();
+ kvm_fpu_get();
asm volatile("fnstcw %0": "+m"(fcw));
- emulator_put_fpu();
+ kvm_fpu_put();
ctxt->dst.val = fcw;
@@ -1217,9 +1114,9 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
- emulator_get_fpu();
+ kvm_fpu_get();
asm volatile("fnstsw %0": "+m"(fsw));
- emulator_put_fpu();
+ kvm_fpu_put();
ctxt->dst.val = fsw;
@@ -1238,7 +1135,7 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = reg;
- read_sse_reg(&op->vec_val, reg);
+ kvm_read_sse_reg(reg, &op->vec_val);
return;
}
if (ctxt->d & Mmx) {
@@ -1289,7 +1186,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = ctxt->modrm_rm;
- read_sse_reg(&op->vec_val, ctxt->modrm_rm);
+ kvm_read_sse_reg(ctxt->modrm_rm, &op->vec_val);
return rc;
}
if (ctxt->d & Mmx) {
@@ -1866,10 +1763,10 @@ static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
op->bytes * op->count);
break;
case OP_XMM:
- write_sse_reg(&op->vec_val, op->addr.xmm);
+ kvm_write_sse_reg(op->addr.xmm, &op->vec_val);
break;
case OP_MM:
- write_mmx_reg(&op->mm_val, op->addr.mm);
+ kvm_write_mmx_reg(op->addr.mm, &op->mm_val);
break;
case OP_NONE:
/* no writeback */
@@ -2638,8 +2535,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
ctxt->ops->set_nmi_mask(ctxt, false);
- ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
- ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+ ctxt->ops->exiting_smm(ctxt);
/*
* Get back to real mode, to prepare a safe state in which to load
@@ -2678,12 +2574,12 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
}
/*
- * Give pre_leave_smm() a chance to make ISA-specific changes to the
- * vCPU state (e.g. enter guest mode) before loading state from the SMM
+ * Give leave_smm() a chance to make ISA-specific changes to the vCPU
+ * state (e.g. enter guest mode) before loading state from the SMM
* state-save area.
*/
- if (ctxt->ops->pre_leave_smm(ctxt, buf))
- return X86EMUL_UNHANDLEABLE;
+ if (ctxt->ops->leave_smm(ctxt, buf))
+ goto emulate_shutdown;
#ifdef CONFIG_X86_64
if (emulator_has_longmode(ctxt))
@@ -2692,13 +2588,21 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
#endif
ret = rsm_load_state_32(ctxt, buf);
- if (ret != X86EMUL_CONTINUE) {
- /* FIXME: should triple fault */
- return X86EMUL_UNHANDLEABLE;
- }
+ if (ret != X86EMUL_CONTINUE)
+ goto emulate_shutdown;
- ctxt->ops->post_leave_smm(ctxt);
+ /*
+ * Note, the ctxt->ops callbacks are responsible for handling side
+ * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID
+ * runtime updates, etc... If that changes, e.g. this flow is moved
+ * out of the emulator to make it look more like enter_smm(), then
+ * those side effects need to be explicitly handled for both success
+ * and shutdown.
+ */
+ return X86EMUL_CONTINUE;
+emulate_shutdown:
+ ctxt->ops->triple_fault(ctxt);
return X86EMUL_CONTINUE;
}
@@ -3222,7 +3126,7 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
}
/*
- * Now load segment descriptors. If fault happenes at this stage
+ * Now load segment descriptors. If fault happens at this stage
* it is handled in a context of new task
*/
ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
@@ -4124,11 +4028,11 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- emulator_get_fpu();
+ kvm_fpu_get();
rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
- emulator_put_fpu();
+ kvm_fpu_put();
if (rc != X86EMUL_CONTINUE)
return rc;
@@ -4172,7 +4076,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- emulator_get_fpu();
+ kvm_fpu_get();
if (size < __fxstate_size(16)) {
rc = fxregs_fixup(&fx_state, size);
@@ -4189,7 +4093,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
out:
- emulator_put_fpu();
+ kvm_fpu_put();
return rc;
}
@@ -4220,7 +4124,7 @@ static bool valid_cr(int nr)
}
}
-static int check_cr_read(struct x86_emulate_ctxt *ctxt)
+static int check_cr_access(struct x86_emulate_ctxt *ctxt)
{
if (!valid_cr(ctxt->modrm_reg))
return emulate_ud(ctxt);
@@ -4228,80 +4132,6 @@ static int check_cr_read(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
-static int check_cr_write(struct x86_emulate_ctxt *ctxt)
-{
- u64 new_val = ctxt->src.val64;
- int cr = ctxt->modrm_reg;
- u64 efer = 0;
-
- static u64 cr_reserved_bits[] = {
- 0xffffffff00000000ULL,
- 0, 0, 0, /* CR3 checked later */
- CR4_RESERVED_BITS,
- 0, 0, 0,
- CR8_RESERVED_BITS,
- };
-
- if (!valid_cr(cr))
- return emulate_ud(ctxt);
-
- if (new_val & cr_reserved_bits[cr])
- return emulate_gp(ctxt, 0);
-
- switch (cr) {
- case 0: {
- u64 cr4;
- if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
- ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
- return emulate_gp(ctxt, 0);
-
- cr4 = ctxt->ops->get_cr(ctxt, 4);
- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
-
- if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
- !(cr4 & X86_CR4_PAE))
- return emulate_gp(ctxt, 0);
-
- break;
- }
- case 3: {
- u64 rsvd = 0;
-
- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
- if (efer & EFER_LMA) {
- u64 maxphyaddr;
- u32 eax, ebx, ecx, edx;
-
- eax = 0x80000008;
- ecx = 0;
- if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
- &edx, true))
- maxphyaddr = eax & 0xff;
- else
- maxphyaddr = 36;
- rsvd = rsvd_bits(maxphyaddr, 63);
- if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
- rsvd &= ~X86_CR3_PCID_NOFLUSH;
- }
-
- if (new_val & rsvd)
- return emulate_gp(ctxt, 0);
-
- break;
- }
- case 4: {
- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
-
- if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
- return emulate_gp(ctxt, 0);
-
- break;
- }
- }
-
- return X86EMUL_CONTINUE;
-}
-
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
unsigned long dr7;
@@ -4576,7 +4406,7 @@ static const struct opcode group8[] = {
* from the register case of group9.
*/
static const struct gprefix pfx_0f_c7_7 = {
- N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
+ N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdpid),
};
@@ -4841,10 +4671,10 @@ static const struct opcode twobyte_table[256] = {
D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
/* 0x20 - 0x2F */
- DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
+ DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
- check_cr_write),
+ check_cr_access),
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
check_dr_write),
N, N, N, N,
@@ -5185,7 +5015,7 @@ done:
return rc;
}
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
@@ -5396,7 +5226,8 @@ done_prefixes:
ctxt->execute = opcode.u.execute;
- if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
+ if (unlikely(emulation_type & EMULTYPE_TRAP_UD) &&
+ likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
@@ -5510,9 +5341,9 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
int rc;
- emulator_get_fpu();
+ kvm_fpu_get();
rc = asm_safe("fwait");
- emulator_put_fpu();
+ kvm_fpu_put();
if (unlikely(rc != X86EMUL_CONTINUE))
return emulate_exception(ctxt, MF_VECTOR, 0, false);
@@ -5523,7 +5354,7 @@ static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
static void fetch_possible_mmx_operand(struct operand *op)
{
if (op->type == OP_MM)
- read_mmx_reg(&op->mm_val, op->addr.mm);
+ kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
}
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
diff --git a/arch/x86/kvm/fpu.h b/arch/x86/kvm/fpu.h
new file mode 100644
index 000000000000..3ba12888bf66
--- /dev/null
+++ b/arch/x86/kvm/fpu.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __KVM_FPU_H_
+#define __KVM_FPU_H_
+
+#include <asm/fpu/api.h>
+
+typedef u32 __attribute__((vector_size(16))) sse128_t;
+#define __sse128_u union { sse128_t vec; u64 as_u64[2]; u32 as_u32[4]; }
+#define sse128_lo(x) ({ __sse128_u t; t.vec = x; t.as_u64[0]; })
+#define sse128_hi(x) ({ __sse128_u t; t.vec = x; t.as_u64[1]; })
+#define sse128_l0(x) ({ __sse128_u t; t.vec = x; t.as_u32[0]; })
+#define sse128_l1(x) ({ __sse128_u t; t.vec = x; t.as_u32[1]; })
+#define sse128_l2(x) ({ __sse128_u t; t.vec = x; t.as_u32[2]; })
+#define sse128_l3(x) ({ __sse128_u t; t.vec = x; t.as_u32[3]; })
+#define sse128(lo, hi) ({ __sse128_u t; t.as_u64[0] = lo; t.as_u64[1] = hi; t.vec; })
+
+static inline void _kvm_read_sse_reg(int reg, sse128_t *data)
+{
+ switch (reg) {
+ case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
+ case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
+ case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
+ case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
+ case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
+ case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
+ case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
+ case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
+#ifdef CONFIG_X86_64
+ case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
+ case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
+ case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
+ case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
+ case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
+ case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
+ case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
+ case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
+#endif
+ default: BUG();
+ }
+}
+
+static inline void _kvm_write_sse_reg(int reg, const sse128_t *data)
+{
+ switch (reg) {
+ case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
+ case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
+ case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
+ case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
+ case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
+ case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
+ case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
+ case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
+#ifdef CONFIG_X86_64
+ case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
+ case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
+ case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
+ case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
+ case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
+ case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
+ case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
+ case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
+#endif
+ default: BUG();
+ }
+}
+
+static inline void _kvm_read_mmx_reg(int reg, u64 *data)
+{
+ switch (reg) {
+ case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
+ case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
+ case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
+ case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
+ case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
+ case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
+ case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
+ case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
+ default: BUG();
+ }
+}
+
+static inline void _kvm_write_mmx_reg(int reg, const u64 *data)
+{
+ switch (reg) {
+ case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
+ case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
+ case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
+ case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
+ case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
+ case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
+ case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
+ case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
+ default: BUG();
+ }
+}
+
+static inline void kvm_fpu_get(void)
+{
+ fpregs_lock();
+
+ fpregs_assert_state_consistent();
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ switch_fpu_return();
+}
+
+static inline void kvm_fpu_put(void)
+{
+ fpregs_unlock();
+}
+
+static inline void kvm_read_sse_reg(int reg, sse128_t *data)
+{
+ kvm_fpu_get();
+ _kvm_read_sse_reg(reg, data);
+ kvm_fpu_put();
+}
+
+static inline void kvm_write_sse_reg(int reg, const sse128_t *data)
+{
+ kvm_fpu_get();
+ _kvm_write_sse_reg(reg, data);
+ kvm_fpu_put();
+}
+
+static inline void kvm_read_mmx_reg(int reg, u64 *data)
+{
+ kvm_fpu_get();
+ _kvm_read_mmx_reg(reg, data);
+ kvm_fpu_put();
+}
+
+static inline void kvm_write_mmx_reg(int reg, const u64 *data)
+{
+ kvm_fpu_get();
+ _kvm_write_mmx_reg(reg, data);
+ kvm_fpu_put();
+}
+
+#endif
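
A short usage sketch for the new header: the kvm_*_reg() wrappers are
self-contained, while the _kvm_*_reg() variants let a caller batch several
accesses under a single kvm_fpu_get()/kvm_fpu_put() pair, as the Hyper-V
XMM hypercall code later in this series does (hypothetical caller below):

	static void example_copy_xmm0_to_xmm1(void)
	{
		sse128_t val;

		kvm_fpu_get();		/* fpregs_lock + reload if needed */
		_kvm_read_sse_reg(0, &val);
		_kvm_write_sse_reg(1, &val);
		kvm_fpu_put();
	}
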
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 58fa8c029867..b07592ca92f0 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -36,6 +36,7 @@
#include "trace.h"
#include "irq.h"
+#include "fpu.h"
/* "Hv#1" signature */
#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
@@ -273,15 +274,10 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
{
- struct kvm_cpuid_entry2 *entry;
-
- entry = kvm_find_cpuid_entry(vcpu,
- HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES,
- 0);
- if (!entry)
- return false;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
- return entry->eax & HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+ return hv_vcpu->cpuid_cache.syndbg_cap_eax &
+ HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
}
static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
@@ -520,10 +516,10 @@ static u64 get_time_ref_counter(struct kvm *kvm)
u64 tsc;
/*
- * The guest has not set up the TSC page or the clock isn't
- * stable, fall back to get_kvmclock_ns.
+ * Fall back to get_kvmclock_ns() when TSC page hasn't been set up,
+ * is broken, disabled or being updated.
*/
- if (!hv->tsc_ref.tsc_sequence)
+ if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
return div_u64(get_kvmclock_ns(kvm), 100);
vcpu = kvm_get_vcpu(kvm, 0);
@@ -635,11 +631,17 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
union hv_stimer_config new_config = {.as_uint64 = config},
old_config = {.as_uint64 = stimer->config.as_uint64};
struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
if (!synic->active && !host)
return 1;
+ if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
+ !(hv_vcpu->cpuid_cache.features_edx &
+ HV_STIMER_DIRECT_MODE_AVAILABLE)))
+ return 1;
+
trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
stimer->index, config, host);
@@ -1077,6 +1079,21 @@ static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
return true;
}
+/*
+ * Don't touch TSC page values if the guest has opted for TSC emulation after
+ * migration. KVM doesn't fully support reenlightenment notifications and TSC
+ * access emulation and Hyper-V is known to expect the values in TSC page to
+ * stay constant before TSC access emulation is disabled from guest side
+ * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC
+ * frequency and guest visible TSC value across migration (and prevent it when
+ * TSC scaling is unsupported).
+ */
+static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
+{
+ return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
+ hv->hv_tsc_emulation_control;
+}
+
void kvm_hv_setup_tsc_page(struct kvm *kvm,
struct pvclock_vcpu_time_info *hv_clock)
{
@@ -1087,7 +1104,8 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
- if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+ if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+ hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
return;
mutex_lock(&hv->hv_lock);
@@ -1101,7 +1119,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
*/
if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
&tsc_seq, sizeof(tsc_seq))))
+ goto out_err;
+
+ if (tsc_seq && tsc_page_update_unsafe(hv)) {
+ if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
+ goto out_err;
+
+ hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
goto out_unlock;
+ }
/*
* While we're computing and writing the parameters, force the
@@ -1110,15 +1136,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
hv->tsc_ref.tsc_sequence = 0;
if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
&hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
- goto out_unlock;
+ goto out_err;
if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
- goto out_unlock;
+ goto out_err;
/* Ensure sequence is zero before writing the rest of the struct. */
smp_wmb();
if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
- goto out_unlock;
+ goto out_err;
/*
* Now switch to the TSC page mechanism by writing the sequence.
@@ -1131,18 +1157,141 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
smp_wmb();
hv->tsc_ref.tsc_sequence = tsc_seq;
- kvm_write_guest(kvm, gfn_to_gpa(gfn),
- &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+ if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+ &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+ goto out_err;
+
+ hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
+ goto out_unlock;
+
+out_err:
+ hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
out_unlock:
mutex_unlock(&hv->hv_lock);
}
+void kvm_hv_invalidate_tsc_page(struct kvm *kvm)
+{
+ struct kvm_hv *hv = to_kvm_hv(kvm);
+ u64 gfn;
+ int idx;
+
+ if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
+ hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET ||
+ tsc_page_update_unsafe(hv))
+ return;
+
+ mutex_lock(&hv->hv_lock);
+
+ if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+ goto out_unlock;
+
+ /* Preserve HV_TSC_PAGE_GUEST_CHANGED/HV_TSC_PAGE_HOST_CHANGED states */
+ if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET)
+ hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING;
+
+ gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
+
+ hv->tsc_ref.tsc_sequence = 0;
+
+ /*
+ * Take the srcu lock as memslots will be accessed to check the gfn
+ * cache generation against the memslots generation.
+ */
+ idx = srcu_read_lock(&kvm->srcu);
+ if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
+ &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
+ hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
+ srcu_read_unlock(&kvm->srcu, idx);
+
+out_unlock:
+ mutex_unlock(&hv->hv_lock);
+}
+
+static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
+{
+ if (!hv_vcpu->enforce_cpuid)
+ return true;
+
+ switch (msr) {
+ case HV_X64_MSR_GUEST_OS_ID:
+ case HV_X64_MSR_HYPERCALL:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_HYPERCALL_AVAILABLE;
+ case HV_X64_MSR_VP_RUNTIME:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_VP_RUNTIME_AVAILABLE;
+ case HV_X64_MSR_TIME_REF_COUNT:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_TIME_REF_COUNT_AVAILABLE;
+ case HV_X64_MSR_VP_INDEX:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_VP_INDEX_AVAILABLE;
+ case HV_X64_MSR_RESET:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_RESET_AVAILABLE;
+ case HV_X64_MSR_REFERENCE_TSC:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_REFERENCE_TSC_AVAILABLE;
+ case HV_X64_MSR_SCONTROL:
+ case HV_X64_MSR_SVERSION:
+ case HV_X64_MSR_SIEFP:
+ case HV_X64_MSR_SIMP:
+ case HV_X64_MSR_EOM:
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_SYNIC_AVAILABLE;
+ case HV_X64_MSR_STIMER0_CONFIG:
+ case HV_X64_MSR_STIMER1_CONFIG:
+ case HV_X64_MSR_STIMER2_CONFIG:
+ case HV_X64_MSR_STIMER3_CONFIG:
+ case HV_X64_MSR_STIMER0_COUNT:
+ case HV_X64_MSR_STIMER1_COUNT:
+ case HV_X64_MSR_STIMER2_COUNT:
+ case HV_X64_MSR_STIMER3_COUNT:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_SYNTIMER_AVAILABLE;
+ case HV_X64_MSR_EOI:
+ case HV_X64_MSR_ICR:
+ case HV_X64_MSR_TPR:
+ case HV_X64_MSR_VP_ASSIST_PAGE:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_MSR_APIC_ACCESS_AVAILABLE;
+ break;
+ case HV_X64_MSR_TSC_FREQUENCY:
+ case HV_X64_MSR_APIC_FREQUENCY:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_ACCESS_FREQUENCY_MSRS;
+ case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
+ case HV_X64_MSR_TSC_EMULATION_CONTROL:
+ case HV_X64_MSR_TSC_EMULATION_STATUS:
+ return hv_vcpu->cpuid_cache.features_eax &
+ HV_ACCESS_REENLIGHTENMENT;
+ case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+ case HV_X64_MSR_CRASH_CTL:
+ return hv_vcpu->cpuid_cache.features_edx &
+ HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
+ case HV_X64_MSR_SYNDBG_OPTIONS:
+ case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
+ return hv_vcpu->cpuid_cache.features_edx &
+ HV_FEATURE_DEBUG_MSRS_AVAILABLE;
+ default:
+ break;
+ }
+
+ return false;
+}
+
static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
bool host)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_hv *hv = to_kvm_hv(kvm);
+ if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
+ return 1;
+
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
hv->hv_guest_os_id = data;
@@ -1193,8 +1342,15 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
}
case HV_X64_MSR_REFERENCE_TSC:
hv->hv_tsc_page = data;
- if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)
+ if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
+ if (!host)
+ hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
+ else
+ hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
+ } else {
+ hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
+ }
break;
case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
return kvm_hv_msr_set_crash_data(kvm,
@@ -1229,6 +1385,9 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
hv->hv_tsc_emulation_control = data;
break;
case HV_X64_MSR_TSC_EMULATION_STATUS:
+ if (data && !host)
+ return 1;
+
hv->hv_tsc_emulation_status = data;
break;
case HV_X64_MSR_TIME_REF_COUNT:
@@ -1261,6 +1420,9 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+ if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
+ return 1;
+
switch (msr) {
case HV_X64_MSR_VP_INDEX: {
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
@@ -1375,6 +1537,9 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
struct kvm *kvm = vcpu->kvm;
struct kvm_hv *hv = to_kvm_hv(kvm);
+ if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
+ return 1;
+
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
data = hv->hv_guest_os_id;
@@ -1424,6 +1589,9 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
u64 data = 0;
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+ if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
+ return 1;
+
switch (msr) {
case HV_X64_MSR_VP_INDEX:
data = hv_vcpu->vp_index;
@@ -1552,8 +1720,22 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask(
return vcpu_bitmap;
}
-static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool ex)
+struct kvm_hv_hcall {
+ u64 param;
+ u64 ingpa;
+ u64 outgpa;
+ u16 code;
+ u16 rep_cnt;
+ u16 rep_idx;
+ bool fast;
+ bool rep;
+ sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
+};
+
+static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
{
+ int i;
+ gpa_t gpa;
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
struct hv_tlb_flush_ex flush_ex;
@@ -1567,8 +1749,15 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
bool all_cpus;
if (!ex) {
- if (unlikely(kvm_read_guest(kvm, ingpa, &flush, sizeof(flush))))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (hc->fast) {
+ flush.address_space = hc->ingpa;
+ flush.flags = hc->outgpa;
+ flush.processor_mask = sse128_lo(hc->xmm[0]);
+ } else {
+ if (unlikely(kvm_read_guest(kvm, hc->ingpa,
+ &flush, sizeof(flush))))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
trace_kvm_hv_flush_tlb(flush.processor_mask,
flush.address_space, flush.flags);
@@ -1586,9 +1775,16 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
flush.processor_mask == 0;
} else {
- if (unlikely(kvm_read_guest(kvm, ingpa, &flush_ex,
- sizeof(flush_ex))))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (hc->fast) {
+ flush_ex.address_space = hc->ingpa;
+ flush_ex.flags = hc->outgpa;
+ memcpy(&flush_ex.hv_vp_set,
+ &hc->xmm[0], sizeof(hc->xmm[0]));
+ } else {
+ if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
+ sizeof(flush_ex))))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
flush_ex.hv_vp_set.format,
@@ -1599,20 +1795,28 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
all_cpus = flush_ex.hv_vp_set.format !=
HV_GENERIC_SET_SPARSE_4K;
- sparse_banks_len =
- bitmap_weight((unsigned long *)&valid_bank_mask, 64) *
- sizeof(sparse_banks[0]);
+ sparse_banks_len = bitmap_weight((unsigned long *)&valid_bank_mask, 64);
if (!sparse_banks_len && !all_cpus)
goto ret_success;
- if (!all_cpus &&
- kvm_read_guest(kvm,
- ingpa + offsetof(struct hv_tlb_flush_ex,
- hv_vp_set.bank_contents),
- sparse_banks,
- sparse_banks_len))
- return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ if (!all_cpus) {
+ if (hc->fast) {
+ if (sparse_banks_len > HV_HYPERCALL_MAX_XMM_REGISTERS - 1)
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ for (i = 0; i < sparse_banks_len; i += 2) {
+ sparse_banks[i] = sse128_lo(hc->xmm[i / 2 + 1]);
+ sparse_banks[i + 1] = sse128_hi(hc->xmm[i / 2 + 1]);
+ }
+ } else {
+ gpa = hc->ingpa + offsetof(struct hv_tlb_flush_ex,
+ hv_vp_set.bank_contents);
+ if (unlikely(kvm_read_guest(kvm, gpa, sparse_banks,
+ sparse_banks_len *
+ sizeof(sparse_banks[0]))))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
+ }
}
cpumask_clear(&hv_vcpu->tlb_flush);
@@ -1625,13 +1829,13 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, u64 ingpa, u16 rep_cnt, bool
* vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
* analyze it here, flush TLB regardless of the specified address space.
*/
- kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
+ kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
NULL, vcpu_mask, &hv_vcpu->tlb_flush);
ret_success:
- /* We always do full TLB flush, set rep_done = rep_cnt. */
+ /* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */
return (u64)HV_STATUS_SUCCESS |
- ((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
+ ((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}
static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
@@ -1653,8 +1857,7 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
}
}
-static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, u64 ingpa, u64 outgpa,
- bool ex, bool fast)
+static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool ex)
{
struct kvm *kvm = vcpu->kvm;
struct hv_send_ipi_ex send_ipi_ex;
@@ -1669,25 +1872,25 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, u64 ingpa, u64 outgpa,
bool all_cpus;
if (!ex) {
- if (!fast) {
- if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi,
+ if (!hc->fast) {
+ if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
sizeof(send_ipi))))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
sparse_banks[0] = send_ipi.cpu_mask;
vector = send_ipi.vector;
} else {
/* 'reserved' part of hv_send_ipi should be 0 */
- if (unlikely(ingpa >> 32 != 0))
+ if (unlikely(hc->ingpa >> 32 != 0))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
- sparse_banks[0] = outgpa;
- vector = (u32)ingpa;
+ sparse_banks[0] = hc->outgpa;
+ vector = (u32)hc->ingpa;
}
all_cpus = false;
valid_bank_mask = BIT_ULL(0);
trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
} else {
- if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
+ if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
sizeof(send_ipi_ex))))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
@@ -1707,8 +1910,8 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, u64 ingpa, u64 outgpa,
if (!all_cpus &&
kvm_read_guest(kvm,
- ingpa + offsetof(struct hv_send_ipi_ex,
- vp_set.bank_contents),
+ hc->ingpa + offsetof(struct hv_send_ipi_ex,
+ vp_set.bank_contents),
sparse_banks,
sparse_banks_len))
return HV_STATUS_INVALID_HYPERCALL_INPUT;
@@ -1730,12 +1933,67 @@ ret_success:
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *entry;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
- if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX)
+ if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
vcpu->arch.hyperv_enabled = true;
- else
+ } else {
vcpu->arch.hyperv_enabled = false;
+ return;
+ }
+
+ if (!to_hv_vcpu(vcpu) && kvm_hv_vcpu_init(vcpu))
+ return;
+
+ hv_vcpu = to_hv_vcpu(vcpu);
+
+ entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES, 0);
+ if (entry) {
+ hv_vcpu->cpuid_cache.features_eax = entry->eax;
+ hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
+ hv_vcpu->cpuid_cache.features_edx = entry->edx;
+ } else {
+ hv_vcpu->cpuid_cache.features_eax = 0;
+ hv_vcpu->cpuid_cache.features_ebx = 0;
+ hv_vcpu->cpuid_cache.features_edx = 0;
+ }
+
+ entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO, 0);
+ if (entry) {
+ hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
+ hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
+ } else {
+ hv_vcpu->cpuid_cache.enlightenments_eax = 0;
+ hv_vcpu->cpuid_cache.enlightenments_ebx = 0;
+ }
+
+ entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0);
+ if (entry)
+ hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
+ else
+ hv_vcpu->cpuid_cache.syndbg_cap_eax = 0;
+}
+
+int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
+{
+ struct kvm_vcpu_hv *hv_vcpu;
+ int ret = 0;
+
+ if (!to_hv_vcpu(vcpu)) {
+ if (enforce) {
+ ret = kvm_hv_vcpu_init(vcpu);
+ if (ret)
+ return ret;
+ } else {
+ return 0;
+ }
+ }
+
+ hv_vcpu = to_hv_vcpu(vcpu);
+ hv_vcpu->enforce_cpuid = enforce;
+
+ return ret;
}
bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
@@ -1768,20 +2026,21 @@ static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}
-static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
+static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
struct eventfd_ctx *eventfd;
- if (unlikely(!fast)) {
+ if (unlikely(!hc->fast)) {
int ret;
- gpa_t gpa = param;
+ gpa_t gpa = hc->ingpa;
- if ((gpa & (__alignof__(param) - 1)) ||
- offset_in_page(gpa) + sizeof(param) > PAGE_SIZE)
+ if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
+ offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
return HV_STATUS_INVALID_ALIGNMENT;
- ret = kvm_vcpu_read_guest(vcpu, gpa, &param, sizeof(param));
+ ret = kvm_vcpu_read_guest(vcpu, gpa,
+ &hc->ingpa, sizeof(hc->ingpa));
if (ret < 0)
return HV_STATUS_INVALID_ALIGNMENT;
}
@@ -1791,15 +2050,15 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
* have no use for it, and in all known usecases it is zero, so just
* report lookup failure if it isn't.
*/
- if (param & 0xffff00000000ULL)
+ if (hc->ingpa & 0xffff00000000ULL)
return HV_STATUS_INVALID_PORT_ID;
/* remaining bits are reserved-zero */
- if (param & ~KVM_HYPERV_CONN_ID_MASK)
+ if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
return HV_STATUS_INVALID_HYPERCALL_INPUT;
/* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
rcu_read_lock();
- eventfd = idr_find(&hv->conn_to_evt, param);
+ eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
rcu_read_unlock();
if (!eventfd)
return HV_STATUS_INVALID_PORT_ID;
@@ -1808,11 +2067,80 @@ static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, bool fast, u64 param)
return HV_STATUS_SUCCESS;
}
+static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
+{
+ switch (hc->code) {
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
+ return true;
+ }
+
+ return false;
+}
+
+static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
+{
+ int reg;
+
+ kvm_fpu_get();
+ for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
+ _kvm_read_sse_reg(reg, &hc->xmm[reg]);
+ kvm_fpu_put();
+}
+
+static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
+{
+ if (!hv_vcpu->enforce_cpuid)
+ return true;
+
+ switch (code) {
+ case HVCALL_NOTIFY_LONG_SPIN_WAIT:
+ return hv_vcpu->cpuid_cache.enlightenments_ebx &&
+ hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
+ case HVCALL_POST_MESSAGE:
+ return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
+ case HVCALL_SIGNAL_EVENT:
+ return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
+ case HVCALL_POST_DEBUG_DATA:
+ case HVCALL_RETRIEVE_DEBUG_DATA:
+ case HVCALL_RESET_DEBUG_SESSION:
+ /*
+ * Return 'true' when SynDBG is disabled so the resulting code
+ * will be HV_STATUS_INVALID_HYPERCALL_CODE.
+ */
+ return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
+ hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
+ if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
+ HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return false;
+ fallthrough;
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
+ case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
+ return hv_vcpu->cpuid_cache.enlightenments_eax &
+ HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
+ case HVCALL_SEND_IPI_EX:
+ if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
+ HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return false;
+ fallthrough;
+ case HVCALL_SEND_IPI:
+ return hv_vcpu->cpuid_cache.enlightenments_eax &
+ HV_X64_CLUSTER_IPI_RECOMMENDED;
+ default:
+ break;
+ }
+
+ return true;
+}
+
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
- u64 param, ingpa, outgpa, ret = HV_STATUS_SUCCESS;
- uint16_t code, rep_idx, rep_cnt;
- bool fast, rep;
+ struct kvm_hv_hcall hc;
+ u64 ret = HV_STATUS_SUCCESS;
/*
* hypercall generates UD from non zero cpl and real mode
@@ -1825,104 +2153,113 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
#ifdef CONFIG_X86_64
if (is_64_bit_mode(vcpu)) {
- param = kvm_rcx_read(vcpu);
- ingpa = kvm_rdx_read(vcpu);
- outgpa = kvm_r8_read(vcpu);
+ hc.param = kvm_rcx_read(vcpu);
+ hc.ingpa = kvm_rdx_read(vcpu);
+ hc.outgpa = kvm_r8_read(vcpu);
} else
#endif
{
- param = ((u64)kvm_rdx_read(vcpu) << 32) |
- (kvm_rax_read(vcpu) & 0xffffffff);
- ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
- (kvm_rcx_read(vcpu) & 0xffffffff);
- outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
- (kvm_rsi_read(vcpu) & 0xffffffff);
+ hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
+ (kvm_rax_read(vcpu) & 0xffffffff);
+ hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
+ (kvm_rcx_read(vcpu) & 0xffffffff);
+ hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
+ (kvm_rsi_read(vcpu) & 0xffffffff);
}
- code = param & 0xffff;
- fast = !!(param & HV_HYPERCALL_FAST_BIT);
- rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
- rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
- rep = !!(rep_cnt || rep_idx);
+ hc.code = hc.param & 0xffff;
+ hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
+ hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
+ hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
+ hc.rep = !!(hc.rep_cnt || hc.rep_idx);
- trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
+ if (hc.fast && is_xmm_fast_hypercall(&hc))
+ kvm_hv_hypercall_read_xmm(&hc);
- switch (code) {
+ trace_kvm_hv_hypercall(hc.code, hc.fast, hc.rep_cnt, hc.rep_idx,
+ hc.ingpa, hc.outgpa);
+
+ if (unlikely(!hv_check_hypercall_access(to_hv_vcpu(vcpu), hc.code))) {
+ ret = HV_STATUS_ACCESS_DENIED;
+ goto hypercall_complete;
+ }
+
+ switch (hc.code) {
case HVCALL_NOTIFY_LONG_SPIN_WAIT:
- if (unlikely(rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
kvm_vcpu_on_spin(vcpu, true);
break;
case HVCALL_SIGNAL_EVENT:
- if (unlikely(rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hvcall_signal_event(vcpu, fast, ingpa);
+ ret = kvm_hvcall_signal_event(vcpu, &hc);
if (ret != HV_STATUS_INVALID_PORT_ID)
break;
fallthrough; /* maybe userspace knows this conn_id */
case HVCALL_POST_MESSAGE:
/* don't bother userspace if it has no way to handle it */
- if (unlikely(rep || !to_hv_synic(vcpu)->active)) {
+ if (unlikely(hc.rep || !to_hv_synic(vcpu)->active)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
vcpu->run->exit_reason = KVM_EXIT_HYPERV;
vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
- vcpu->run->hyperv.u.hcall.input = param;
- vcpu->run->hyperv.u.hcall.params[0] = ingpa;
- vcpu->run->hyperv.u.hcall.params[1] = outgpa;
+ vcpu->run->hyperv.u.hcall.input = hc.param;
+ vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
+ vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
vcpu->arch.complete_userspace_io =
kvm_hv_hypercall_complete_userspace;
return 0;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
- if (unlikely(fast || !rep_cnt || rep_idx)) {
+ if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, false);
break;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
- if (unlikely(fast || rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, false);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, false);
break;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
- if (unlikely(fast || !rep_cnt || rep_idx)) {
+ if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, true);
break;
case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
- if (unlikely(fast || rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
+ ret = kvm_hv_flush_tlb(vcpu, &hc, true);
break;
case HVCALL_SEND_IPI:
- if (unlikely(rep)) {
+ if (unlikely(hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast);
+ ret = kvm_hv_send_ipi(vcpu, &hc, false);
break;
case HVCALL_SEND_IPI_EX:
- if (unlikely(fast || rep)) {
+ if (unlikely(hc.fast || hc.rep)) {
ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
break;
}
- ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
+ ret = kvm_hv_send_ipi(vcpu, &hc, true);
break;
case HVCALL_POST_DEBUG_DATA:
case HVCALL_RETRIEVE_DEBUG_DATA:
- if (unlikely(fast)) {
+ if (unlikely(hc.fast)) {
ret = HV_STATUS_INVALID_PARAMETER;
break;
}
@@ -1941,9 +2278,9 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
}
vcpu->run->exit_reason = KVM_EXIT_HYPERV;
vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
- vcpu->run->hyperv.u.hcall.input = param;
- vcpu->run->hyperv.u.hcall.params[0] = ingpa;
- vcpu->run->hyperv.u.hcall.params[1] = outgpa;
+ vcpu->run->hyperv.u.hcall.input = hc.param;
+ vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
+ vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
vcpu->arch.complete_userspace_io =
kvm_hv_hypercall_complete_userspace;
return 0;
@@ -1953,6 +2290,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
break;
}
+hypercall_complete:
return kvm_hv_hypercall_complete(vcpu, ret);
}
@@ -2101,6 +2439,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
ent->ebx |= HV_POST_MESSAGES;
ent->ebx |= HV_SIGNAL_EVENTS;
+ ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
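
To make the fast-call plumbing concrete: a hedged sketch of how the XMM
fast variant of HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE is unpacked, mirroring
the kvm_hv_flush_tlb() fast path above (struct hv_tlb_flush is the
TLFS-defined input layout):

	static void example_unpack_fast_flush(const struct kvm_hv_hcall *hc,
					      struct hv_tlb_flush *flush)
	{
		flush->address_space  = hc->ingpa;	/* RDX in 64-bit mode */
		flush->flags          = hc->outgpa;	/* R8 in 64-bit mode */
		flush->processor_mask = sse128_lo(hc->xmm[0]); /* XMM0 low half */
	}
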
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index e951af1fcb2c..730da8537d05 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -133,10 +133,12 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);
void kvm_hv_setup_tsc_page(struct kvm *kvm,
struct pvclock_vcpu_time_info *hv_clock);
+void kvm_hv_invalidate_tsc_page(struct kvm *kvm);
void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu);
+int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 8a4de3f12820..d5b72a08e566 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -269,7 +269,7 @@ int kvm_set_routing_entry(struct kvm *kvm,
const struct kvm_irq_routing_entry *ue)
{
/* We can't check irqchip_in_kernel() here as some callers are
- * currently inititalizing the irqchip. Other callers should therefore
+ * currently initializing the irqchip. Other callers should therefore
* check kvm_arch_can_set_irq_routing() before calling this function.
*/
switch (ue->type) {
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 2e11da2f5621..90e1ffdc05b7 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -55,6 +55,13 @@ static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
+static inline void kvm_register_clear_available(struct kvm_vcpu *vcpu,
+ enum kvm_reg reg)
+{
+ __clear_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
+ __clear_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
+}
+
static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
{
@@ -62,7 +69,12 @@ static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}
-static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
+/*
+ * The "raw" register helpers are only for cases where the full 64 bits of a
+ * register are read/written irrespective of current vCPU mode. In other words,
+ * odds are good you shouldn't be using the raw variants.
+ */
+static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
return 0;
@@ -73,8 +85,8 @@ static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
return vcpu->arch.regs[reg];
}
-static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
- unsigned long val)
+static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
+ unsigned long val)
{
if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
return;
@@ -85,22 +97,22 @@ static inline void kvm_register_write(struct kvm_vcpu *vcpu, int reg,
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
- return kvm_register_read(vcpu, VCPU_REGS_RIP);
+ return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}
static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
- kvm_register_write(vcpu, VCPU_REGS_RIP, val);
+ kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}
static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
- return kvm_register_read(vcpu, VCPU_REGS_RSP);
+ return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}
static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
- kvm_register_write(vcpu, VCPU_REGS_RSP, val);
+ kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
@@ -113,6 +125,11 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
return vcpu->arch.walk_mmu->pdptrs[index];
}
+static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
+{
+ vcpu->arch.walk_mmu->pdptrs[index] = value;
+}
+
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
@@ -157,6 +174,7 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
vcpu->arch.hflags |= HF_GUEST_MASK;
+ vcpu->stat.guest_mode = 1;
}
static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
@@ -167,6 +185,8 @@ static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
vcpu->arch.load_eoi_exitmap_pending = false;
kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
}
+
+ vcpu->stat.guest_mode = 0;
}
static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
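
The comment above implies mode-aware, non-raw counterparts; a hedged sketch
of their likely shape (the actual helpers live outside this hunk):

	static inline unsigned long example_register_read(struct kvm_vcpu *vcpu,
							  int reg)
	{
		unsigned long val = kvm_register_read_raw(vcpu, reg);

		/* Outside 64-bit mode only the low 32 bits are visible. */
		return is_64_bit_mode(vcpu) ? val : (u32)val;
	}
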
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index 0d359115429a..68b420289d7e 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -13,6 +13,7 @@
#define _ASM_X86_KVM_X86_EMULATE_H
#include <asm/desc_defs.h>
+#include "fpu.h"
struct x86_emulate_ctxt;
enum x86_intercept;
@@ -229,15 +230,12 @@ struct x86_emulate_ops {
void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
- void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
- int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
- const char *smstate);
- void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
+ void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
+ int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const char *smstate);
+ void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
};
-typedef u32 __attribute__((vector_size(16))) sse128_t;
-
/* Type, address-of, and value of an instruction's operand. */
struct operand {
enum { OP_REG, OP_MEM, OP_MEM_STR, OP_IMM, OP_XMM, OP_MM, OP_NONE } type;
@@ -314,7 +312,6 @@ struct x86_emulate_ctxt {
int interruptibility;
bool perm_ok; /* do not check permissions if true */
- bool ud; /* inject an #UD if host doesn't support insn */
bool tf; /* TF value before instruction (after for syscall/sysret) */
bool have_exception;
@@ -468,6 +465,7 @@ enum x86_intercept {
x86_intercept_clgi,
x86_intercept_skinit,
x86_intercept_rdtscp,
+ x86_intercept_rdpid,
x86_intercept_icebp,
x86_intercept_wbinvd,
x86_intercept_monitor,
@@ -490,7 +488,7 @@ enum x86_intercept {
#define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
#endif
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len, int emulation_type);
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_FAILED -1
#define EMULATION_OK 0
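
The reworked SMM callbacks give em_rsm() a cleaner failure path; a hedged
sketch of the resulting control flow, condensed from the emulate.c hunk
above (not the verbatim function):

	static int example_rsm_tail(struct x86_emulate_ctxt *ctxt, const char *buf)
	{
		ctxt->ops->exiting_smm(ctxt);		/* drop HF_SMM_* state */

		if (ctxt->ops->leave_smm(ctxt, buf))	/* vendor-specific fixup */
			goto emulate_shutdown;

		return X86EMUL_CONTINUE;

	emulate_shutdown:
		ctxt->ops->triple_fault(ctxt);		/* request shutdown */
		return X86EMUL_CONTINUE;
	}
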
diff --git a/arch/x86/kvm/kvm_onhyperv.c b/arch/x86/kvm/kvm_onhyperv.c
new file mode 100644
index 000000000000..c7db2df50a7a
--- /dev/null
+++ b/arch/x86/kvm/kvm_onhyperv.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KVM L1 hypervisor optimizations on Hyper-V.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/mshyperv.h>
+
+#include "hyperv.h"
+#include "kvm_onhyperv.h"
+
+static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
+ void *data)
+{
+ struct kvm_tlb_range *range = data;
+
+ return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
+ range->pages);
+}
+
+static inline int hv_remote_flush_root_tdp(hpa_t root_tdp,
+ struct kvm_tlb_range *range)
+{
+ if (range)
+ return hyperv_flush_guest_mapping_range(root_tdp,
+ kvm_fill_hv_flush_list_func, (void *)range);
+ else
+ return hyperv_flush_guest_mapping(root_tdp);
+}
+
+int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+ struct kvm_tlb_range *range)
+{
+ struct kvm_arch *kvm_arch = &kvm->arch;
+ struct kvm_vcpu *vcpu;
+ int ret = 0, i, nr_unique_valid_roots;
+ hpa_t root;
+
+ spin_lock(&kvm_arch->hv_root_tdp_lock);
+
+ if (!VALID_PAGE(kvm_arch->hv_root_tdp)) {
+ nr_unique_valid_roots = 0;
+
+ /*
+ * Flush all valid roots, and see if all vCPUs have converged
+ * on a common root, in which case future flushes can skip the
+ * loop and flush the common root.
+ */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ root = vcpu->arch.hv_root_tdp;
+ if (!VALID_PAGE(root) || root == kvm_arch->hv_root_tdp)
+ continue;
+
+ /*
+ * Set the tracked root to the first valid root. Keep
+ * this root for the entirety of the loop even if more
+ * roots are encountered as a low effort optimization
+ * to avoid flushing the same (first) root again.
+ */
+ if (++nr_unique_valid_roots == 1)
+ kvm_arch->hv_root_tdp = root;
+
+ if (!ret)
+ ret = hv_remote_flush_root_tdp(root, range);
+
+ /*
+ * Stop processing roots if a failure occurred and
+ * multiple valid roots have already been detected.
+ */
+ if (ret && nr_unique_valid_roots > 1)
+ break;
+ }
+
+ /*
+ * The optimized flush of a single root can't be used if there
+ * are multiple valid roots (obviously).
+ */
+ if (nr_unique_valid_roots > 1)
+ kvm_arch->hv_root_tdp = INVALID_PAGE;
+ } else {
+ ret = hv_remote_flush_root_tdp(kvm_arch->hv_root_tdp, range);
+ }
+
+ spin_unlock(&kvm_arch->hv_root_tdp_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(hv_remote_flush_tlb_with_range);
+
+int hv_remote_flush_tlb(struct kvm *kvm)
+{
+ return hv_remote_flush_tlb_with_range(kvm, NULL);
+}
+EXPORT_SYMBOL_GPL(hv_remote_flush_tlb);
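
A hedged sketch of how a vendor module is expected to opt in; the
ms_hyperv capability check mirrors the existing VMX-on-Hyper-V wiring and
should be treated as an assumption here:

	#if IS_ENABLED(CONFIG_HYPERV)
	static void example_setup_hv_tlb_flush(struct kvm_x86_ops *ops)
	{
		if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH) {
			ops->tlb_remote_flush = hv_remote_flush_tlb;
			ops->tlb_remote_flush_with_range =
					hv_remote_flush_tlb_with_range;
		}
	}
	#endif
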
diff --git a/arch/x86/kvm/kvm_onhyperv.h b/arch/x86/kvm/kvm_onhyperv.h
new file mode 100644
index 000000000000..1c67abf2eba9
--- /dev/null
+++ b/arch/x86/kvm/kvm_onhyperv.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * KVM L1 hypervisor optimizations on Hyper-V.
+ */
+
+#ifndef __ARCH_X86_KVM_KVM_ONHYPERV_H__
+#define __ARCH_X86_KVM_KVM_ONHYPERV_H__
+
+#if IS_ENABLED(CONFIG_HYPERV)
+int hv_remote_flush_tlb_with_range(struct kvm *kvm,
+ struct kvm_tlb_range *range);
+int hv_remote_flush_tlb(struct kvm *kvm);
+
+static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
+{
+ struct kvm_arch *kvm_arch = &vcpu->kvm->arch;
+
+ if (kvm_x86_ops.tlb_remote_flush == hv_remote_flush_tlb) {
+ spin_lock(&kvm_arch->hv_root_tdp_lock);
+ vcpu->arch.hv_root_tdp = root_tdp;
+ if (root_tdp != kvm_arch->hv_root_tdp)
+ kvm_arch->hv_root_tdp = INVALID_PAGE;
+ spin_unlock(&kvm_arch->hv_root_tdp_lock);
+ }
+}
+#else /* !CONFIG_HYPERV */
+static inline void hv_track_root_tdp(struct kvm_vcpu *vcpu, hpa_t root_tdp)
+{
+}
+#endif /* !CONFIG_HYPERV */
+
+#endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 45d40bfacb7c..ba5a27879f1d 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -296,6 +296,10 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}
+
+ /* Check if there are APF page ready requests pending */
+ if (enabled)
+ kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
}
static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
@@ -1406,6 +1410,9 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
if (!apic_x2apic_mode(apic))
valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
+ if (alignment + len > 4)
+ return 1;
+
if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
return 1;
@@ -1490,6 +1497,15 @@ static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
static void cancel_hv_timer(struct kvm_lapic *apic);
+static void cancel_apic_timer(struct kvm_lapic *apic)
+{
+ hrtimer_cancel(&apic->lapic_timer.timer);
+ preempt_disable();
+ if (apic->lapic_timer.hv_timer_in_use)
+ cancel_hv_timer(apic);
+ preempt_enable();
+}
+
static void apic_update_lvtt(struct kvm_lapic *apic)
{
u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
@@ -1498,11 +1514,7 @@ static void apic_update_lvtt(struct kvm_lapic *apic)
if (apic->lapic_timer.timer_mode != timer_mode) {
if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
APIC_LVT_TIMER_TSCDEADLINE)) {
- hrtimer_cancel(&apic->lapic_timer.timer);
- preempt_disable();
- if (apic->lapic_timer.hv_timer_in_use)
- cancel_hv_timer(apic);
- preempt_enable();
+ cancel_apic_timer(apic);
kvm_lapic_set_reg(apic, APIC_TMICT, 0);
apic->lapic_timer.period = 0;
apic->lapic_timer.tscdeadline = 0;
@@ -1594,11 +1606,19 @@ static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;
+ if (lapic_timer_advance_dynamic) {
+ adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
+ /*
+ * If the timer fired early, reread the TSC to account for the
+ * overhead of the above adjustment to avoid waiting longer
+ * than is necessary.
+ */
+ if (guest_tsc < tsc_deadline)
+ guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
+ }
+
if (guest_tsc < tsc_deadline)
__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
-
- if (lapic_timer_advance_dynamic)
- adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
}
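The reordering in this hunk runs the dynamic adjustment before the busy-wait and then re-samples the TSC, so the adjustment's own overhead is not mistaken for remaining wait time. The same pattern in a stripped-down form (stand-in names, not KVM symbols):

	/* Illustrative sketch of "adjust, then re-sample before waiting". */
	u64 now = read_clock();              /* cf. kvm_read_l1_tsc(rdtsc()) */

	tune_advance(now - deadline);        /* cf. adjust_lapic_timer_advance() */

	if (now < deadline)
		now = read_clock();          /* exclude tune_advance()'s cost */
	while (now < deadline)
		now = read_clock();          /* wait out the true remainder */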
void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
@@ -1642,13 +1662,22 @@ static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
}
if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
- kvm_wait_lapic_expire(vcpu);
+ /*
+ * Ensure the guest's timer has truly expired before posting an
+ * interrupt. Open code the relevant checks to avoid querying
+ * lapic_timer_int_injected(), which will be false since the
+ * interrupt isn't yet injected. Waiting until after injecting
+ * is not an option since that won't help a posted interrupt.
+ */
+ if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
+ vcpu->arch.apic->lapic_timer.timer_advance_ns)
+ __kvm_wait_lapic_expire(vcpu);
kvm_apic_inject_pending_timer_irqs(apic);
return;
}
atomic_inc(&apic->lapic_timer.pending);
- kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+ kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
if (from_timer_fn)
kvm_vcpu_kick(vcpu);
}
@@ -1900,8 +1929,8 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
if (!apic->lapic_timer.hv_timer_in_use)
goto out;
WARN_ON(rcuwait_active(&vcpu->wait));
- cancel_hv_timer(apic);
apic_timer_expired(apic, false);
+ cancel_hv_timer(apic);
if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
advance_periodic_target_expiration(apic);
@@ -2071,7 +2100,7 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
if (apic_lvtt_tscdeadline(apic))
break;
- hrtimer_cancel(&apic->lapic_timer.timer);
+ cancel_apic_timer(apic);
kvm_lapic_set_reg(apic, APIC_TMICT, val);
start_apic_timer(apic);
break;
@@ -2252,6 +2281,8 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
if (value & MSR_IA32_APICBASE_ENABLE) {
kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
static_branch_slow_dec_deferred(&apic_hw_disabled);
+ /* Check if there are APF page ready requests pending */
+ kvm_make_request(KVM_REQ_APF_READY, vcpu);
} else {
static_branch_inc(&apic_hw_disabled.key);
atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
@@ -2595,10 +2626,12 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
apic_update_ppr(apic);
hrtimer_cancel(&apic->lapic_timer.timer);
+ apic->lapic_timer.expired_tscdeadline = 0;
apic_update_lvtt(apic);
apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
update_divide_count(apic);
__start_apic_timer(apic, APIC_TMCCT);
+ kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
kvm_apic_update_apicv(vcpu);
apic->highest_isr_cache = -1;
if (vcpu->arch.apicv_active) {
@@ -2840,7 +2873,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
}
-void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
+int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
u8 sipi_vector;
@@ -2848,7 +2881,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
unsigned long pe;
if (!lapic_in_kernel(vcpu))
- return;
+ return 0;
/*
* Read pending events before calling the check_events
@@ -2856,12 +2889,12 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
*/
pe = smp_load_acquire(&apic->pending_events);
if (!pe)
- return;
+ return 0;
if (is_guest_mode(vcpu)) {
- r = kvm_x86_ops.nested_ops->check_events(vcpu);
+ r = kvm_check_nested_events(vcpu);
if (r < 0)
- return;
+ return r == -EBUSY ? 0 : r;
/*
* If an event has happened and caused a vmexit,
* we know INITs are latched and therefore
@@ -2882,7 +2915,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
if (test_bit(KVM_APIC_SIPI, &pe))
clear_bit(KVM_APIC_SIPI, &apic->pending_events);
- return;
+ return 0;
}
if (test_bit(KVM_APIC_INIT, &pe)) {
@@ -2903,6 +2936,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
}
+ return 0;
}
void kvm_lapic_exit(void)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 997c45a5963a..d7c25d0c1354 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -76,7 +76,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
-void kvm_apic_accept_events(struct kvm_vcpu *vcpu);
+int kvm_apic_accept_events(struct kvm_vcpu *vcpu);
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event);
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index c68bfc3e2402..83e6c6965f1e 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -44,6 +44,12 @@
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
+#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | \
+ X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE | \
+ X86_CR4_LA57)
+
+#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
+
static __always_inline u64 rsvd_bits(int s, int e)
{
BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);
@@ -59,20 +65,22 @@ static __always_inline u64 rsvd_bits(int s, int e)
return ((2ULL << (e - s)) - 1) << s;
}
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);
-
-void
-reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
+void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
-void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
-void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
- gpa_t nested_cr3);
+void kvm_init_mmu(struct kvm_vcpu *vcpu);
+void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
+ unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
u64 fault_address, char *insn, int insn_len);
+int kvm_mmu_load(struct kvm_vcpu *vcpu);
+void kvm_mmu_unload(struct kvm_vcpu *vcpu);
+void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
@@ -102,8 +110,8 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
if (!VALID_PAGE(root_hpa))
return;
- static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa | kvm_get_active_pcid(vcpu),
- vcpu->arch.mmu->shadow_root_level);
+ static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
+ vcpu->arch.mmu->shadow_root_level);
}
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
@@ -124,7 +132,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
* write-protects guest page to sync the guest modification, b) another one is
* used to sync dirty bitmap when we do KVM_GET_DIRTY_LOG. The differences
* between these two sorts are:
- * 1) the first case clears SPTE_MMU_WRITEABLE bit.
+ * 1) the first case clears MMU-writable bit.
* 2) the first case requires flushing tlb immediately to avoid corrupting the
* shadow page table shared between all vcpus, so it should be done under the
* protection of mmu-lock. The other case does not need to flush tlb until returning
@@ -135,17 +143,17 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
* So, there is the problem: the first case can meet a corrupted tlb caused
* by the other case, which write-protects pages without flushing the tlb
* immediately. In order to make the first case aware of this problem, we let
- * it flush tlb if we try to write-protect a spte whose SPTE_MMU_WRITEABLE bit
- * is set, it works since another case never touches SPTE_MMU_WRITEABLE bit.
+ * it flush tlb if we try to write-protect a spte whose MMU-writable bit
+ * is set; this works since the other case never touches the MMU-writable bit.
*
* Anyway, whenever a spte is updated (only permission and status bits are
- * changed) we need to check whether the spte with SPTE_MMU_WRITEABLE becomes
+ * changed) we need to check whether the spte with MMU-writable set becomes
* readonly; if that happens, we need to flush tlb. Fortunately,
* mmu_spte_update() has already handled it perfectly.
*
- * The rules to use SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
+ * The rules to use MMU-writable and PT_WRITABLE_MASK:
* - if we want to see if it has writable tlb entry or if the spte can be
- * writable on the mmu mapping, check SPTE_MMU_WRITEABLE, this is the most
+ * writable on the mmu mapping, check MMU-writable; this is the most
* common case, otherwise
* - if we fix page fault on the spte or do write-protection by dirty logging,
* check PT_WRITABLE_MASK.
@@ -157,11 +165,6 @@ static inline bool is_writable_pte(unsigned long pte)
return pte & PT_WRITABLE_MASK;
}
-static inline bool is_write_protection(struct kvm_vcpu *vcpu)
-{
- return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
-}
-
/*
* Check if a given access (described through the I/D, W/R and U/S bits of a
* page fault error code pfec) causes a permission fault with the given PTE
@@ -227,4 +230,14 @@ int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
+static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
+{
+ /*
+ * Read memslots_have_rmaps before rmap pointers. Hence, threads reading
+ * memslots_have_rmaps in any lock context are guaranteed to see the
+ * pointers. Pairs with smp_store_release in alloc_all_memslots_rmaps.
+ */
+ return smp_load_acquire(&kvm->arch.memslots_have_rmaps);
+}
+
#endif
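The smp_load_acquire() above is one half of a publish/subscribe pairing: the producer must fully populate the rmap arrays before flipping the flag with a release store. A sketch of the producer side, cf. alloc_all_memslots_rmaps() added elsewhere in this series (shape shown here as an assumption):

	mutex_lock(&kvm->slots_arch_lock);
	/* ... allocate and zero slot->arch.rmap[] for every memslot ... */
	smp_store_release(&kvm->arch.memslots_have_rmaps, true);
	mutex_unlock(&kvm->slots_arch_lock);

Readers that observe the flag as true are then guaranteed to also observe the initialized pointers, without taking any lock.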
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d75524bc8423..845d114ae075 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -48,13 +48,14 @@
#include <asm/memtype.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
+#include <asm/set_memory.h>
#include <asm/vmx.h>
#include <asm/kvm_page_track.h>
#include "trace.h"
extern bool itlb_multihit_kvm_mitigation;
-static int __read_mostly nx_huge_pages = -1;
+int __read_mostly nx_huge_pages = -1;
#ifdef CONFIG_PREEMPT_RT
/* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
@@ -175,9 +176,80 @@ static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
+struct kvm_mmu_role_regs {
+ const unsigned long cr0;
+ const unsigned long cr4;
+ const u64 efer;
+};
+
#define CREATE_TRACE_POINTS
#include "mmutrace.h"
+/*
+ * Yes, lots of underscores. They're a hint that you probably shouldn't be
+ * reading from the role_regs. Once the mmu_role is constructed, it becomes
+ * the single source of truth for the MMU's state.
+ */
+#define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag) \
+static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
+{ \
+ return !!(regs->reg & flag); \
+}
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, wp, X86_CR0_WP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pse, X86_CR4_PSE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smep, X86_CR4_SMEP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, smap, X86_CR4_SMAP);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pke, X86_CR4_PKE);
+BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, la57, X86_CR4_LA57);
+BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
+BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
+
+/*
+ * The MMU itself (with a valid role) is the single source of truth for the
+ * MMU. Do not use the regs used to build the MMU/role, nor the vCPU. The
+ * regs don't account for dependencies, e.g. clearing CR4 bits if CR0.PG=1,
+ * and the vCPU may be incorrect/irrelevant.
+ */
+#define BUILD_MMU_ROLE_ACCESSOR(base_or_ext, reg, name) \
+static inline bool is_##reg##_##name(struct kvm_mmu *mmu) \
+{ \
+ return !!(mmu->mmu_role. base_or_ext . reg##_##name); \
+}
+BUILD_MMU_ROLE_ACCESSOR(ext, cr0, pg);
+BUILD_MMU_ROLE_ACCESSOR(base, cr0, wp);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pse);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pae);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smep);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, smap);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, pke);
+BUILD_MMU_ROLE_ACCESSOR(ext, cr4, la57);
+BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
+
+static struct kvm_mmu_role_regs vcpu_to_role_regs(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu_role_regs regs = {
+ .cr0 = kvm_read_cr0_bits(vcpu, KVM_MMU_CR0_ROLE_BITS),
+ .cr4 = kvm_read_cr4_bits(vcpu, KVM_MMU_CR4_ROLE_BITS),
+ .efer = vcpu->arch.efer,
+ };
+
+ return regs;
+}
+
+static int role_regs_to_root_level(struct kvm_mmu_role_regs *regs)
+{
+ if (!____is_cr0_pg(regs))
+ return 0;
+ else if (____is_efer_lma(regs))
+ return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
+ PT64_ROOT_4LEVEL;
+ else if (____is_cr4_pae(regs))
+ return PT32E_ROOT_LEVEL;
+ else
+ return PT32_ROOT_LEVEL;
+}
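For reference, each BUILD_MMU_ROLE_REGS_ACCESSOR() invocation expands to a one-line predicate over the captured registers; the first one, BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG), generates exactly:

static inline bool ____is_cr0_pg(struct kvm_mmu_role_regs *regs)
{
	return !!(regs->cr0 & X86_CR0_PG);
}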
static inline bool kvm_available_flush_tlb_with_range(void)
{
@@ -207,18 +279,13 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
kvm_flush_remote_tlbs_with_range(kvm, &range);
}
-bool is_nx_huge_page_enabled(void)
-{
- return READ_ONCE(nx_huge_pages);
-}
-
static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
unsigned int access)
{
- u64 mask = make_mmio_spte(vcpu, gfn, access);
+ u64 spte = make_mmio_spte(vcpu, gfn, access);
- trace_mark_mmio_spte(sptep, gfn, mask);
- mmu_spte_set(sptep, mask);
+ trace_mark_mmio_spte(sptep, gfn, spte);
+ mmu_spte_set(sptep, spte);
}
static gfn_t get_mmio_spte_gfn(u64 spte)
@@ -236,17 +303,6 @@ static unsigned get_mmio_spte_access(u64 spte)
return spte & shadow_mmio_access_mask;
}
-static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
- kvm_pfn_t pfn, unsigned int access)
-{
- if (unlikely(is_noslot_pfn(pfn))) {
- mark_mmio_spte(vcpu, sptep, gfn, access);
- return true;
- }
-
- return false;
-}
-
static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
{
u64 kvm_gen, spte_gen, gen;
@@ -279,11 +335,6 @@ static int is_cpuid_PSE36(void)
return 1;
}
-static int is_nx(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.efer & EFER_NX;
-}
-
static gfn_t pse36_gfn_delta(u32 gpte)
{
int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
@@ -725,8 +776,7 @@ static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
* handling slots that are not large page aligned.
*/
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
- struct kvm_memory_slot *slot,
- int level)
+ const struct kvm_memory_slot *slot, int level)
{
unsigned long idx;
@@ -1118,7 +1168,7 @@ static bool spte_write_protect(u64 *sptep, bool pt_protect)
rmap_printk("spte %p %llx\n", sptep, *sptep);
if (pt_protect)
- spte &= ~SPTE_MMU_WRITEABLE;
+ spte &= ~shadow_mmu_writable_mask;
spte = spte & ~PT_WRITABLE_MASK;
return mmu_spte_update(sptep, spte);
@@ -1188,8 +1238,7 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
* @gfn_offset: start of the BITS_PER_LONG pages we care about
* @mask: indicates which pages we should protect
*
- * Used when we do not need to care about huge page mappings: e.g. during dirty
- * logging we do not have any such mappings.
+ * Used when we do not need to care about huge page mappings.
*/
static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
@@ -1200,6 +1249,10 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
slot->base_gfn + gfn_offset, mask, true);
+
+ if (!kvm_memslots_have_rmaps(kvm))
+ return;
+
while (mask) {
rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PG_LEVEL_4K, slot);
@@ -1229,6 +1282,10 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
if (is_tdp_mmu_enabled(kvm))
kvm_tdp_mmu_clear_dirty_pt_masked(kvm, slot,
slot->base_gfn + gfn_offset, mask, false);
+
+ if (!kvm_memslots_have_rmaps(kvm))
+ return;
+
while (mask) {
rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
PG_LEVEL_4K, slot);
@@ -1246,13 +1303,36 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
* It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
* enable dirty logging for them.
*
- * Used when we do not need to care about huge page mappings: e.g. during dirty
- * logging we do not have any such mappings.
+ * We need to care about huge page mappings: e.g. during dirty logging we may
+ * have such mappings.
*/
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset, unsigned long mask)
{
+ /*
+ * Huge pages are NOT write protected when we start dirty logging in
+ * initially-all-set mode; we must write protect them here so that they
+ * are split to 4K on the first write.
+ *
+ * The gfn_offset is guaranteed to be aligned to 64, but the base_gfn
+ * of memslot has no such restriction, so the range can cross two large
+ * pages.
+ */
+ if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
+ gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask);
+ gfn_t end = slot->base_gfn + gfn_offset + __fls(mask);
+
+ kvm_mmu_slot_gfn_write_protect(kvm, slot, start, PG_LEVEL_2M);
+
+ /* Cross two large pages? */
+ if (ALIGN(start << PAGE_SHIFT, PMD_SIZE) !=
+ ALIGN(end << PAGE_SHIFT, PMD_SIZE))
+ kvm_mmu_slot_gfn_write_protect(kvm, slot, end,
+ PG_LEVEL_2M);
+ }
+
+ /* Now handle 4K PTEs. */
if (kvm_x86_ops.cpu_dirty_log_size)
kvm_mmu_clear_dirty_pt_masked(kvm, slot, gfn_offset, mask);
else
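A worked example of the start/end arithmetic above, using hypothetical numbers (PMD_SIZE = 2M = 0x200 gfns with 4K pages):

	base_gfn + gfn_offset = 0x40,  mask = 0x3c (bits 2..5 set)
		start = 0x42, end = 0x45
		ALIGN(0x42000, 2M) == ALIGN(0x45000, 2M) == 0x200000
		-> same 2M page, the single write-protect of start suffices

	base_gfn + gfn_offset = 0x1fe, mask = 0x09 (bits 0 and 3 set)
		start = 0x1fe, end = 0x201
		ALIGN(0x1fe000, 2M) = 0x200000, ALIGN(0x201000, 2M) = 0x400000
		-> the 64-page window straddles a 2M boundary, so end's large
		   page is write-protected as well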
@@ -1265,20 +1345,23 @@ int kvm_cpu_dirty_log_size(void)
}
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
- struct kvm_memory_slot *slot, u64 gfn)
+ struct kvm_memory_slot *slot, u64 gfn,
+ int min_level)
{
struct kvm_rmap_head *rmap_head;
int i;
bool write_protected = false;
- for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
- rmap_head = __gfn_to_rmap(gfn, i, slot);
- write_protected |= __rmap_write_protect(kvm, rmap_head, true);
+ if (kvm_memslots_have_rmaps(kvm)) {
+ for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
+ rmap_head = __gfn_to_rmap(gfn, i, slot);
+ write_protected |= __rmap_write_protect(kvm, rmap_head, true);
+ }
}
if (is_tdp_mmu_enabled(kvm))
write_protected |=
- kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
+ kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
return write_protected;
}
@@ -1288,7 +1371,7 @@ static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
struct kvm_memory_slot *slot;
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
+ return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
}
static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
@@ -1308,26 +1391,25 @@ static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
return flush;
}
-static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn, int level,
- unsigned long data)
+static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot, gfn_t gfn, int level,
+ pte_t unused)
{
return kvm_zap_rmapp(kvm, rmap_head, slot);
}
-static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn, int level,
- unsigned long data)
+static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot, gfn_t gfn, int level,
+ pte_t pte)
{
u64 *sptep;
struct rmap_iterator iter;
int need_flush = 0;
u64 new_spte;
- pte_t *ptep = (pte_t *)data;
kvm_pfn_t new_pfn;
- WARN_ON(pte_huge(*ptep));
- new_pfn = pte_pfn(*ptep);
+ WARN_ON(pte_huge(pte));
+ new_pfn = pte_pfn(pte);
restart:
for_each_rmap_spte(rmap_head, &iter, sptep) {
@@ -1336,7 +1418,7 @@ restart:
need_flush = 1;
- if (pte_write(*ptep)) {
+ if (pte_write(pte)) {
pte_list_remove(rmap_head, sptep);
goto restart;
} else {
@@ -1424,93 +1506,54 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
slot_rmap_walk_okay(_iter_); \
slot_rmap_walk_next(_iter_))
-static __always_inline int
-kvm_handle_hva_range(struct kvm *kvm,
- unsigned long start,
- unsigned long end,
- unsigned long data,
- int (*handler)(struct kvm *kvm,
- struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot,
- gfn_t gfn,
- int level,
- unsigned long data))
+typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ int level, pte_t pte);
+
+static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm,
+ struct kvm_gfn_range *range,
+ rmap_handler_t handler)
{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
struct slot_rmap_walk_iterator iterator;
- int ret = 0;
- int i;
+ bool ret = false;
- for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
- slots = __kvm_memslots(kvm, i);
- kvm_for_each_memslot(memslot, slots) {
- unsigned long hva_start, hva_end;
- gfn_t gfn_start, gfn_end;
-
- hva_start = max(start, memslot->userspace_addr);
- hva_end = min(end, memslot->userspace_addr +
- (memslot->npages << PAGE_SHIFT));
- if (hva_start >= hva_end)
- continue;
- /*
- * {gfn(page) | page intersects with [hva_start, hva_end)} =
- * {gfn_start, gfn_start+1, ..., gfn_end-1}.
- */
- gfn_start = hva_to_gfn_memslot(hva_start, memslot);
- gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
- for_each_slot_rmap_range(memslot, PG_LEVEL_4K,
- KVM_MAX_HUGEPAGE_LEVEL,
- gfn_start, gfn_end - 1,
- &iterator)
- ret |= handler(kvm, iterator.rmap, memslot,
- iterator.gfn, iterator.level, data);
- }
- }
+ for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
+ range->start, range->end - 1, &iterator)
+ ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn,
+ iterator.level, range->pte);
return ret;
}
-static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
- unsigned long data,
- int (*handler)(struct kvm *kvm,
- struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot,
- gfn_t gfn, int level,
- unsigned long data))
-{
- return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
-}
-
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
- unsigned flags)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
- int r;
+ bool flush = false;
- r = kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
+ if (kvm_memslots_have_rmaps(kvm))
+ flush = kvm_handle_gfn_range(kvm, range, kvm_unmap_rmapp);
if (is_tdp_mmu_enabled(kvm))
- r |= kvm_tdp_mmu_zap_hva_range(kvm, start, end);
+ flush |= kvm_tdp_mmu_unmap_gfn_range(kvm, range, flush);
- return r;
+ return flush;
}
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- int r;
+ bool flush = false;
- r = kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
+ if (kvm_memslots_have_rmaps(kvm))
+ flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmapp);
if (is_tdp_mmu_enabled(kvm))
- r |= kvm_tdp_mmu_set_spte_hva(kvm, hva, &pte);
+ flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range);
- return r;
+ return flush;
}
-static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn, int level,
- unsigned long data)
+static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot, gfn_t gfn, int level,
+ pte_t unused)
{
u64 *sptep;
struct rmap_iterator iter;
@@ -1519,13 +1562,12 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
for_each_rmap_spte(rmap_head, &iter, sptep)
young |= mmu_spte_age(sptep);
- trace_kvm_age_page(gfn, level, slot, young);
return young;
}
-static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
- struct kvm_memory_slot *slot, gfn_t gfn,
- int level, unsigned long data)
+static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ int level, pte_t unused)
{
u64 *sptep;
struct rmap_iterator iter;
@@ -1547,29 +1589,33 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
- kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
+ kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
KVM_PAGES_PER_HPAGE(sp->role.level));
}
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- int young = false;
+ bool young = false;
+
+ if (kvm_memslots_have_rmaps(kvm))
+ young = kvm_handle_gfn_range(kvm, range, kvm_age_rmapp);
- young = kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
if (is_tdp_mmu_enabled(kvm))
- young |= kvm_tdp_mmu_age_hva_range(kvm, start, end);
+ young |= kvm_tdp_mmu_age_gfn_range(kvm, range);
return young;
}
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- int young = false;
+ bool young = false;
+
+ if (kvm_memslots_have_rmaps(kvm))
+ young = kvm_handle_gfn_range(kvm, range, kvm_test_age_rmapp);
- young = kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
if (is_tdp_mmu_enabled(kvm))
- young |= kvm_tdp_mmu_test_age_hva(kvm, hva);
+ young |= kvm_tdp_mmu_test_age_gfn(kvm, range);
return young;
}
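Each notifier hook above now follows one shape: run an rmap_handler_t over the legacy rmaps when they exist, then OR in the TDP MMU's result. Adding a new per-range operation only requires a new handler; a deliberately trivial, hypothetical one for illustration:

static bool kvm_nop_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
			  struct kvm_memory_slot *slot, gfn_t gfn, int level,
			  pte_t unused)
{
	return false;	/* never requests a TLB flush */
}

	/* Dispatched exactly like the real handlers: */
	flush = kvm_handle_gfn_range(kvm, range, kvm_nop_rmapp);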
@@ -1800,17 +1846,10 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)]) \
if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
-static inline bool is_ept_sp(struct kvm_mmu_page *sp)
-{
- return sp->role.cr0_wp && sp->role.smap_andnot_wp;
-}
-
-/* @sp->gfn should be write-protected at the call site */
-static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
- struct list_head *invalid_list)
+static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+ struct list_head *invalid_list)
{
- if ((!is_ept_sp(sp) && sp->role.gpte_is_8_bytes != !!is_pae(vcpu)) ||
- vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
+ if (vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
return false;
}
@@ -1856,31 +1895,6 @@ static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}
-static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
- struct list_head *invalid_list)
-{
- kvm_unlink_unsync_page(vcpu->kvm, sp);
- return __kvm_sync_page(vcpu, sp, invalid_list);
-}
-
-/* @gfn should be write-protected at the call site */
-static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
- struct list_head *invalid_list)
-{
- struct kvm_mmu_page *s;
- bool ret = false;
-
- for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
- if (!s->unsync)
- continue;
-
- WARN_ON(s->role.level != PG_LEVEL_4K);
- ret |= kvm_sync_page(vcpu, s, invalid_list);
- }
-
- return ret;
-}
-
struct mmu_page_path {
struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
unsigned int idx[PT64_ROOT_MAX_LEVEL];
@@ -1975,6 +1989,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
}
for_each_sp(pages, sp, parents, i) {
+ kvm_unlink_unsync_page(vcpu->kvm, sp);
flush |= kvm_sync_page(vcpu, sp, &invalid_list);
mmu_pages_clear_parents(&parents);
}
@@ -2010,8 +2025,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
struct hlist_head *sp_list;
unsigned quadrant;
struct kvm_mmu_page *sp;
- bool need_sync = false;
- bool flush = false;
int collisions = 0;
LIST_HEAD(invalid_list);
@@ -2034,20 +2047,39 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
continue;
}
- if (!need_sync && sp->unsync)
- need_sync = true;
-
- if (sp->role.word != role.word)
+ if (sp->role.word != role.word) {
+ /*
+ * If the guest is creating an upper-level page, zap
+ * unsync pages for the same gfn. While it's possible
+ * the guest is using recursive page tables, in all
+ * likelihood the guest has stopped using the unsync
+ * page and is installing a completely unrelated page.
+ * Unsync pages must not be left as is, because the new
+ * upper-level page will be write-protected.
+ */
+ if (level > PG_LEVEL_4K && sp->unsync)
+ kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
+ &invalid_list);
continue;
+ }
if (direct_mmu)
goto trace_get_page;
if (sp->unsync) {
- /* The page is good, but __kvm_sync_page might still end
- * up zapping it. If so, break in order to rebuild it.
+ /*
+ * The page is good, but is stale. kvm_sync_page does
+ * get the latest guest state, but (unlike mmu_sync_children)
+ * it doesn't write-protect the page or mark it synchronized!
+ * This way the validity of the mapping is ensured, but the
+ * overhead of write protection is not incurred until the
+ * guest invalidates the TLB mapping. This allows multiple
+ * SPs for a single gfn to be unsync.
+ *
+ * If the sync fails, the page is zapped. If so, break
+ * in order to rebuild it.
*/
- if (!__kvm_sync_page(vcpu, sp, &invalid_list))
+ if (!kvm_sync_page(vcpu, sp, &invalid_list))
break;
WARN_ON(!list_empty(&invalid_list));
@@ -2072,22 +2104,14 @@ trace_get_page:
sp->role = role;
hlist_add_head(&sp->hash_link, sp_list);
if (!direct) {
- /*
- * we should do write protection before syncing pages
- * otherwise the content of the synced shadow page may
- * be inconsistent with guest page table.
- */
account_shadowed(vcpu->kvm, sp);
if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
-
- if (level > PG_LEVEL_4K && need_sync)
- flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
}
trace_kvm_mmu_get_page(sp, true);
-
- kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
out:
+ kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+
if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
return sp;
@@ -2421,6 +2445,15 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
kvm_mmu_zap_oldest_mmu_pages(vcpu->kvm, KVM_REFILL_PAGES - avail);
+ /*
+ * Note, this check is intentionally soft, it only guarantees that one
+ * page is available, while the caller may end up allocating as many as
+ * four pages, e.g. for PAE roots or for 5-level paging. Temporarily
+ * exceeding the (arbitrary by default) limit will not harm the host,
+ * being too aggressive may unnecessarily kill the guest, and getting an
+ * exact count is far more trouble than it's worth, especially in the
+ * page fault paths.
+ */
if (!kvm_mmu_available_pages(vcpu->kvm))
return -ENOSPC;
return 0;
@@ -2491,17 +2524,33 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
kvm_mmu_mark_parents_unsync(sp);
}
-bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
- bool can_unsync)
+/*
+ * Attempt to unsync any shadow pages that can be reached by the specified gfn,
+ * as KVM is creating a writable mapping for said gfn. Returns 0 if all pages
+ * were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
+ * be write-protected.
+ */
+int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
{
struct kvm_mmu_page *sp;
+ /*
+ * Force write-protection if the page is being tracked. Note, the page
+ * track machinery is used to write-protect upper-level shadow pages,
+ * i.e. this guards the role.level == 4K assertion below!
+ */
if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
- return true;
+ return -EPERM;
+ /*
+ * The page is not write-tracked, mark existing shadow pages unsync
+ * unless KVM is synchronizing an unsync SP (can_unsync = false). In
+ * that case, KVM must complete emulation of the guest TLB flush before
+ * allowing shadow pages to become unsync (writable by the guest).
+ */
for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
if (!can_unsync)
- return true;
+ return -EPERM;
if (sp->unsync)
continue;
@@ -2532,8 +2581,8 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
* 2.2 Guest issues TLB flush.
* That causes a VM Exit.
*
- * 2.3 kvm_mmu_sync_pages() reads sp->unsync.
- * Since it is false, so it just returns.
+ * 2.3 Walking of unsync pages sees sp->unsync is
+ * false and skips the page.
*
* 2.4 Guest accesses GVA X.
* Since the mapping in the SP was not updated,
@@ -2549,7 +2598,7 @@ bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
*/
smp_wmb();
- return false;
+ return 0;
}
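With the return type switched from bool to 0/-EPERM, callers treat any non-zero value as "keep the SPTE read-only". A simplified sketch of the expected call-site shape (the real caller sits in the SPTE construction path):

	/* Sketch: drop writability when unsyncing is refused. */
	if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync)) {
		pte_access &= ~ACC_WRITE_MASK;
		spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
	}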
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
@@ -2561,9 +2610,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
struct kvm_mmu_page *sp;
int ret;
- if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
- return 0;
-
sp = sptep_to_sp(sptep);
ret = make_spte(vcpu, pte_access, level, gfn, pfn, *sptep, speculative,
@@ -2593,6 +2639,11 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
*sptep, write_fault, gfn);
+ if (unlikely(is_noslot_pfn(pfn))) {
+ mark_mmio_spte(vcpu, sptep, gfn, pte_access);
+ return RET_PF_EMULATE;
+ }
+
if (is_shadow_present_pte(*sptep)) {
/*
* If we overwrite a PTE page pointer with a 2MB PMD, unlink
@@ -2626,9 +2677,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
KVM_PAGES_PER_HPAGE(level));
- if (unlikely(is_mmio_spte(*sptep)))
- ret = RET_PF_EMULATE;
-
/*
* The fault is fully spurious if and only if the new SPTE and old SPTE
* are identical, and emulation is not required.
@@ -2745,7 +2793,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
}
static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
- struct kvm_memory_slot *slot)
+ const struct kvm_memory_slot *slot)
{
unsigned long hva;
pte_t *pte;
@@ -2771,8 +2819,9 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
return level;
}
-int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
- gfn_t gfn, kvm_pfn_t pfn, int max_level)
+int kvm_mmu_max_mapping_level(struct kvm *kvm,
+ const struct kvm_memory_slot *slot, gfn_t gfn,
+ kvm_pfn_t pfn, int max_level)
{
struct kvm_lpage_info *linfo;
@@ -2870,9 +2919,6 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
gfn_t gfn = gpa >> PAGE_SHIFT;
gfn_t base_gfn = gfn;
- if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
- return RET_PF_RETRY;
-
level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
huge_page_disallowed, &req_level);
@@ -2946,9 +2992,19 @@ static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
return true;
}
- if (unlikely(is_noslot_pfn(pfn)))
+ if (unlikely(is_noslot_pfn(pfn))) {
vcpu_cache_mmio_info(vcpu, gva, gfn,
access & shadow_mmio_access_mask);
+ /*
+ * If MMIO caching is disabled, emulate immediately without
+ * touching the shadow page tables as attempting to install an
+ * MMIO SPTE will just be an expensive nop.
+ */
+ if (unlikely(!shadow_mmio_value)) {
+ *ret_val = RET_PF_EMULATE;
+ return true;
+ }
+ }
return false;
}
@@ -3061,6 +3117,9 @@ static int fast_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
if (!is_shadow_present_pte(spte))
break;
+ if (!is_shadow_present_pte(spte))
+ break;
+
sp = sptep_to_sp(iterator.sptep);
if (!is_last_spte(spte, sp->role.level))
break;
@@ -3150,12 +3209,10 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
- if (kvm_mmu_put_root(kvm, sp)) {
- if (is_tdp_mmu_page(sp))
- kvm_tdp_mmu_free_root(kvm, sp);
- else if (sp->role.invalid)
- kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
- }
+ if (is_tdp_mmu_page(sp))
+ kvm_tdp_mmu_put_root(kvm, sp, false);
+ else if (!--sp->root_count && sp->role.invalid)
+ kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
*root_hpa = INVALID_PAGE;
}
@@ -3193,14 +3250,17 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
(mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
- } else {
- for (i = 0; i < 4; ++i)
- if (mmu->pae_root[i] != 0)
- mmu_free_root_page(kvm,
- &mmu->pae_root[i],
- &invalid_list);
- mmu->root_hpa = INVALID_PAGE;
+ } else if (mmu->pae_root) {
+ for (i = 0; i < 4; ++i) {
+ if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
+ continue;
+
+ mmu_free_root_page(kvm, &mmu->pae_root[i],
+ &invalid_list);
+ mmu->pae_root[i] = INVALID_PAE_ROOT;
+ }
}
+ mmu->root_hpa = INVALID_PAGE;
mmu->root_pgd = 0;
}
@@ -3209,6 +3269,33 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
}
EXPORT_SYMBOL_GPL(kvm_mmu_free_roots);
+void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+{
+ unsigned long roots_to_free = 0;
+ hpa_t root_hpa;
+ int i;
+
+ /*
+ * This should not be called while L2 is active, L2 can't invalidate
+ * _only_ its own roots, e.g. INVVPID unconditionally exits.
+ */
+ WARN_ON_ONCE(mmu->mmu_role.base.guest_mode);
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+ root_hpa = mmu->prev_roots[i].hpa;
+ if (!VALID_PAGE(root_hpa))
+ continue;
+
+ if (!to_shadow_page(root_hpa) ||
+ to_shadow_page(root_hpa)->role.guest_mode)
+ roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
+ }
+
+ kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_free_guest_mode_roots);
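kvm_mmu_free_roots() takes a bitmask selecting which cached roots to drop, which is what the loop above assembles one KVM_MMU_ROOT_PREVIOUS(i) bit at a time. For instance, a caller that wanted the active root plus the first cached root freed would pass:

	/* Illustrative call, not taken from this patch: */
	kvm_mmu_free_roots(vcpu, mmu,
			   KVM_MMU_ROOT_CURRENT | KVM_MMU_ROOT_PREVIOUS(0));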
+
static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
int ret = 0;
@@ -3226,155 +3313,212 @@ static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
{
struct kvm_mmu_page *sp;
- write_lock(&vcpu->kvm->mmu_lock);
-
- if (make_mmu_pages_available(vcpu)) {
- write_unlock(&vcpu->kvm->mmu_lock);
- return INVALID_PAGE;
- }
sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
++sp->root_count;
- write_unlock(&vcpu->kvm->mmu_lock);
return __pa(sp->spt);
}
static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
- u8 shadow_root_level = vcpu->arch.mmu->shadow_root_level;
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
+ u8 shadow_root_level = mmu->shadow_root_level;
hpa_t root;
unsigned i;
+ int r;
+
+ write_lock(&vcpu->kvm->mmu_lock);
+ r = make_mmu_pages_available(vcpu);
+ if (r < 0)
+ goto out_unlock;
if (is_tdp_mmu_enabled(vcpu->kvm)) {
root = kvm_tdp_mmu_get_vcpu_root_hpa(vcpu);
-
- if (!VALID_PAGE(root))
- return -ENOSPC;
- vcpu->arch.mmu->root_hpa = root;
+ mmu->root_hpa = root;
} else if (shadow_root_level >= PT64_ROOT_4LEVEL) {
- root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level,
- true);
-
- if (!VALID_PAGE(root))
- return -ENOSPC;
- vcpu->arch.mmu->root_hpa = root;
+ root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
+ mmu->root_hpa = root;
} else if (shadow_root_level == PT32E_ROOT_LEVEL) {
+ if (WARN_ON_ONCE(!mmu->pae_root)) {
+ r = -EIO;
+ goto out_unlock;
+ }
+
for (i = 0; i < 4; ++i) {
- MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
+ WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
i << 30, PT32_ROOT_LEVEL, true);
- if (!VALID_PAGE(root))
- return -ENOSPC;
- vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
+ mmu->pae_root[i] = root | PT_PRESENT_MASK |
+ shadow_me_mask;
}
- vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
- } else
- BUG();
+ mmu->root_hpa = __pa(mmu->pae_root);
+ } else {
+ WARN_ONCE(1, "Bad TDP root level = %d\n", shadow_root_level);
+ r = -EIO;
+ goto out_unlock;
+ }
/* root_pgd is ignored for direct MMUs. */
- vcpu->arch.mmu->root_pgd = 0;
-
- return 0;
+ mmu->root_pgd = 0;
+out_unlock:
+ write_unlock(&vcpu->kvm->mmu_lock);
+ return r;
}
static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
{
- u64 pdptr, pm_mask;
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
+ u64 pdptrs[4], pm_mask;
gfn_t root_gfn, root_pgd;
hpa_t root;
- int i;
+ unsigned i;
+ int r;
- root_pgd = vcpu->arch.mmu->get_guest_pgd(vcpu);
+ root_pgd = mmu->get_guest_pgd(vcpu);
root_gfn = root_pgd >> PAGE_SHIFT;
if (mmu_check_root(vcpu, root_gfn))
return 1;
/*
+ * On SVM, reading PDPTRs might access guest memory, which might fault
+ * and thus might sleep. Grab the PDPTRs before acquiring mmu_lock.
+ */
+ if (mmu->root_level == PT32E_ROOT_LEVEL) {
+ for (i = 0; i < 4; ++i) {
+ pdptrs[i] = mmu->get_pdptr(vcpu, i);
+ if (!(pdptrs[i] & PT_PRESENT_MASK))
+ continue;
+
+ if (mmu_check_root(vcpu, pdptrs[i] >> PAGE_SHIFT))
+ return 1;
+ }
+ }
+
+ r = alloc_all_memslots_rmaps(vcpu->kvm);
+ if (r)
+ return r;
+
+ write_lock(&vcpu->kvm->mmu_lock);
+ r = make_mmu_pages_available(vcpu);
+ if (r < 0)
+ goto out_unlock;
+
+ /*
* Do we shadow a long mode page table? If so we need to
* write-protect the guests page table root.
*/
- if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
- MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa));
-
+ if (mmu->root_level >= PT64_ROOT_4LEVEL) {
root = mmu_alloc_root(vcpu, root_gfn, 0,
- vcpu->arch.mmu->shadow_root_level, false);
- if (!VALID_PAGE(root))
- return -ENOSPC;
- vcpu->arch.mmu->root_hpa = root;
+ mmu->shadow_root_level, false);
+ mmu->root_hpa = root;
goto set_root_pgd;
}
+ if (WARN_ON_ONCE(!mmu->pae_root)) {
+ r = -EIO;
+ goto out_unlock;
+ }
+
/*
* We shadow a 32 bit page table. This may be a legacy 2-level
* or a PAE 3-level page table. In either case we need to be aware that
* the shadow page table may be a PAE or a long mode page table.
*/
- pm_mask = PT_PRESENT_MASK;
- if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
+ pm_mask = PT_PRESENT_MASK | shadow_me_mask;
+ if (mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
+ if (WARN_ON_ONCE(!mmu->pml4_root)) {
+ r = -EIO;
+ goto out_unlock;
+ }
+
+ mmu->pml4_root[0] = __pa(mmu->pae_root) | pm_mask;
+ }
+
for (i = 0; i < 4; ++i) {
- MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
- if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
- pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
- if (!(pdptr & PT_PRESENT_MASK)) {
- vcpu->arch.mmu->pae_root[i] = 0;
+ WARN_ON_ONCE(IS_VALID_PAE_ROOT(mmu->pae_root[i]));
+
+ if (mmu->root_level == PT32E_ROOT_LEVEL) {
+ if (!(pdptrs[i] & PT_PRESENT_MASK)) {
+ mmu->pae_root[i] = INVALID_PAE_ROOT;
continue;
}
- root_gfn = pdptr >> PAGE_SHIFT;
- if (mmu_check_root(vcpu, root_gfn))
- return 1;
+ root_gfn = pdptrs[i] >> PAGE_SHIFT;
}
root = mmu_alloc_root(vcpu, root_gfn, i << 30,
PT32_ROOT_LEVEL, false);
- if (!VALID_PAGE(root))
- return -ENOSPC;
- vcpu->arch.mmu->pae_root[i] = root | pm_mask;
+ mmu->pae_root[i] = root | pm_mask;
}
- vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
+
+ if (mmu->shadow_root_level == PT64_ROOT_4LEVEL)
+ mmu->root_hpa = __pa(mmu->pml4_root);
+ else
+ mmu->root_hpa = __pa(mmu->pae_root);
+
+set_root_pgd:
+ mmu->root_pgd = root_pgd;
+out_unlock:
+ write_unlock(&vcpu->kvm->mmu_lock);
+
+ return r;
+}
+
+static int mmu_alloc_special_roots(struct kvm_vcpu *vcpu)
+{
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
+ u64 *pml4_root, *pae_root;
/*
- * If we shadow a 32 bit page table with a long mode page
- * table we enter this path.
+ * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
+ * tables are allocated and initialized at root creation as there is no
+ * equivalent level in the guest's NPT to shadow. Allocate the tables
+ * on demand, as running a 32-bit L1 VMM on 64-bit KVM is very rare.
*/
- if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
- if (vcpu->arch.mmu->lm_root == NULL) {
- /*
- * The additional page necessary for this is only
- * allocated on demand.
- */
+ if (mmu->direct_map || mmu->root_level >= PT64_ROOT_4LEVEL ||
+ mmu->shadow_root_level < PT64_ROOT_4LEVEL)
+ return 0;
- u64 *lm_root;
+ /*
+ * This mess only works with 4-level paging and needs to be updated to
+ * work with 5-level paging.
+ */
+ if (WARN_ON_ONCE(mmu->shadow_root_level != PT64_ROOT_4LEVEL))
+ return -EIO;
- lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
- if (lm_root == NULL)
- return 1;
+ if (mmu->pae_root && mmu->pml4_root)
+ return 0;
- lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
+ /*
+ * The special roots should always be allocated in concert. Yell and
+ * bail if KVM ends up in a state where only one of the roots is valid.
+ */
+ if (WARN_ON_ONCE(!tdp_enabled || mmu->pae_root || mmu->pml4_root))
+ return -EIO;
- vcpu->arch.mmu->lm_root = lm_root;
- }
+ /*
+ * Unlike 32-bit NPT, the PDP table doesn't need to be in low mem, and
+ * doesn't need to be decrypted.
+ */
+ pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+ if (!pae_root)
+ return -ENOMEM;
- vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
+ pml4_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
+ if (!pml4_root) {
+ free_page((unsigned long)pae_root);
+ return -ENOMEM;
}
-set_root_pgd:
- vcpu->arch.mmu->root_pgd = root_pgd;
+ mmu->pae_root = pae_root;
+ mmu->pml4_root = pml4_root;
return 0;
}
-static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
-{
- if (vcpu->arch.mmu->direct_map)
- return mmu_alloc_direct_roots(vcpu);
- else
- return mmu_alloc_shadow_roots(vcpu);
-}
-
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
int i;
@@ -3399,8 +3543,8 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
* flush strictly after those changes are made. We only need to
* ensure that the other CPU sets these flags before any actual
* changes to the page tables are made. The comments in
- * mmu_need_write_protect() describe what could go wrong if this
- * requirement isn't satisfied.
+ * mmu_try_to_unsync_pages() describe what could go wrong if
+ * this requirement isn't satisfied.
*/
if (!smp_load_acquire(&sp->unsync) &&
!smp_load_acquire(&sp->unsync_children))
@@ -3422,7 +3566,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
for (i = 0; i < 4; ++i) {
hpa_t root = vcpu->arch.mmu->pae_root[i];
- if (root && VALID_PAGE(root)) {
+ if (IS_VALID_PAE_ROOT(root)) {
root &= PT64_BASE_ADDR_MASK;
sp = to_shadow_page(root);
mmu_sync_children(vcpu, sp);
@@ -3450,19 +3594,6 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
}
-static bool
-__is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
-{
- int bit7 = (pte >> 7) & 1;
-
- return pte & rsvd_check->rsvd_bits_mask[bit7][level-1];
-}
-
-static bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check, u64 pte)
-{
- return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
-}
-
static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
/*
@@ -3516,12 +3647,7 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
int root, leaf, level;
bool reserved = false;
- if (!VALID_PAGE(vcpu->arch.mmu->root_hpa)) {
- *sptep = 0ull;
- return reserved;
- }
-
- if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+ if (is_tdp_mmu(vcpu->arch.mmu))
leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root);
else
leaf = get_walk(vcpu, addr, sptes, &root);
@@ -3545,20 +3671,15 @@ static bool get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
rsvd_check = &vcpu->arch.mmu->shadow_zero_check;
for (level = root; level >= leaf; level--)
- /*
- * Use a bitwise-OR instead of a logical-OR to aggregate the
- * reserved bit and EPT's invalid memtype/XWR checks to avoid
- * adding a Jcc in the loop.
- */
- reserved |= __is_bad_mt_xwr(rsvd_check, sptes[level]) |
- __is_rsvd_bits_set(rsvd_check, sptes[level], level);
+ reserved |= is_rsvd_spte(rsvd_check, sptes[level], level);
if (reserved) {
- pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
+ pr_err("%s: reserved bits set on MMU-present spte, addr 0x%llx, hierarchy:\n",
__func__, addr);
for (level = root; level >= leaf; level--)
- pr_err("------ spte 0x%llx level %d.\n",
- sptes[level], level);
+ pr_err("------ spte = 0x%llx level = %d, rsvd bits = 0x%llx\n",
+ sptes[level], level,
+ get_rsvd_bits(rsvd_check, sptes[level], level));
}
return reserved;
@@ -3653,6 +3774,14 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
bool async;
+ /*
+ * Retry the page fault if the gfn hit a memslot that is being deleted
+ * or moved. This ensures any existing SPTEs for the old memslot will
+ * be zapped before KVM inserts a new MMIO SPTE for the gfn.
+ */
+ if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
+ return true;
+
/* Don't expose private memslots to L2. */
if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
*pfn = KVM_PFN_NOSLOT;
@@ -3684,6 +3813,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
bool prefault, int max_level, bool is_tdp)
{
+ bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
bool write = error_code & PFERR_WRITE_MASK;
bool map_writable;
@@ -3696,7 +3826,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
if (page_fault_handle_page_track(vcpu, error_code, gfn))
return RET_PF_EMULATE;
- if (!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)) {
+ if (!is_tdp_mmu_fault) {
r = fast_page_fault(vcpu, gpa, error_code);
if (r != RET_PF_INVALID)
return r;
@@ -3718,7 +3848,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
r = RET_PF_RETRY;
- if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+ if (is_tdp_mmu_fault)
read_lock(&vcpu->kvm->mmu_lock);
else
write_lock(&vcpu->kvm->mmu_lock);
@@ -3729,7 +3859,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
if (r)
goto out_unlock;
- if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+ if (is_tdp_mmu_fault)
r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
pfn, prefault);
else
@@ -3737,7 +3867,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
prefault, is_tdp);
out_unlock:
- if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+ if (is_tdp_mmu_fault)
read_unlock(&vcpu->kvm->mmu_lock);
else
write_unlock(&vcpu->kvm->mmu_lock);
@@ -3807,17 +3937,13 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
max_level, true);
}
-static void nonpaging_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void nonpaging_init_context(struct kvm_mmu *context)
{
context->page_fault = nonpaging_page_fault;
context->gva_to_gpa = nonpaging_gva_to_gpa;
context->sync_page = nonpaging_sync_page;
context->invlpg = NULL;
- context->root_level = 0;
- context->shadow_root_level = PT32E_ROOT_LEVEL;
context->direct_map = true;
- context->nx = false;
}
static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
@@ -3880,8 +4006,7 @@ static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
}
static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
- union kvm_mmu_page_role new_role,
- bool skip_tlb_flush, bool skip_mmu_sync)
+ union kvm_mmu_page_role new_role)
{
if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
@@ -3896,10 +4021,10 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
*/
kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
- if (!skip_mmu_sync || force_flush_and_sync_on_reuse)
+ if (force_flush_and_sync_on_reuse) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
- if (!skip_tlb_flush || force_flush_and_sync_on_reuse)
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+ }
/*
* The last MMIO access's GVA and GPA are cached in the VCPU. When
@@ -3918,11 +4043,9 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
to_shadow_page(vcpu->arch.mmu->root_hpa));
}
-void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
- bool skip_mmu_sync)
+void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd)
{
- __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu),
- skip_tlb_flush, skip_mmu_sync);
+ __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
@@ -3948,26 +4071,6 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
return false;
}
-static inline bool is_last_gpte(struct kvm_mmu *mmu,
- unsigned level, unsigned gpte)
-{
- /*
- * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
- * If it is clear, there are no large pages at this level, so clear
- * PT_PAGE_SIZE_MASK in gpte if that is the case.
- */
- gpte &= level - mmu->last_nonleaf_level;
-
- /*
- * PG_LEVEL_4K always terminates. The RHS has bit 7 set
- * iff level <= PG_LEVEL_4K, which for our purpose means
- * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
- */
- gpte |= level - PG_LEVEL_4K - 1;
-
- return gpte & PT_PAGE_SIZE_MASK;
-}
-
#define PTTYPE_EPT 18 /* arbitrary */
#define PTTYPE PTTYPE_EPT
#include "paging_tmpl.h"
@@ -3982,8 +4085,7 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
#undef PTTYPE
static void
-__reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
- struct rsvd_bits_validate *rsvd_check,
+__reset_rsvds_bits_mask(struct rsvd_bits_validate *rsvd_check,
u64 pa_bits_rsvd, int level, bool nx, bool gbpages,
bool pse, bool amd)
{
@@ -4072,14 +4174,29 @@ __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
}
}
+static bool guest_can_use_gbpages(struct kvm_vcpu *vcpu)
+{
+ /*
+ * If TDP is enabled, let the guest use GBPAGES if they're supported in
+ * hardware. The hardware page walker doesn't let KVM disable GBPAGES,
+ * i.e. won't treat them as reserved, and KVM doesn't redo the GVA->GPA
+ * walk for performance and complexity reasons. Not to mention KVM
+ * _can't_ solve the problem because GVA->GPA walks aren't visible to
+ * KVM once a TDP translation is installed. Mimic hardware behavior so
+ * that KVM's behavior is at least consistent, i.e. doesn't randomly inject #PF.
+ */
+ return tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
+ guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
+}
+
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context)
{
- __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
+ __reset_rsvds_bits_mask(&context->guest_rsvd_check,
vcpu->arch.reserved_gpa_bits,
- context->root_level, context->nx,
- guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
- is_pse(vcpu),
+ context->root_level, is_efer_nx(context),
+ guest_can_use_gbpages(vcpu),
+ is_cr4_pse(context),
guest_cpuid_is_amd_or_hygon(vcpu));
}
@@ -4132,24 +4249,32 @@ static inline u64 reserved_hpa_bits(void)
* table in guest or amd nested guest, its mmu features completely
* follow the features in guest.
*/
-void
-reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
+static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context)
{
- bool uses_nx = context->nx ||
- context->mmu_role.base.smep_andnot_wp;
+ /*
+ * KVM uses NX when TDP is disabled to handle a variety of scenarios,
+ * notably for huge SPTEs if iTLB multi-hit mitigation is enabled and
+ * to generate correct permissions for CR0.WP=0/CR4.SMEP=1/EFER.NX=0.
+ * The iTLB multi-hit workaround can be toggled at any time, so assume
+ * NX can be used by any non-nested shadow MMU to avoid having to reset
+ * MMU contexts. Note, KVM forces EFER.NX=1 when TDP is disabled.
+ */
+ bool uses_nx = is_efer_nx(context) || !tdp_enabled;
+
+ /* @amd adds a check on bit 8 of SPTEs, which KVM shouldn't use anyways. */
+ bool is_amd = true;
+ /* KVM doesn't use 2-level page tables for the shadow MMU. */
+ bool is_pse = false;
struct rsvd_bits_validate *shadow_zero_check;
int i;
- /*
- * Passing "true" to the last argument is okay; it adds a check
- * on bit 8 of the SPTEs which KVM doesn't use anyway.
- */
+ WARN_ON_ONCE(context->shadow_root_level < PT32E_ROOT_LEVEL);
+
shadow_zero_check = &context->shadow_zero_check;
- __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
- reserved_hpa_bits(),
+ __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
context->shadow_root_level, uses_nx,
- guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
- is_pse(vcpu), true);
+ guest_can_use_gbpages(vcpu), is_pse, is_amd);
if (!shadow_me_mask)
return;
@@ -4160,7 +4285,6 @@ reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
}
}
-EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
static inline bool boot_cpu_is_amd(void)
{
@@ -4182,11 +4306,10 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
shadow_zero_check = &context->shadow_zero_check;
if (boot_cpu_is_amd())
- __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
- reserved_hpa_bits(),
+ __reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
context->shadow_root_level, false,
boot_cpu_has(X86_FEATURE_GBPAGES),
- true, true);
+ false, true);
else
__reset_rsvds_bits_mask_ept(shadow_zero_check,
reserved_hpa_bits(), false);
@@ -4222,8 +4345,7 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
(7 & (access) ? 128 : 0))
-static void update_permission_bitmask(struct kvm_vcpu *vcpu,
- struct kvm_mmu *mmu, bool ept)
+static void update_permission_bitmask(struct kvm_mmu *mmu, bool ept)
{
unsigned byte;
@@ -4231,9 +4353,10 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
const u8 w = BYTE_MASK(ACC_WRITE_MASK);
const u8 u = BYTE_MASK(ACC_USER_MASK);
- bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
- bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
- bool cr0_wp = is_write_protection(vcpu);
+ bool cr4_smep = is_cr4_smep(mmu);
+ bool cr4_smap = is_cr4_smap(mmu);
+ bool cr0_wp = is_cr0_wp(mmu);
+ bool efer_nx = is_efer_nx(mmu);
for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
unsigned pfec = byte << 1;
@@ -4259,7 +4382,7 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
/* Not really needed: !nx will cause pte.nx to fault */
- if (!mmu->nx)
+ if (!efer_nx)
ff = 0;
/* Allow supervisor writes if !cr0.wp */
@@ -4318,24 +4441,17 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
* away both AD and WD. For all reads or if the last condition holds, WD
* only will be masked away.
*/
-static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
- bool ept)
+static void update_pkru_bitmask(struct kvm_mmu *mmu)
{
unsigned bit;
bool wp;
- if (ept) {
+ if (!is_cr4_pke(mmu)) {
mmu->pkru_mask = 0;
return;
}
- /* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
- if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
- mmu->pkru_mask = 0;
- return;
- }
-
- wp = is_write_protection(vcpu);
+ wp = is_cr0_wp(mmu);
for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
unsigned pfec, pkey_bits;
@@ -4369,81 +4485,51 @@ static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
}
}
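For context, the tables computed by update_permission_bitmask() and update_pkru_bitmask() are consumed at fault time by permission_fault() in mmu.h. A hypothetical, simplified sketch of the lookup (the real helper also folds implicit supervisor accesses into the SMAP/RSVD slot):

	static inline bool would_fault(struct kvm_mmu *mmu, unsigned int pte_access,
				       unsigned int pfec)
	{
		/* Mirrors "pfec = byte << 1" above: the PFEC sans the PRESENT bit. */
		int index = pfec >> 1;

		/* One "access would fault" bit per ACC_*_MASK combination. */
		return (mmu->permissions[index] >> pte_access) & 1;
	}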
-static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *mmu)
{
- unsigned root_level = mmu->root_level;
+ if (!is_cr0_pg(mmu))
+ return;
- mmu->last_nonleaf_level = root_level;
- if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
- mmu->last_nonleaf_level++;
+ reset_rsvds_bits_mask(vcpu, mmu);
+ update_permission_bitmask(mmu, false);
+ update_pkru_bitmask(mmu);
}
-static void paging64_init_context_common(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context,
- int level)
+static void paging64_init_context(struct kvm_mmu *context)
{
- context->nx = is_nx(vcpu);
- context->root_level = level;
-
- reset_rsvds_bits_mask(vcpu, context);
- update_permission_bitmask(vcpu, context, false);
- update_pkru_bitmask(vcpu, context, false);
- update_last_nonleaf_level(vcpu, context);
-
- MMU_WARN_ON(!is_pae(vcpu));
context->page_fault = paging64_page_fault;
context->gva_to_gpa = paging64_gva_to_gpa;
context->sync_page = paging64_sync_page;
context->invlpg = paging64_invlpg;
- context->shadow_root_level = level;
context->direct_map = false;
}
-static void paging64_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
-{
- int root_level = is_la57_mode(vcpu) ?
- PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
-
- paging64_init_context_common(vcpu, context, root_level);
-}
-
-static void paging32_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
+static void paging32_init_context(struct kvm_mmu *context)
{
- context->nx = false;
- context->root_level = PT32_ROOT_LEVEL;
-
- reset_rsvds_bits_mask(vcpu, context);
- update_permission_bitmask(vcpu, context, false);
- update_pkru_bitmask(vcpu, context, false);
- update_last_nonleaf_level(vcpu, context);
-
context->page_fault = paging32_page_fault;
context->gva_to_gpa = paging32_gva_to_gpa;
context->sync_page = paging32_sync_page;
context->invlpg = paging32_invlpg;
- context->shadow_root_level = PT32E_ROOT_LEVEL;
context->direct_map = false;
}
-static void paging32E_init_context(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context)
-{
- paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
-}
-
-static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
+static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs)
{
union kvm_mmu_extended_role ext = {0};
- ext.cr0_pg = !!is_paging(vcpu);
- ext.cr4_pae = !!is_pae(vcpu);
- ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
- ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
- ext.cr4_pse = !!is_pse(vcpu);
- ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
- ext.maxphyaddr = cpuid_maxphyaddr(vcpu);
+ if (____is_cr0_pg(regs)) {
+ ext.cr0_pg = 1;
+ ext.cr4_pae = ____is_cr4_pae(regs);
+ ext.cr4_smep = ____is_cr4_smep(regs);
+ ext.cr4_smap = ____is_cr4_smap(regs);
+ ext.cr4_pse = ____is_cr4_pse(regs);
+
+ /* PKEY and LA57 are active iff long mode is active. */
+ ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
+ ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
+ }
ext.valid = 1;
@@ -4451,20 +4537,23 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
}
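The struct kvm_mmu_role_regs and quadruple-underscore accessors used throughout these hunks are introduced by an earlier patch in the series; presumably along these lines (a reconstruction, not the verbatim hunk):

	struct kvm_mmu_role_regs {
		const unsigned long cr0;
		const unsigned long cr4;
		const u64 efer;
	};

	/* The underscores hint that these read a raw register snapshot, not the vCPU. */
	#define BUILD_MMU_ROLE_REGS_ACCESSOR(reg, name, flag)			\
	static inline bool ____is_##reg##_##name(struct kvm_mmu_role_regs *regs)\
	{									\
		return !!(regs->reg & flag);					\
	}
	BUILD_MMU_ROLE_REGS_ACCESSOR(cr0, pg, X86_CR0_PG);
	BUILD_MMU_ROLE_REGS_ACCESSOR(cr4, pae, X86_CR4_PAE);
	BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);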
static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs,
bool base_only)
{
union kvm_mmu_role role = {0};
role.base.access = ACC_ALL;
- role.base.nxe = !!is_nx(vcpu);
- role.base.cr0_wp = is_write_protection(vcpu);
+ if (____is_cr0_pg(regs)) {
+ role.base.efer_nx = ____is_efer_nx(regs);
+ role.base.cr0_wp = ____is_cr0_wp(regs);
+ }
role.base.smm = is_smm(vcpu);
role.base.guest_mode = is_guest_mode(vcpu);
if (base_only)
return role;
- role.ext = kvm_calc_mmu_role_ext(vcpu);
+ role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
return role;
}
@@ -4479,9 +4568,10 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
}
static union kvm_mmu_role
-kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs, bool base_only)
{
- union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
role.base.ad_disabled = (shadow_accessed_mask == 0);
role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4494,8 +4584,9 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
union kvm_mmu_role new_role =
- kvm_calc_tdp_mmu_root_page_role(vcpu, false);
+ kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, false);
if (new_role.as_u64 == context->mmu_role.as_u64)
return;
@@ -4509,60 +4600,44 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->get_guest_pgd = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
+ context->root_level = role_regs_to_root_level(&regs);
- if (!is_paging(vcpu)) {
- context->nx = false;
+ if (!is_cr0_pg(context))
context->gva_to_gpa = nonpaging_gva_to_gpa;
- context->root_level = 0;
- } else if (is_long_mode(vcpu)) {
- context->nx = is_nx(vcpu);
- context->root_level = is_la57_mode(vcpu) ?
- PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
- reset_rsvds_bits_mask(vcpu, context);
+ else if (is_cr4_pae(context))
context->gva_to_gpa = paging64_gva_to_gpa;
- } else if (is_pae(vcpu)) {
- context->nx = is_nx(vcpu);
- context->root_level = PT32E_ROOT_LEVEL;
- reset_rsvds_bits_mask(vcpu, context);
- context->gva_to_gpa = paging64_gva_to_gpa;
- } else {
- context->nx = false;
- context->root_level = PT32_ROOT_LEVEL;
- reset_rsvds_bits_mask(vcpu, context);
+ else
context->gva_to_gpa = paging32_gva_to_gpa;
- }
- update_permission_bitmask(vcpu, context, false);
- update_pkru_bitmask(vcpu, context, false);
- update_last_nonleaf_level(vcpu, context);
+ reset_guest_paging_metadata(vcpu, context);
reset_tdp_shadow_zero_bits_mask(vcpu, context);
}
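role_regs_to_root_level(), referenced above but defined in an earlier hunk, presumably collapses the old is_long_mode()/is_pae() cascade that this hunk deletes:

	static int role_regs_to_root_level(struct kvm_mmu_role_regs *regs)
	{
		if (!____is_cr0_pg(regs))
			return 0;
		if (____is_efer_lma(regs))
			return ____is_cr4_la57(regs) ? PT64_ROOT_5LEVEL :
						       PT64_ROOT_4LEVEL;
		if (____is_cr4_pae(regs))
			return PT32E_ROOT_LEVEL;
		return PT32_ROOT_LEVEL;
	}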
static union kvm_mmu_role
-kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs, bool base_only)
{
- union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
- role.base.smep_andnot_wp = role.ext.cr4_smep &&
- !is_write_protection(vcpu);
- role.base.smap_andnot_wp = role.ext.cr4_smap &&
- !is_write_protection(vcpu);
- role.base.gpte_is_8_bytes = !!is_pae(vcpu);
+ role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
+ role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
+ role.base.gpte_is_8_bytes = ____is_cr0_pg(regs) && ____is_cr4_pae(regs);
return role;
}
static union kvm_mmu_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs, bool base_only)
{
union kvm_mmu_role role =
- kvm_calc_shadow_root_page_role_common(vcpu, base_only);
+ kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
- role.base.direct = !is_paging(vcpu);
+ role.base.direct = !____is_cr0_pg(regs);
- if (!is_long_mode(vcpu))
+ if (!____is_efer_lma(regs))
role.base.level = PT32E_ROOT_LEVEL;
- else if (is_la57_mode(vcpu))
+ else if (____is_cr4_la57(regs))
role.base.level = PT64_ROOT_5LEVEL;
else
role.base.level = PT64_ROOT_4LEVEL;
@@ -4571,37 +4646,44 @@ kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
}
static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
- u32 cr0, u32 cr4, u32 efer,
+ struct kvm_mmu_role_regs *regs,
union kvm_mmu_role new_role)
{
- if (!(cr0 & X86_CR0_PG))
- nonpaging_init_context(vcpu, context);
- else if (efer & EFER_LMA)
- paging64_init_context(vcpu, context);
- else if (cr4 & X86_CR4_PAE)
- paging32E_init_context(vcpu, context);
- else
- paging32_init_context(vcpu, context);
+ if (new_role.as_u64 == context->mmu_role.as_u64)
+ return;
context->mmu_role.as_u64 = new_role.as_u64;
+
+ if (!is_cr0_pg(context))
+ nonpaging_init_context(context);
+ else if (is_cr4_pae(context))
+ paging64_init_context(context);
+ else
+ paging32_init_context(context);
+ context->root_level = role_regs_to_root_level(regs);
+
+ reset_guest_paging_metadata(vcpu, context);
+ context->shadow_root_level = new_role.base.level;
+
reset_shadow_zero_bits_mask(vcpu, context);
}
-static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
+static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs)
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
union kvm_mmu_role new_role =
- kvm_calc_shadow_mmu_root_page_role(vcpu, false);
+ kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
- if (new_role.as_u64 != context->mmu_role.as_u64)
- shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
+ shadow_mmu_init_context(vcpu, context, regs, new_role);
}
static union kvm_mmu_role
-kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
+kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_role_regs *regs)
{
union kvm_mmu_role role =
- kvm_calc_shadow_root_page_role_common(vcpu, false);
+ kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
role.base.direct = false;
role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4609,18 +4691,22 @@ kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu)
return role;
}
-void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
- gpa_t nested_cr3)
+void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
+ unsigned long cr4, u64 efer, gpa_t nested_cr3)
{
struct kvm_mmu *context = &vcpu->arch.guest_mmu;
- union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
+ struct kvm_mmu_role_regs regs = {
+ .cr0 = cr0,
+ .cr4 = cr4,
+ .efer = efer,
+ };
+ union kvm_mmu_role new_role;
- context->shadow_root_level = new_role.base.level;
+ new_role = kvm_calc_shadow_npt_root_page_role(vcpu, &regs);
- __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
+ __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base);
- if (new_role.as_u64 != context->mmu_role.as_u64)
- shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
+ shadow_mmu_init_context(vcpu, context, &regs, new_role);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
@@ -4640,15 +4726,10 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
role.base.guest_mode = true;
role.base.access = ACC_ALL;
- /*
- * WP=1 and NOT_WP=1 is an impossible combination, use WP and the
- * SMAP variation to denote shadow EPT entries.
- */
- role.base.cr0_wp = true;
- role.base.smap_andnot_wp = true;
-
- role.ext = kvm_calc_mmu_role_ext(vcpu);
+ /* EPT, and thus nested EPT, does not consume CR0, CR4, nor EFER. */
+ role.ext.word = 0;
role.ext.execonly = execonly;
+ role.ext.valid = 1;
return role;
}
@@ -4662,14 +4743,15 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
execonly, level);
- __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true);
+ __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base);
if (new_role.as_u64 == context->mmu_role.as_u64)
return;
+ context->mmu_role.as_u64 = new_role.as_u64;
+
context->shadow_root_level = level;
- context->nx = true;
context->ept_ad = accessed_dirty;
context->page_fault = ept_page_fault;
context->gva_to_gpa = ept_gva_to_gpa;
@@ -4677,11 +4759,9 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->invlpg = ept_invlpg;
context->root_level = level;
context->direct_map = false;
- context->mmu_role.as_u64 = new_role.as_u64;
- update_permission_bitmask(vcpu, context, true);
- update_pkru_bitmask(vcpu, context, true);
- update_last_nonleaf_level(vcpu, context);
+ update_permission_bitmask(context, true);
+ update_pkru_bitmask(context);
reset_rsvds_bits_mask_ept(vcpu, context, execonly);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
@@ -4690,20 +4770,36 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *context = &vcpu->arch.root_mmu;
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
- kvm_init_shadow_mmu(vcpu,
- kvm_read_cr0_bits(vcpu, X86_CR0_PG),
- kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
- vcpu->arch.efer);
+ kvm_init_shadow_mmu(vcpu, &regs);
context->get_guest_pgd = get_cr3;
context->get_pdptr = kvm_pdptr_read;
context->inject_page_fault = kvm_inject_page_fault;
}
+static union kvm_mmu_role
+kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, struct kvm_mmu_role_regs *regs)
+{
+ union kvm_mmu_role role;
+
+ role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
+
+ /*
+ * Nested MMUs are used only for walking L2's gva->gpa, they never have
+ * shadow pages of their own and so "direct" has no meaning. Set it
+ * to "true" to try to detect bogus usage of the nested MMU.
+ */
+ role.base.direct = true;
+ role.base.level = role_regs_to_root_level(regs);
+ return role;
+}
+
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
- union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
+ union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu, &regs);
struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
if (new_role.as_u64 == g_context->mmu_role.as_u64)
@@ -4713,6 +4809,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
g_context->get_guest_pgd = get_cr3;
g_context->get_pdptr = kvm_pdptr_read;
g_context->inject_page_fault = kvm_inject_page_fault;
+ g_context->root_level = new_role.base.level;
/*
* L2 page tables are never shadowed, so there is no need to sync
@@ -4728,44 +4825,20 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
* nested page tables as the second level of translation. Basically
* the gva_to_gpa functions between mmu and nested_mmu are swapped.
*/
- if (!is_paging(vcpu)) {
- g_context->nx = false;
- g_context->root_level = 0;
+ if (!is_paging(vcpu))
g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
- } else if (is_long_mode(vcpu)) {
- g_context->nx = is_nx(vcpu);
- g_context->root_level = is_la57_mode(vcpu) ?
- PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
- reset_rsvds_bits_mask(vcpu, g_context);
+ else if (is_long_mode(vcpu))
g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
- } else if (is_pae(vcpu)) {
- g_context->nx = is_nx(vcpu);
- g_context->root_level = PT32E_ROOT_LEVEL;
- reset_rsvds_bits_mask(vcpu, g_context);
+ else if (is_pae(vcpu))
g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
- } else {
- g_context->nx = false;
- g_context->root_level = PT32_ROOT_LEVEL;
- reset_rsvds_bits_mask(vcpu, g_context);
+ else
g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
- }
- update_permission_bitmask(vcpu, g_context, false);
- update_pkru_bitmask(vcpu, g_context, false);
- update_last_nonleaf_level(vcpu, g_context);
+ reset_guest_paging_metadata(vcpu, g_context);
}
-void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
+void kvm_init_mmu(struct kvm_vcpu *vcpu)
{
- if (reset_roots) {
- uint i;
-
- vcpu->arch.mmu->root_hpa = INVALID_PAGE;
-
- for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
- vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
- }
-
if (mmu_is_nested(vcpu))
init_kvm_nested_mmu(vcpu);
else if (tdp_enabled)
@@ -4778,20 +4851,53 @@ EXPORT_SYMBOL_GPL(kvm_init_mmu);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
+ struct kvm_mmu_role_regs regs = vcpu_to_role_regs(vcpu);
union kvm_mmu_role role;
if (tdp_enabled)
- role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
+ role = kvm_calc_tdp_mmu_root_page_role(vcpu, &regs, true);
else
- role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
+ role = kvm_calc_shadow_mmu_root_page_role(vcpu, &regs, true);
return role.base;
}
+void kvm_mmu_after_set_cpuid(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Invalidate all MMU roles to force them to reinitialize as CPUID
+ * information is factored into reserved bit calculations.
+ */
+ vcpu->arch.root_mmu.mmu_role.ext.valid = 0;
+ vcpu->arch.guest_mmu.mmu_role.ext.valid = 0;
+ vcpu->arch.nested_mmu.mmu_role.ext.valid = 0;
+ kvm_mmu_reset_context(vcpu);
+
+ /*
+ * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
+ * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
+ * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
+ * faults due to reusing SPs/SPTEs. Alert userspace, but otherwise
+ * sweep the problem under the rug.
+ *
+ * KVM's horrific CPUID ABI makes the problem all but impossible to
+ * solve, as correctly handling multiple vCPU models (with respect to
+ * paging and physical address properties) in a single VM would require
+ * tracking all relevant CPUID information in kvm_mmu_page_role. That
+ * is very undesirable as it would double the memory requirements for
+ * gfn_track (see struct kvm_mmu_page_role comments), and in practice
+ * no sane VMM mucks with the core vCPU model on the fly.
+ */
+ if (vcpu->arch.last_vmentry_cpu != -1) {
+ pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} after KVM_RUN may cause guest instability\n");
+ pr_warn_ratelimited("KVM: KVM_SET_CPUID{,2} will fail after KVM_RUN starting with Linux 5.16\n");
+ }
+}
+
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
kvm_mmu_unload(vcpu);
- kvm_init_mmu(vcpu, true);
+ kvm_init_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
@@ -4802,16 +4908,23 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
r = mmu_topup_memory_caches(vcpu, !vcpu->arch.mmu->direct_map);
if (r)
goto out;
- r = mmu_alloc_roots(vcpu);
- kvm_mmu_sync_roots(vcpu);
+ r = mmu_alloc_special_roots(vcpu);
+ if (r)
+ goto out;
+ if (vcpu->arch.mmu->direct_map)
+ r = mmu_alloc_direct_roots(vcpu);
+ else
+ r = mmu_alloc_shadow_roots(vcpu);
if (r)
goto out;
+
+ kvm_mmu_sync_roots(vcpu);
+
kvm_mmu_load_pgd(vcpu);
static_call(kvm_x86_tlb_flush_current)(vcpu);
out:
return r;
}
-EXPORT_SYMBOL_GPL(kvm_mmu_load);
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
@@ -4820,7 +4933,6 @@ void kvm_mmu_unload(struct kvm_vcpu *vcpu)
kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
}
-EXPORT_SYMBOL_GPL(kvm_mmu_unload);
static bool need_remote_flush(u64 old, u64 new)
{
@@ -4961,7 +5073,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
/*
* No need to care whether allocation memory is successful
- * or not since pte prefetch is skiped if it does not have
+ * or not since pte prefetch is skipped if it does not have
* enough objects in the cache.
*/
mmu_topup_memory_caches(vcpu, true);
@@ -5169,10 +5281,10 @@ typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_
static __always_inline bool
slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, int start_level, int end_level,
- gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
+ gfn_t start_gfn, gfn_t end_gfn, bool flush_on_yield,
+ bool flush)
{
struct slot_rmap_walk_iterator iterator;
- bool flush = false;
for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
end_gfn, &iterator) {
@@ -5180,7 +5292,7 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
flush |= fn(kvm, iterator.rmap, memslot);
if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
- if (flush && lock_flush_tlb) {
+ if (flush && flush_on_yield) {
kvm_flush_remote_tlbs_with_address(kvm,
start_gfn,
iterator.gfn - start_gfn + 1);
@@ -5190,38 +5302,34 @@ slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
}
}
- if (flush && lock_flush_tlb) {
- kvm_flush_remote_tlbs_with_address(kvm, start_gfn,
- end_gfn - start_gfn + 1);
- flush = false;
- }
-
return flush;
}
static __always_inline bool
slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
slot_level_handler fn, int start_level, int end_level,
- bool lock_flush_tlb)
+ bool flush_on_yield)
{
return slot_handle_level_range(kvm, memslot, fn, start_level,
end_level, memslot->base_gfn,
memslot->base_gfn + memslot->npages - 1,
- lock_flush_tlb);
+ flush_on_yield, false);
}
static __always_inline bool
slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
- slot_level_handler fn, bool lock_flush_tlb)
+ slot_level_handler fn, bool flush_on_yield)
{
return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
- PG_LEVEL_4K, lock_flush_tlb);
+ PG_LEVEL_4K, flush_on_yield);
}
static void free_mmu_pages(struct kvm_mmu *mmu)
{
+ if (!tdp_enabled && mmu->pae_root)
+ set_memory_encrypted((unsigned long)mmu->pae_root, 1);
free_page((unsigned long)mmu->pae_root);
- free_page((unsigned long)mmu->lm_root);
+ free_page((unsigned long)mmu->pml4_root);
}
static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
@@ -5240,9 +5348,11 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
* while the PDP table is a per-vCPU construct that's allocated at MMU
* creation. When emulating 32-bit mode, cr3 is only 32 bits even on
* x86_64. Therefore we need to allocate the PDP table in the first
- * 4GB of memory, which happens to fit the DMA32 zone. Except for
- * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
- * skip allocating the PDP table.
+ * 4GB of memory, which happens to fit the DMA32 zone. TDP paging
+ * generally doesn't use PAE paging and can skip allocating the PDP
+ * table. The main exception, handled here, is SVM's 32-bit NPT. The
+ * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
+ * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
*/
if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
return 0;
@@ -5252,8 +5362,22 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
return -ENOMEM;
mmu->pae_root = page_address(page);
+
+ /*
+ * CR3 is only 32 bits when PAE paging is used, thus it's impossible to
+ * get the CPU to treat the PDPTEs as encrypted. Decrypt the page so
+ * that KVM's writes and the CPU's reads get along. Note, this is
+ * only necessary when using shadow paging, as 64-bit NPT can get at
+ * the C-bit even when shadowing 32-bit NPT, and SME isn't supported
+ * by 32-bit kernels (when KVM itself uses 32-bit NPT).
+ */
+ if (!tdp_enabled)
+ set_memory_decrypted((unsigned long)mmu->pae_root, 1);
+ else
+ WARN_ON_ONCE(shadow_me_mask);
+
for (i = 0; i < 4; ++i)
- mmu->pae_root[i] = INVALID_PAGE;
+ mmu->pae_root[i] = INVALID_PAE_ROOT;
return 0;
}
@@ -5365,6 +5489,15 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
*/
kvm->arch.mmu_valid_gen = kvm->arch.mmu_valid_gen ? 0 : 1;
+ /* In order to ensure all threads see this change when
+ * handling the MMU reload signal, this must happen in the
+ * same critical section as kvm_reload_remote_mmus, and
+ * before kvm_zap_obsolete_pages as kvm_zap_obsolete_pages
+ * could drop the MMU lock and yield.
+ */
+ if (is_tdp_mmu_enabled(kvm))
+ kvm_tdp_mmu_invalidate_all_roots(kvm);
+
/*
* Notify all vcpus to reload its shadow page table and flush TLB.
* Then all vcpus will switch to new shadow page table with the new
@@ -5377,10 +5510,13 @@ static void kvm_mmu_zap_all_fast(struct kvm *kvm)
kvm_zap_obsolete_pages(kvm);
- if (is_tdp_mmu_enabled(kvm))
- kvm_tdp_mmu_zap_all(kvm);
-
write_unlock(&kvm->mmu_lock);
+
+ if (is_tdp_mmu_enabled(kvm)) {
+ read_lock(&kvm->mmu_lock);
+ kvm_tdp_mmu_zap_invalidated_roots(kvm);
+ read_unlock(&kvm->mmu_lock);
+ }
}
static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
@@ -5399,7 +5535,13 @@ void kvm_mmu_init_vm(struct kvm *kvm)
{
struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
- kvm_mmu_init_tdp_mmu(kvm);
+ if (!kvm_mmu_init_tdp_mmu(kvm))
+ /*
+ * No smp_load/store wrappers needed here as we are in
+ * VM init and there cannot be any memslots / other threads
+ * accessing this struct kvm yet.
+ */
+ kvm->arch.memslots_have_rmaps = true;
node->track_write = kvm_mmu_pte_write;
node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
@@ -5420,33 +5562,44 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int i;
- bool flush;
-
- write_lock(&kvm->mmu_lock);
- for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
- slots = __kvm_memslots(kvm, i);
- kvm_for_each_memslot(memslot, slots) {
- gfn_t start, end;
-
- start = max(gfn_start, memslot->base_gfn);
- end = min(gfn_end, memslot->base_gfn + memslot->npages);
- if (start >= end)
- continue;
+ bool flush = false;
- slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
- PG_LEVEL_4K,
- KVM_MAX_HUGEPAGE_LEVEL,
- start, end - 1, true);
+ if (kvm_memslots_have_rmaps(kvm)) {
+ write_lock(&kvm->mmu_lock);
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ slots = __kvm_memslots(kvm, i);
+ kvm_for_each_memslot(memslot, slots) {
+ gfn_t start, end;
+
+ start = max(gfn_start, memslot->base_gfn);
+ end = min(gfn_end, memslot->base_gfn + memslot->npages);
+ if (start >= end)
+ continue;
+
+ flush = slot_handle_level_range(kvm, memslot,
+ kvm_zap_rmapp, PG_LEVEL_4K,
+ KVM_MAX_HUGEPAGE_LEVEL, start,
+ end - 1, true, flush);
+ }
}
+ if (flush)
+ kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
+ write_unlock(&kvm->mmu_lock);
}
if (is_tdp_mmu_enabled(kvm)) {
- flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end);
+ flush = false;
+
+ read_lock(&kvm->mmu_lock);
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+ flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
+ gfn_end, flush, true);
if (flush)
- kvm_flush_remote_tlbs(kvm);
- }
+ kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+ gfn_end);
- write_unlock(&kvm->mmu_lock);
+ read_unlock(&kvm->mmu_lock);
+ }
}
static bool slot_rmap_write_protect(struct kvm *kvm,
@@ -5460,14 +5613,21 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
struct kvm_memory_slot *memslot,
int start_level)
{
- bool flush;
+ bool flush = false;
- write_lock(&kvm->mmu_lock);
- flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
- start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
- if (is_tdp_mmu_enabled(kvm))
- flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, PG_LEVEL_4K);
- write_unlock(&kvm->mmu_lock);
+ if (kvm_memslots_have_rmaps(kvm)) {
+ write_lock(&kvm->mmu_lock);
+ flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
+ start_level, KVM_MAX_HUGEPAGE_LEVEL,
+ false);
+ write_unlock(&kvm->mmu_lock);
+ }
+
+ if (is_tdp_mmu_enabled(kvm)) {
+ read_lock(&kvm->mmu_lock);
+ flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
+ read_unlock(&kvm->mmu_lock);
+ }
/*
* We can flush all the TLBs out of the mmu lock without TLB
@@ -5476,9 +5636,9 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
* spte from present to present (changing the spte from present
* to nonpresent will flush all the TLBs immediately), in other
* words, the only case we care is mmu_spte_update() where we
- * have checked SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE
- * instead of PT_WRITABLE_MASK, that means it does not depend
- * on PT_WRITABLE_MASK anymore.
+ * have checked Host-writable | MMU-writable instead of
+ * PT_WRITABLE_MASK, which means it does not depend on PT_WRITABLE_MASK
+ * anymore.
*/
if (flush)
kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
@@ -5529,21 +5689,31 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
{
/* FIXME: const-ify all uses of struct kvm_memory_slot. */
struct kvm_memory_slot *slot = (struct kvm_memory_slot *)memslot;
+ bool flush = false;
- write_lock(&kvm->mmu_lock);
- slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+ if (kvm_memslots_have_rmaps(kvm)) {
+ write_lock(&kvm->mmu_lock);
+ flush = slot_handle_leaf(kvm, slot, kvm_mmu_zap_collapsible_spte, true);
+ if (flush)
+ kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+ write_unlock(&kvm->mmu_lock);
+ }
- if (is_tdp_mmu_enabled(kvm))
- kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot);
- write_unlock(&kvm->mmu_lock);
+ if (is_tdp_mmu_enabled(kvm)) {
+ read_lock(&kvm->mmu_lock);
+ flush = kvm_tdp_mmu_zap_collapsible_sptes(kvm, slot, flush);
+ if (flush)
+ kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
+ read_unlock(&kvm->mmu_lock);
+ }
}
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- struct kvm_memory_slot *memslot)
+ const struct kvm_memory_slot *memslot)
{
/*
* All current use cases for flushing the TLBs for a specific memslot
- * are related to dirty logging, and do the TLB flush out of mmu_lock.
+ * are related to dirty logging, and many do the TLB flush out of mmu_lock.
* The interaction between the various operations on memslot must be
* serialized by slots_locks to ensure the TLB flush from one operation
* is observed by any other operation on the same memslot.
@@ -5556,13 +5726,20 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
struct kvm_memory_slot *memslot)
{
- bool flush;
+ bool flush = false;
- write_lock(&kvm->mmu_lock);
- flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
- if (is_tdp_mmu_enabled(kvm))
+ if (kvm_memslots_have_rmaps(kvm)) {
+ write_lock(&kvm->mmu_lock);
+ flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty,
+ false);
+ write_unlock(&kvm->mmu_lock);
+ }
+
+ if (is_tdp_mmu_enabled(kvm)) {
+ read_lock(&kvm->mmu_lock);
flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
- write_unlock(&kvm->mmu_lock);
+ read_unlock(&kvm->mmu_lock);
+ }
/*
* It's also safe to flush TLBs out of mmu lock here as currently this
@@ -5701,25 +5878,6 @@ static void mmu_destroy_caches(void)
kmem_cache_destroy(mmu_page_header_cache);
}
-static void kvm_set_mmio_spte_mask(void)
-{
- u64 mask;
-
- /*
- * Set a reserved PA bit in MMIO SPTEs to generate page faults with
- * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT
- * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
- * 52-bit physical addresses then there are no reserved PA bits in the
- * PTEs and so the reserved PA approach must be disabled.
- */
- if (shadow_phys_bits < 52)
- mask = BIT_ULL(51) | PT_PRESENT_MASK;
- else
- mask = 0;
-
- kvm_mmu_set_mmio_spte_mask(mask, ACC_WRITE_MASK | ACC_USER_MASK);
-}
-
static bool get_nx_auto_mode(void)
{
/* Return true when CPU has the bug, and mitigations are ON */
@@ -5785,8 +5943,6 @@ int kvm_mmu_module_init(void)
kvm_mmu_reset_all_pte_masks();
- kvm_set_mmio_spte_mask();
-
pte_list_desc_cache = kmem_cache_create("pte_list_desc",
sizeof(struct pte_list_desc),
0, SLAB_ACCOUNT, NULL);
@@ -5880,17 +6036,19 @@ static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel
static void kvm_recover_nx_lpages(struct kvm *kvm)
{
+ unsigned long nx_lpage_splits = kvm->stat.nx_lpage_splits;
int rcu_idx;
struct kvm_mmu_page *sp;
unsigned int ratio;
LIST_HEAD(invalid_list);
+ bool flush = false;
ulong to_zap;
rcu_idx = srcu_read_lock(&kvm->srcu);
write_lock(&kvm->mmu_lock);
ratio = READ_ONCE(nx_huge_pages_recovery_ratio);
- to_zap = ratio ? DIV_ROUND_UP(kvm->stat.nx_lpage_splits, ratio) : 0;
+ to_zap = ratio ? DIV_ROUND_UP(nx_lpage_splits, ratio) : 0;
for ( ; to_zap; --to_zap) {
if (list_empty(&kvm->arch.lpage_disallowed_mmu_pages))
break;
@@ -5905,19 +6063,19 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
lpage_disallowed_link);
WARN_ON_ONCE(!sp->lpage_disallowed);
if (is_tdp_mmu_page(sp)) {
- kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
- sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
+ flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
} else {
kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
WARN_ON_ONCE(sp->lpage_disallowed);
}
if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
- kvm_mmu_commit_zap_page(kvm, &invalid_list);
+ kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
cond_resched_rwlock_write(&kvm->mmu_lock);
+ flush = false;
}
}
- kvm_mmu_commit_zap_page(kvm, &invalid_list);
+ kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
write_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, rcu_idx);
diff --git a/arch/x86/kvm/mmu/mmu_audit.c b/arch/x86/kvm/mmu/mmu_audit.c
index ced15fd58fde..cedc17b2f60e 100644
--- a/arch/x86/kvm/mmu/mmu_audit.c
+++ b/arch/x86/kvm/mmu/mmu_audit.c
@@ -70,7 +70,7 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
for (i = 0; i < 4; ++i) {
hpa_t root = vcpu->arch.mmu->pae_root[i];
- if (root && VALID_PAGE(root)) {
+ if (IS_VALID_PAE_ROOT(root)) {
root &= PT64_BASE_ADDR_MASK;
sp = to_shadow_page(root);
__mmu_spte_walk(vcpu, sp, fn, 2);
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index ec4fc28b325a..35567293c1fd 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -20,6 +20,16 @@ extern bool dbg;
#define MMU_WARN_ON(x) do { } while (0)
#endif
+/*
+ * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
+ * bit, and thus are guaranteed to be non-zero when valid. And, when a guest
+ * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
+ * as the CPU would treat that as a PRESENT PDPTR with reserved bits set. Use
+ * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
+ */
+#define INVALID_PAE_ROOT 0
+#define IS_VALID_PAE_ROOT(x) (!!(x))
+
struct kvm_mmu_page {
struct list_head link;
struct hlist_node hash_link;
@@ -40,7 +50,11 @@ struct kvm_mmu_page {
u64 *spt;
/* hold the gfn of each spte inside spt */
gfn_t *gfns;
- int root_count; /* Currently serving as active root */
+ /* Currently serving as active root */
+ union {
+ int root_count;
+ refcount_t tdp_mmu_root_count;
+ };
unsigned int unsync_children;
struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
DECLARE_BITMAP(unsync_child_bitmap, 512);
@@ -59,7 +73,7 @@ struct kvm_mmu_page {
#ifdef CONFIG_X86_64
bool tdp_mmu_page;
- /* Used for freeing the page asyncronously if it is a TDP MMU page. */
+ /* Used for freeing the page asynchronously if it is a TDP MMU page. */
struct rcu_head rcu_head;
#endif
};
@@ -78,6 +92,16 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
return to_shadow_page(__pa(sptep));
}
+static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
+{
+ return role.smm ? 1 : 0;
+}
+
+static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
+{
+ return kvm_mmu_role_as_id(sp->role);
+}
+
static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
/*
@@ -92,33 +116,22 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
kvm_x86_ops.cpu_dirty_log_size;
}
-bool is_nx_huge_page_enabled(void);
-bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
- bool can_unsync);
+extern int nx_huge_pages;
+static inline bool is_nx_huge_page_enabled(void)
+{
+ return READ_ONCE(nx_huge_pages);
+}
+
+int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync);
void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
- struct kvm_memory_slot *slot, u64 gfn);
+ struct kvm_memory_slot *slot, u64 gfn,
+ int min_level);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
u64 start_gfn, u64 pages);
-static inline void kvm_mmu_get_root(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
- BUG_ON(!sp->root_count);
- lockdep_assert_held(&kvm->mmu_lock);
-
- ++sp->root_count;
-}
-
-static inline bool kvm_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *sp)
-{
- lockdep_assert_held(&kvm->mmu_lock);
- --sp->root_count;
-
- return !sp->root_count;
-}
-
/*
* Return values of handle_mmio_page_fault, mmu.page_fault, and fast_page_fault().
*
@@ -141,16 +154,15 @@ enum {
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH BIT(1)
#define SET_SPTE_SPURIOUS BIT(2)
-int kvm_mmu_max_mapping_level(struct kvm *kvm, struct kvm_memory_slot *slot,
- gfn_t gfn, kvm_pfn_t pfn, int max_level);
+int kvm_mmu_max_mapping_level(struct kvm *kvm,
+ const struct kvm_memory_slot *slot, gfn_t gfn,
+ kvm_pfn_t pfn, int max_level);
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
int max_level, kvm_pfn_t *pfnp,
bool huge_page_disallowed, int *req_level);
void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
kvm_pfn_t *pfnp, int *goal_levelp);
-bool is_nx_huge_page_enabled(void);
-
void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index e798489b56b5..efbad33a0645 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -40,7 +40,7 @@
role.direct ? " direct" : "", \
access_str[role.access], \
role.invalid ? " invalid" : "", \
- role.nxe ? "" : "!", \
+ role.efer_nx ? "" : "!", \
role.ad_disabled ? "!" : "", \
__entry->root_count, \
__entry->unsync ? "unsync" : "sync", 0); \
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index 34bb0ec69bd8..91a9f7e0fd91 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -100,7 +100,7 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
kvm_mmu_gfn_disallow_lpage(slot, gfn);
if (mode == KVM_PAGE_TRACK_WRITE)
- if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
+ if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 55d7b473ac44..490a028ddabe 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -90,8 +90,8 @@ struct guest_walker {
gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
bool pte_writable[PT_MAX_FULL_LEVELS];
- unsigned pt_access;
- unsigned pte_access;
+ unsigned int pt_access[PT_MAX_FULL_LEVELS];
+ unsigned int pte_access;
gfn_t gfn;
struct x86_exception fault;
};
@@ -305,6 +305,35 @@ static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte)
return pkeys;
}
+static inline bool FNAME(is_last_gpte)(struct kvm_mmu *mmu,
+ unsigned int level, unsigned int gpte)
+{
+ /*
+ * For EPT and PAE paging (both variants), bit 7 is either reserved at
+ * all levels or indicates a huge page (ignoring CR3/EPTP). In either
+ * case, bit 7 being set terminates the walk.
+ */
+#if PTTYPE == 32
+ /*
+ * 32-bit paging requires special handling because bit 7 is ignored if
+ * CR4.PSE=0, not reserved. Clear bit 7 in the gpte if the level is
+ * greater than the last level for which bit 7 is the PAGE_SIZE bit.
+ *
+ * The RHS has bit 7 set iff level < (2 + PSE). If it is clear, bit 7
+ * is not reserved and does not indicate a large page at this level,
+ * so clear PT_PAGE_SIZE_MASK in gpte if that is the case.
+ */
+ gpte &= level - (PT32_ROOT_LEVEL + mmu->mmu_role.ext.cr4_pse);
+#endif
+ /*
+ * PG_LEVEL_4K always terminates. The RHS has bit 7 set
+ * iff level <= PG_LEVEL_4K, which for our purpose means
+ * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
+ */
+ gpte |= level - PG_LEVEL_4K - 1;
+
+ return gpte & PT_PAGE_SIZE_MASK;
+}
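A worked example of the branchless bit-7 math above, written as a hypothetical compile-time self-test (PT32_ROOT_LEVEL == 2, PG_LEVEL_4K == 1, and PT_PAGE_SIZE_MASK is bit 7; the subtraction borrows into bit 7 exactly when the level is below the threshold):

	static inline void check_is_last_gpte_math(void)
	{
		/* level 2, PSE=1: 2 - (2 + 1) == -1, bit 7 set -> the PS bit survives the AND. */
		BUILD_BUG_ON(!((2 - 3) & PT_PAGE_SIZE_MASK));
		/* level 2, PSE=0: 2 - (2 + 0) == 0, bit 7 clear -> the ignored PS bit is stripped. */
		BUILD_BUG_ON((2 - 2) & PT_PAGE_SIZE_MASK);
		/* level 1: 1 - PG_LEVEL_4K - 1 == -1 -> the OR forces termination. */
		BUILD_BUG_ON(!((1 - 1 - 1) & PT_PAGE_SIZE_MASK));
	}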
/*
* Fetch a guest pte for a guest virtual address, or for an L2's GPA.
*/
@@ -418,13 +447,15 @@ retry_walk:
}
walker->ptes[walker->level - 1] = pte;
- } while (!is_last_gpte(mmu, walker->level, pte));
+
+ /* Convert to ACC_*_MASK flags for struct guest_walker. */
+ walker->pt_access[walker->level - 1] = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
+ } while (!FNAME(is_last_gpte)(mmu, walker->level, pte));
pte_pkey = FNAME(gpte_pkeys)(vcpu, pte);
accessed_dirty = have_ad ? pte_access & PT_GUEST_ACCESSED_MASK : 0;
/* Convert to ACC_*_MASK flags for struct guest_walker. */
- walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask);
walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask);
errcode = permission_fault(vcpu, mmu, walker->pte_access, pte_pkey, access);
if (unlikely(errcode))
@@ -463,13 +494,13 @@ retry_walk:
}
pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
- __func__, (u64)pte, walker->pte_access, walker->pt_access);
+ __func__, (u64)pte, walker->pte_access,
+ walker->pt_access[walker->level - 1]);
return 1;
error:
errcode |= write_fault | user_fault;
- if (fetch_fault && (mmu->nx ||
- kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
+ if (fetch_fault && (is_efer_nx(mmu) || is_cr4_smep(mmu)))
errcode |= PFERR_FETCH_MASK;
walker->fault.vector = PF_VECTOR;
@@ -503,6 +534,7 @@ error:
#endif
walker->fault.address = addr;
walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
+ walker->fault.async_page_fault = false;
trace_kvm_mmu_walker_error(walker->fault.error_code);
return 0;
@@ -642,7 +674,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
struct kvm_mmu_page *sp = NULL;
struct kvm_shadow_walk_iterator it;
- unsigned direct_access, access = gw->pt_access;
+ unsigned int direct_access, access;
int top_level, level, req_level, ret;
gfn_t base_gfn = gw->gfn;
@@ -674,6 +706,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
sp = NULL;
if (!is_shadow_present_pte(*it.sptep)) {
table_gfn = gw->table_gfn[it.level - 2];
+ access = gw->pt_access[it.level - 2];
sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
false, access);
}
@@ -762,7 +795,7 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
bool self_changed = false;
if (!(walker->pte_access & ACC_WRITE_MASK ||
- (!is_write_protection(vcpu) && !user_fault)))
+ (!is_cr0_wp(vcpu->arch.mmu) && !user_fault)))
return false;
for (level = walker->level; level <= walker->max_level; level++) {
@@ -860,8 +893,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
* we will cache the incorrect access into mmio spte.
*/
if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
- !is_write_protection(vcpu) && !user_fault &&
- !is_noslot_pfn(pfn)) {
+ !is_cr0_wp(vcpu->arch.mmu) && !user_fault && !is_noslot_pfn(pfn)) {
walker.pte_access |= ACC_WRITE_MASK;
walker.pte_access &= ~ACC_USER_MASK;
@@ -871,7 +903,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code,
* then we should prevent the kernel from executing it
* if SMEP is enabled.
*/
- if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
+ if (is_cr4_smep(vcpu->arch.mmu))
walker.pte_access &= ~ACC_EXEC_MASK;
}
@@ -1026,13 +1058,36 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
*/
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
+ union kvm_mmu_page_role mmu_role = vcpu->arch.mmu->mmu_role.base;
int i, nr_present = 0;
bool host_writable;
gpa_t first_pte_gpa;
int set_spte_ret = 0;
- /* direct kvm_mmu_page can not be unsync. */
- BUG_ON(sp->role.direct);
+ /*
+ * Ignore various flags when verifying that it's safe to sync a shadow
+ * page using the current MMU context.
+ *
+ * - level: not part of the overall MMU role and will never match as the MMU's
+ * level tracks the root level
+ * - access: updated based on the new guest PTE
+ * - quadrant: not part of the overall MMU role (similar to level)
+ */
+ const union kvm_mmu_page_role sync_role_ign = {
+ .level = 0xf,
+ .access = 0x7,
+ .quadrant = 0x3,
+ };
+
+ /*
+ * Direct pages can never be unsync, and KVM should never attempt to
+ * sync a shadow page for a different MMU context, e.g. if the role
+ * differs then the memslot lookup (SMM vs. non-SMM) will be bogus, the
+ * reserved bits checks will be wrong, etc...
+ */
+ if (WARN_ON_ONCE(sp->role.direct ||
+ (sp->role.word ^ mmu_role.word) & ~sync_role_ign.word))
+ return 0;
first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);
@@ -1084,7 +1139,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
nr_present++;
- host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;
+ host_writable = sp->spt[i] & shadow_host_writable_mask;
set_spte_ret |= set_spte(vcpu, &sp->spt[i],
pte_access, PG_LEVEL_4K,
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index ef55f0bc4ccf..3e97cdb13eb7 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -16,13 +16,20 @@
#include "spte.h"
#include <asm/e820/api.h>
+#include <asm/vmx.h>
+static bool __read_mostly enable_mmio_caching = true;
+module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
+
+u64 __read_mostly shadow_host_writable_mask;
+u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
+u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_mask;
@@ -38,7 +45,6 @@ static u64 generation_mmio_spte_mask(u64 gen)
u64 mask;
WARN_ON(gen & ~MMIO_SPTE_GEN_MASK);
- BUILD_BUG_ON((MMIO_SPTE_GEN_HIGH_MASK | MMIO_SPTE_GEN_LOW_MASK) & SPTE_SPECIAL_MASK);
mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
@@ -48,16 +54,18 @@ static u64 generation_mmio_spte_mask(u64 gen)
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
- u64 mask = generation_mmio_spte_mask(gen);
+ u64 spte = generation_mmio_spte_mask(gen);
u64 gpa = gfn << PAGE_SHIFT;
+ WARN_ON_ONCE(!shadow_mmio_value);
+
access &= shadow_mmio_access_mask;
- mask |= shadow_mmio_value | access;
- mask |= gpa | shadow_nonpresent_or_rsvd_mask;
- mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
+ spte |= shadow_mmio_value | access;
+ spte |= gpa | shadow_nonpresent_or_rsvd_mask;
+ spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
- return mask;
+ return spte;
}
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
@@ -86,13 +94,13 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
bool can_unsync, bool host_writable, bool ad_disabled,
u64 *new_spte)
{
- u64 spte = 0;
+ u64 spte = SPTE_MMU_PRESENT_MASK;
int ret = 0;
if (ad_disabled)
- spte |= SPTE_AD_DISABLED_MASK;
+ spte |= SPTE_TDP_AD_DISABLED_MASK;
else if (kvm_vcpu_ad_need_write_protect(vcpu))
- spte |= SPTE_AD_WRPROT_ONLY_MASK;
+ spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;
/*
* For the EPT case, shadow_present_mask is 0 if hardware
@@ -124,7 +132,7 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
kvm_is_mmio_pfn(pfn));
if (host_writable)
- spte |= SPTE_HOST_WRITEABLE;
+ spte |= shadow_host_writable_mask;
else
pte_access &= ~ACC_WRITE_MASK;
@@ -134,23 +142,29 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
spte |= (u64)pfn << PAGE_SHIFT;
if (pte_access & ACC_WRITE_MASK) {
- spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
+ spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;
/*
* Optimization: for pte sync, if spte was writable the hash
* lookup is unnecessary (and expensive). Write protection
- * is responsibility of mmu_get_page / kvm_sync_page.
+ * is responsibility of kvm_mmu_get_page / kvm_mmu_sync_roots.
* Same reasoning can be applied to dirty page accounting.
*/
if (!can_unsync && is_writable_pte(old_spte))
goto out;
- if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
+ /*
+ * Unsync shadow pages that are reachable by the new, writable
+ * SPTE. Write-protect the SPTE if the page can't be unsync'd,
+ * e.g. it's write-tracked (upper-level SPs) or has one or more
+ * shadow pages and unsync'ing pages is not allowed.
+ */
+ if (mmu_try_to_unsync_pages(vcpu, gfn, can_unsync)) {
pgprintk("%s: found shadow page for %llx, marking ro\n",
__func__, gfn);
ret |= SET_SPTE_WRITE_PROTECTED_PT;
pte_access &= ~ACC_WRITE_MASK;
- spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
+ spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
}
}
@@ -161,19 +175,23 @@ int make_spte(struct kvm_vcpu *vcpu, unsigned int pte_access, int level,
spte = mark_spte_for_access_track(spte);
out:
+ WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
+ "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
+ get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));
+
*new_spte = spte;
return ret;
}
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
- u64 spte;
+ u64 spte = SPTE_MMU_PRESENT_MASK;
- spte = __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
- shadow_user_mask | shadow_x_mask | shadow_me_mask;
+ spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
+ shadow_user_mask | shadow_x_mask | shadow_me_mask;
if (ad_disabled)
- spte |= SPTE_AD_DISABLED_MASK;
+ spte |= SPTE_TDP_AD_DISABLED_MASK;
else
spte |= shadow_accessed_mask;
@@ -188,7 +206,7 @@ u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
new_spte |= (u64)new_pfn << PAGE_SHIFT;
new_spte &= ~PT_WRITABLE_MASK;
- new_spte &= ~SPTE_HOST_WRITEABLE;
+ new_spte &= ~shadow_host_writable_mask;
new_spte = mark_spte_for_access_track(new_spte);
@@ -242,53 +260,68 @@ u64 mark_spte_for_access_track(u64 spte)
return spte;
}
-void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
+void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
BUG_ON((u64)(unsigned)access_mask != access_mask);
- WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN));
WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
- shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
+
+ if (!enable_mmio_caching)
+ mmio_value = 0;
+
+ /*
+ * Disable MMIO caching if the MMIO value collides with the bits that
+ * are used to hold the relocated GFN when the L1TF mitigation is
+ * enabled. This should never fire as there is no known hardware that
+ * can trigger this condition, e.g. SME/SEV CPUs that require a custom
+ * MMIO value are not susceptible to L1TF.
+ */
+ if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
+ SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
+ mmio_value = 0;
+
+ /*
+ * The masked MMIO value must obviously match itself and a removed SPTE
+ * must not get a false positive. Removed SPTEs and MMIO SPTEs should
+ * never collide as MMIO must set some RWX bits, and removed SPTEs must
+ * not set any RWX bits.
+ */
+ if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
+ WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
+ mmio_value = 0;
+
+ shadow_mmio_value = mmio_value;
+ shadow_mmio_mask = mmio_mask;
shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
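With a separate value and mask, the MMIO check presumably reduces to a masked compare; a sketch of is_mmio_spte() as it looks after this change (the trailing check keeps a zeroed SPTE from matching when MMIO caching is disabled):

	static inline bool is_mmio_spte(u64 spte)
	{
		return (spte & shadow_mmio_mask) == shadow_mmio_value &&
		       likely(shadow_mmio_value);
	}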
-/*
- * Sets the shadow PTE masks used by the MMU.
- *
- * Assumptions:
- * - Setting either @accessed_mask or @dirty_mask requires setting both
- * - At least one of @accessed_mask or @acc_track_mask must be set
- */
-void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
- u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
- u64 acc_track_mask, u64 me_mask)
+void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
- BUG_ON(!dirty_mask != !accessed_mask);
- BUG_ON(!accessed_mask && !acc_track_mask);
- BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK);
-
- shadow_user_mask = user_mask;
- shadow_accessed_mask = accessed_mask;
- shadow_dirty_mask = dirty_mask;
- shadow_nx_mask = nx_mask;
- shadow_x_mask = x_mask;
- shadow_present_mask = p_mask;
- shadow_acc_track_mask = acc_track_mask;
- shadow_me_mask = me_mask;
+ shadow_user_mask = VMX_EPT_READABLE_MASK;
+ shadow_accessed_mask = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
+ shadow_dirty_mask = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
+ shadow_nx_mask = 0ull;
+ shadow_x_mask = VMX_EPT_EXECUTABLE_MASK;
+ shadow_present_mask = has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
+ shadow_acc_track_mask = VMX_EPT_RWX_MASK;
+ shadow_me_mask = 0ull;
+
+ shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
+ shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;
+
+ /*
+ * EPT Misconfigurations are generated if the value of bits 2:0
+ * of an EPT paging-structure entry is 110b (write/execute).
+ */
+ kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
+ VMX_EPT_RWX_MASK, 0);
}
-EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
+EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);
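
For EPT, bits 2:0 of a paging-structure entry encode read/write/execute, and the value 110b (writable and executable but not readable) is architecturally a misconfiguration, so using it as the MMIO value makes every MMIO access exit with EPT_MISCONFIG instead of a normal violation. A sketch of the constant arithmetic, assuming the usual VMX bit positions (R = bit 0, W = bit 1, X = bit 2):

    #include <assert.h>
    #include <stdint.h>

    #define VMX_EPT_READABLE_MASK    0x1ULL   /* bit 0 */
    #define VMX_EPT_WRITABLE_MASK    0x2ULL   /* bit 1 */
    #define VMX_EPT_EXECUTABLE_MASK  0x4ULL   /* bit 2 */
    #define VMX_EPT_RWX_MASK         (VMX_EPT_READABLE_MASK | \
                                      VMX_EPT_WRITABLE_MASK | \
                                      VMX_EPT_EXECUTABLE_MASK)
    /* Write+execute without read: 110b, a misconfiguration by definition. */
    #define VMX_EPT_MISCONFIG_WX_VALUE (VMX_EPT_WRITABLE_MASK | \
                                        VMX_EPT_EXECUTABLE_MASK)

    int main(void)
    {
        assert(VMX_EPT_MISCONFIG_WX_VALUE == 0x6);
        /* The value is fully covered by the mask, as the WARNs require. */
        assert((VMX_EPT_MISCONFIG_WX_VALUE & VMX_EPT_RWX_MASK) ==
               VMX_EPT_MISCONFIG_WX_VALUE);
        return 0;
    }
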
void kvm_mmu_reset_all_pte_masks(void)
{
u8 low_phys_bits;
-
- shadow_user_mask = 0;
- shadow_accessed_mask = 0;
- shadow_dirty_mask = 0;
- shadow_nx_mask = 0;
- shadow_x_mask = 0;
- shadow_present_mask = 0;
- shadow_acc_track_mask = 0;
+ u64 mask;
shadow_phys_bits = kvm_get_shadow_phys_bits();
@@ -315,4 +348,30 @@ void kvm_mmu_reset_all_pte_masks(void)
shadow_nonpresent_or_rsvd_lower_gfn_mask =
GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
+
+ shadow_user_mask = PT_USER_MASK;
+ shadow_accessed_mask = PT_ACCESSED_MASK;
+ shadow_dirty_mask = PT_DIRTY_MASK;
+ shadow_nx_mask = PT64_NX_MASK;
+ shadow_x_mask = 0;
+ shadow_present_mask = PT_PRESENT_MASK;
+ shadow_acc_track_mask = 0;
+ shadow_me_mask = sme_me_mask;
+
+ shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITEABLE;
+ shadow_mmu_writable_mask = DEFAULT_SPTE_MMU_WRITEABLE;
+
+ /*
+ * Set a reserved PA bit in MMIO SPTEs to generate page faults with
+ * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT
+ * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
+ * 52-bit physical addresses then there are no reserved PA bits in the
+ * PTEs and so the reserved PA approach must be disabled.
+ */
+ if (shadow_phys_bits < 52)
+ mask = BIT_ULL(51) | PT_PRESENT_MASK;
+ else
+ mask = 0;
+
+ kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}
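
On the default (non-EPT) path, MMIO caching leans on a reserved physical-address bit: if the CPU implements fewer than 52 PA bits, setting bit 51 together with the present bit guarantees a fault with PFEC.RSVD=1 on any access, which KVM can attribute to MMIO without walking the guest page tables. A small model of the decision above (PT_PRESENT_MASK assumed to be bit 0, as on x86):

    #include <stdint.h>
    #include <stdio.h>

    #define PT_PRESENT_MASK (1ULL << 0)   /* assumed x86 PTE present bit */

    static uint64_t mmio_mask_for(unsigned int phys_bits)
    {
        /* Bit 51 is reserved only when fewer than 52 PA bits exist;
         * otherwise the reserved-bit trick must be disabled entirely. */
        if (phys_bits < 52)
            return (1ULL << 51) | PT_PRESENT_MASK;
        return 0;
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)mmio_mask_for(48)); /* 0x8000000000001 */
        printf("%#llx\n", (unsigned long long)mmio_mask_for(52)); /* 0: disabled */
        return 0;
    }
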
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index 6de3950fd704..7a5ce9314107 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -5,18 +5,33 @@
#include "mmu_internal.h"
-#define PT_FIRST_AVAIL_BITS_SHIFT 10
-#define PT64_SECOND_AVAIL_BITS_SHIFT 54
+/*
+ * A MMU present SPTE is backed by actual memory and may or may not be present
+ * in hardware. E.g. MMIO SPTEs are not considered present. Use bit 11, as it
+ * is ignored by all flavors of SPTEs and checking a low bit often generates
+ * better code than for a high bit, e.g. 56+. MMU present checks are pervasive
+ * enough that the improved code generation is noticeable in KVM's footprint.
+ */
+#define SPTE_MMU_PRESENT_MASK BIT_ULL(11)
/*
- * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
- * Access Tracking SPTEs.
+ * TDP SPTEs (more specifically, EPT SPTEs) may not have A/D bits, and may also
+ * be restricted to using write-protection (for L2 when CPU dirty logging, i.e.
+ * PML, is enabled). Use bits 52 and 53 to hold the type of A/D tracking that
+ * must be employed for a given TDP SPTE.
+ *
+ * Note, the "enabled" mask must be '0', as bits 62:52 are _reserved_ for PAE
+ * paging, including NPT PAE. This scheme works because legacy shadow paging
+ * is guaranteed to have A/D bits and write-protection is forced only for
+ * TDP with CPU dirty logging (PML). If NPT ever gains PML-like support, it
+ * must be restricted to 64-bit KVM.
*/
-#define SPTE_SPECIAL_MASK (3ULL << 52)
-#define SPTE_AD_ENABLED_MASK (0ULL << 52)
-#define SPTE_AD_DISABLED_MASK (1ULL << 52)
-#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52)
-#define SPTE_MMIO_MASK (3ULL << 52)
+#define SPTE_TDP_AD_SHIFT 52
+#define SPTE_TDP_AD_MASK (3ULL << SPTE_TDP_AD_SHIFT)
+#define SPTE_TDP_AD_ENABLED_MASK (0ULL << SPTE_TDP_AD_SHIFT)
+#define SPTE_TDP_AD_DISABLED_MASK (1ULL << SPTE_TDP_AD_SHIFT)
+#define SPTE_TDP_AD_WRPROT_ONLY_MASK (2ULL << SPTE_TDP_AD_SHIFT)
+static_assert(SPTE_TDP_AD_ENABLED_MASK == 0);
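
Because the "enabled" encoding is all zeroes, testing the two-bit field needs only one compare, and the same compare is trivially correct for legacy shadow SPTEs, which never set bits 52:53. A standalone model using the encodings defined above:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define AD_SHIFT        52
    #define AD_MASK         (3ULL << AD_SHIFT)
    #define AD_ENABLED      (0ULL << AD_SHIFT)
    #define AD_DISABLED     (1ULL << AD_SHIFT)
    #define AD_WRPROT_ONLY  (2ULL << AD_SHIFT)

    static bool ad_enabled(uint64_t spte)
    {
        return (spte & AD_MASK) != AD_DISABLED;
    }

    static bool need_write_protect(uint64_t spte)
    {
        /* Any type other than the all-zero "enabled" must write-protect. */
        return (spte & AD_MASK) != AD_ENABLED;
    }

    int main(void)
    {
        assert(ad_enabled(0));              /* legacy SPTE: bits 52:53 clear */
        assert(!ad_enabled(AD_DISABLED));
        assert(need_write_protect(AD_WRPROT_ONLY));
        assert(!need_write_protect(0));
        return 0;
    }
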
#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
#define PT64_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
@@ -51,16 +66,46 @@
(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
+/* Bits 9 and 10 are ignored by all non-EPT PTEs. */
+#define DEFAULT_SPTE_HOST_WRITEABLE BIT_ULL(9)
+#define DEFAULT_SPTE_MMU_WRITEABLE BIT_ULL(10)
-#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
-#define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
+/*
+ * The mask/shift to use for saving the original R/X bits when marking the PTE
+ * as not-present for access tracking purposes. We do not save the W bit as the
+ * PTEs being access tracked also need to be dirty tracked, so the W bit will be
+ * restored only when a write is attempted to the page. This mask obviously
+ * must not overlap the A/D type mask.
+ */
+#define SHADOW_ACC_TRACK_SAVED_BITS_MASK (PT64_EPT_READABLE_MASK | \
+ PT64_EPT_EXECUTABLE_MASK)
+#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT 54
+#define SHADOW_ACC_TRACK_SAVED_MASK (SHADOW_ACC_TRACK_SAVED_BITS_MASK << \
+ SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
+static_assert(!(SPTE_TDP_AD_MASK & SHADOW_ACC_TRACK_SAVED_MASK));
/*
- * Due to limited space in PTEs, the MMIO generation is a 18 bit subset of
+ * Low ignored bits are at a premium for EPT; use high ignored bits, taking care
+ * to not overlap the A/D type mask or the saved access bits of access-tracked
+ * SPTEs when A/D bits are disabled.
+ */
+#define EPT_SPTE_HOST_WRITABLE BIT_ULL(57)
+#define EPT_SPTE_MMU_WRITABLE BIT_ULL(58)
+
+static_assert(!(EPT_SPTE_HOST_WRITABLE & SPTE_TDP_AD_MASK));
+static_assert(!(EPT_SPTE_MMU_WRITABLE & SPTE_TDP_AD_MASK));
+static_assert(!(EPT_SPTE_HOST_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));
+static_assert(!(EPT_SPTE_MMU_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));
+
+/* Defined only to keep the above static asserts readable. */
+#undef SHADOW_ACC_TRACK_SAVED_MASK
+
+/*
+ * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of
* the memslots generation and is derived as follows:
*
- * Bits 0-8 of the MMIO generation are propagated to spte bits 3-11
- * Bits 9-17 of the MMIO generation are propagated to spte bits 54-62
+ * Bits 0-7 of the MMIO generation are propagated to spte bits 3-10
+ * Bits 8-18 of the MMIO generation are propagated to spte bits 52-62
*
* The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
* the MMIO generation number, as doing so would require stealing a bit from
@@ -71,39 +116,44 @@
*/
#define MMIO_SPTE_GEN_LOW_START 3
-#define MMIO_SPTE_GEN_LOW_END 11
+#define MMIO_SPTE_GEN_LOW_END 10
-#define MMIO_SPTE_GEN_HIGH_START PT64_SECOND_AVAIL_BITS_SHIFT
+#define MMIO_SPTE_GEN_HIGH_START 52
#define MMIO_SPTE_GEN_HIGH_END 62
#define MMIO_SPTE_GEN_LOW_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
MMIO_SPTE_GEN_LOW_START)
#define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
MMIO_SPTE_GEN_HIGH_START)
+static_assert(!(SPTE_MMU_PRESENT_MASK &
+ (MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK)));
#define MMIO_SPTE_GEN_LOW_BITS (MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)
#define MMIO_SPTE_GEN_HIGH_BITS (MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1)
/* remember to adjust the comment above as well if you change these */
-static_assert(MMIO_SPTE_GEN_LOW_BITS == 9 && MMIO_SPTE_GEN_HIGH_BITS == 9);
+static_assert(MMIO_SPTE_GEN_LOW_BITS == 8 && MMIO_SPTE_GEN_HIGH_BITS == 11);
#define MMIO_SPTE_GEN_LOW_SHIFT (MMIO_SPTE_GEN_LOW_START - 0)
#define MMIO_SPTE_GEN_HIGH_SHIFT (MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS)
#define MMIO_SPTE_GEN_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)
+extern u64 __read_mostly shadow_host_writable_mask;
+extern u64 __read_mostly shadow_mmu_writable_mask;
extern u64 __read_mostly shadow_nx_mask;
extern u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
extern u64 __read_mostly shadow_user_mask;
extern u64 __read_mostly shadow_accessed_mask;
extern u64 __read_mostly shadow_dirty_mask;
extern u64 __read_mostly shadow_mmio_value;
+extern u64 __read_mostly shadow_mmio_mask;
extern u64 __read_mostly shadow_mmio_access_mask;
extern u64 __read_mostly shadow_present_mask;
extern u64 __read_mostly shadow_me_mask;
/*
- * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK;
+ * SPTEs in MMUs without A/D bits are marked with SPTE_TDP_AD_DISABLED_MASK;
* shadow_acc_track_mask is the set of bits to be cleared in non-accessed
* pages.
*/
@@ -121,28 +171,21 @@ extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
#define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN 5
/*
- * The mask/shift to use for saving the original R/X bits when marking the PTE
- * as not-present for access tracking purposes. We do not save the W bit as the
- * PTEs being access tracked also need to be dirty tracked, so the W bit will be
- * restored only when a write is attempted to the page.
- */
-#define SHADOW_ACC_TRACK_SAVED_BITS_MASK (PT64_EPT_READABLE_MASK | \
- PT64_EPT_EXECUTABLE_MASK)
-#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT PT64_SECOND_AVAIL_BITS_SHIFT
-
-/*
* If a thread running without exclusive control of the MMU lock must perform a
* multi-part operation on an SPTE, it can set the SPTE to REMOVED_SPTE as a
* non-present intermediate value. Other threads which encounter this value
* should not modify the SPTE.
*
- * This constant works because it is considered non-present on both AMD and
- * Intel CPUs and does not create a L1TF vulnerability because the pfn section
- * is zeroed out.
+ * Use a semi-arbitrary value that doesn't set RWX bits, i.e. is not-present on
+ * both AMD and Intel CPUs, and doesn't set PFN bits, i.e. doesn't create an L1TF
+ * vulnerability. Use only low bits to avoid 64-bit immediates.
*
* Only used by the TDP MMU.
*/
-#define REMOVED_SPTE (1ull << 59)
+#define REMOVED_SPTE 0x5a0ULL
+
+/* Removed SPTEs must not be misconstrued as shadow present PTEs. */
+static_assert(!(REMOVED_SPTE & SPTE_MMU_PRESENT_MASK));
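
REMOVED_SPTE doubles as a lock: the thread whose cmpxchg replaces the old value with the marker has "frozen" the entry and is the only one allowed to complete the multi-part update, while a racing thread sees the marker and backs off (see tdp_mmu_set_spte_atomic() below). A minimal C11 model of that protocol, with hypothetical names:

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define REMOVED_SPTE 0x5a0ULL

    /* Try to freeze *sptep; only the winner may do the multi-part update. */
    static bool freeze_spte(_Atomic uint64_t *sptep, uint64_t old_spte)
    {
        return atomic_compare_exchange_strong(sptep, &old_spte, REMOVED_SPTE);
    }

    int main(void)
    {
        _Atomic uint64_t spte = 0x1234;

        assert(freeze_spte(&spte, 0x1234));   /* first thread wins */
        assert(!freeze_spte(&spte, 0x1234));  /* a racing thread now loses */
        assert(atomic_load(&spte) == REMOVED_SPTE);
        return 0;
    }
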
static inline bool is_removed_spte(u64 spte)
{
@@ -167,7 +210,13 @@ extern u8 __read_mostly shadow_phys_bits;
static inline bool is_mmio_spte(u64 spte)
{
- return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
+ return (spte & shadow_mmio_mask) == shadow_mmio_value &&
+ likely(shadow_mmio_value);
+}
+
+static inline bool is_shadow_present_pte(u64 pte)
+{
+ return !!(pte & SPTE_MMU_PRESENT_MASK);
}
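
The relocated is_shadow_present_pte() also changes semantics: presence is no longer inferred from "non-zero, not MMIO, not removed" (the definition deleted further down) but read directly from the dedicated bit. A small standalone model of the before/after, using the bit defined above:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SPTE_MMU_PRESENT_MASK (1ULL << 11)

    /* Old scheme: presence derived from three separate conditions. */
    static bool present_old(uint64_t pte, bool is_mmio, bool is_removed)
    {
        return pte != 0 && !is_mmio && !is_removed;
    }

    /* New scheme: one AND against a dedicated low bit. */
    static bool present_new(uint64_t pte)
    {
        return pte & SPTE_MMU_PRESENT_MASK;
    }

    int main(void)
    {
        uint64_t spte = 0xfee00000ULL | SPTE_MMU_PRESENT_MASK;

        assert(present_new(spte));
        assert(present_old(spte, false, false));
        assert(!present_new(0x5a0));   /* e.g. REMOVED_SPTE is not present */
        return 0;
    }
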
static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
@@ -177,25 +226,30 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
static inline bool spte_ad_enabled(u64 spte)
{
- MMU_WARN_ON(is_mmio_spte(spte));
- return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK;
+ MMU_WARN_ON(!is_shadow_present_pte(spte));
+ return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_DISABLED_MASK;
}
static inline bool spte_ad_need_write_protect(u64 spte)
{
- MMU_WARN_ON(is_mmio_spte(spte));
- return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
+ MMU_WARN_ON(!is_shadow_present_pte(spte));
+ /*
+ * This is benign for non-TDP SPTEs as SPTE_TDP_AD_ENABLED_MASK is '0',
+ * and non-TDP SPTEs will never set these bits. Optimize for 64-bit
+ * TDP and do the A/D type check unconditionally.
+ */
+ return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_ENABLED_MASK;
}
static inline u64 spte_shadow_accessed_mask(u64 spte)
{
- MMU_WARN_ON(is_mmio_spte(spte));
+ MMU_WARN_ON(!is_shadow_present_pte(spte));
return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}
static inline u64 spte_shadow_dirty_mask(u64 spte)
{
- MMU_WARN_ON(is_mmio_spte(spte));
+ MMU_WARN_ON(!is_shadow_present_pte(spte));
return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}
@@ -204,11 +258,6 @@ static inline bool is_access_track_spte(u64 spte)
return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
}
-static inline bool is_shadow_present_pte(u64 pte)
-{
- return (pte != 0) && !is_mmio_spte(pte) && !is_removed_spte(pte);
-}
-
static inline bool is_large_pte(u64 pte)
{
return pte & PT_PAGE_SIZE_MASK;
@@ -244,10 +293,42 @@ static inline bool is_dirty_spte(u64 spte)
return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}
+static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte,
+ int level)
+{
+ int bit7 = (pte >> 7) & 1;
+
+ return rsvd_check->rsvd_bits_mask[bit7][level-1];
+}
+
+static inline bool __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check,
+ u64 pte, int level)
+{
+ return pte & get_rsvd_bits(rsvd_check, pte, level);
+}
+
+static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check,
+ u64 pte)
+{
+ return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
+}
+
+static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
+ u64 spte, int level)
+{
+ /*
+ * Use a bitwise-OR instead of a logical-OR to aggregate the reserved
+ * bits and EPT's invalid memtype/XWR checks to avoid an extra Jcc
+ * (this is extremely unlikely to be short-circuited as true).
+ */
+ return __is_bad_mt_xwr(rsvd_check, spte) |
+ __is_rsvd_bits_set(rsvd_check, spte, level);
+}
+
static inline bool spte_can_locklessly_be_made_writable(u64 spte)
{
- return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
- (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
+ return (spte & shadow_host_writable_mask) &&
+ (spte & shadow_mmu_writable_mask);
}
static inline u64 get_mmio_spte_generation(u64 spte)
diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c
index e5f148106e20..b3ed302c1a35 100644
--- a/arch/x86/kvm/mmu/tdp_iter.c
+++ b/arch/x86/kvm/mmu/tdp_iter.c
@@ -21,6 +21,21 @@ static gfn_t round_gfn_for_level(gfn_t gfn, int level)
}
/*
+ * Return the TDP iterator to the root PT and allow it to continue its
+ * traversal over the paging structure from there.
+ */
+void tdp_iter_restart(struct tdp_iter *iter)
+{
+ iter->yielded_gfn = iter->next_last_level_gfn;
+ iter->level = iter->root_level;
+
+ iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
+ tdp_iter_refresh_sptep(iter);
+
+ iter->valid = true;
+}
+
+/*
* Sets a TDP iterator to walk a pre-order traversal of the paging structure
* rooted at root_pt, starting with the walk to translate next_last_level_gfn.
*/
@@ -31,16 +46,12 @@ void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
WARN_ON(root_level > PT64_ROOT_MAX_LEVEL);
iter->next_last_level_gfn = next_last_level_gfn;
- iter->yielded_gfn = iter->next_last_level_gfn;
iter->root_level = root_level;
iter->min_level = min_level;
- iter->level = root_level;
- iter->pt_path[iter->level - 1] = (tdp_ptep_t)root_pt;
-
- iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);
- tdp_iter_refresh_sptep(iter);
+ iter->pt_path[iter->root_level - 1] = (tdp_ptep_t)root_pt;
+ iter->as_id = kvm_mmu_page_as_id(sptep_to_sp(root_pt));
- iter->valid = true;
+ tdp_iter_restart(iter);
}
/*
@@ -159,8 +170,3 @@ void tdp_iter_next(struct tdp_iter *iter)
iter->valid = false;
}
-tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter)
-{
- return iter->pt_path[iter->root_level - 1];
-}
-
diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h
index 4cc177d75c4a..b1748b988d3a 100644
--- a/arch/x86/kvm/mmu/tdp_iter.h
+++ b/arch/x86/kvm/mmu/tdp_iter.h
@@ -36,6 +36,8 @@ struct tdp_iter {
int min_level;
/* The iterator's current level within the paging structure */
int level;
+ /* The address space ID, i.e. SMM vs. regular. */
+ int as_id;
/* A snapshot of the value at sptep */
u64 old_spte;
/*
@@ -62,6 +64,6 @@ tdp_ptep_t spte_to_child_pt(u64 pte, int level);
void tdp_iter_start(struct tdp_iter *iter, u64 *root_pt, int root_level,
int min_level, gfn_t next_last_level_gfn);
void tdp_iter_next(struct tdp_iter *iter);
-tdp_ptep_t tdp_iter_root_pt(struct tdp_iter *iter);
+void tdp_iter_restart(struct tdp_iter *iter);
#endif /* __KVM_X86_MMU_TDP_ITER_H */
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index c926c6b899a1..0853370bd811 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -14,10 +14,10 @@ static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
/* Initializes the TDP MMU for the VM, if enabled. */
-void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
+bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
- return;
+ return false;
/* This should not be changed for the lifetime of the VM. */
kvm->arch.tdp_mmu_enabled = true;
@@ -25,6 +25,17 @@ void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
+
+ return true;
+}
+
+static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
+ bool shared)
+{
+ if (shared)
+ lockdep_assert_held_read(&kvm->mmu_lock);
+ else
+ lockdep_assert_held_write(&kvm->mmu_lock);
}
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
@@ -41,32 +52,85 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
rcu_barrier();
}
-static void tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root)
+static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
+ gfn_t start, gfn_t end, bool can_yield, bool flush,
+ bool shared);
+
+static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
+{
+ free_page((unsigned long)sp->spt);
+ kmem_cache_free(mmu_page_header_cache, sp);
+}
+
+/*
+ * This is called through call_rcu in order to free TDP page table memory
+ * safely with respect to other kernel threads that may be operating on
+ * the memory.
+ * Because TDP MMU page table memory is only accessed in RCU read-side
+ * critical sections and freed only after a grace period, lockless walkers
+ * will never touch the memory after it has been freed.
+ */
+static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
- if (kvm_mmu_put_root(kvm, root))
- kvm_tdp_mmu_free_root(kvm, root);
+ struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
+ rcu_head);
+
+ tdp_mmu_free_sp(sp);
}
-static inline bool tdp_mmu_next_root_valid(struct kvm *kvm,
- struct kvm_mmu_page *root)
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+ bool shared)
{
- lockdep_assert_held_write(&kvm->mmu_lock);
+ gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
- if (list_entry_is_head(root, &kvm->arch.tdp_mmu_roots, link))
- return false;
+ kvm_lockdep_assert_mmu_lock_held(kvm, shared);
- kvm_mmu_get_root(kvm, root);
- return true;
+ if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
+ return;
+
+ WARN_ON(!root->tdp_mmu_page);
+
+ spin_lock(&kvm->arch.tdp_mmu_pages_lock);
+ list_del_rcu(&root->link);
+ spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+
+ zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared);
+ call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
-static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
- struct kvm_mmu_page *root)
+/*
+ * Finds the next valid root after @prev_root (or the first valid root if
+ * @prev_root is NULL), takes a reference on it, and returns that next root.
+ * If @prev_root is not NULL, this thread should have already taken a
+ * reference on it, and that reference will be dropped. If no valid root is
+ * found, this function will return NULL.
+ */
+static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
+ struct kvm_mmu_page *prev_root,
+ bool shared)
{
struct kvm_mmu_page *next_root;
- next_root = list_next_entry(root, link);
- tdp_mmu_put_root(kvm, root);
+ rcu_read_lock();
+
+ if (prev_root)
+ next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
+ &prev_root->link,
+ typeof(*prev_root), link);
+ else
+ next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
+ typeof(*next_root), link);
+
+ while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root))
+ next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
+ &next_root->link, typeof(*next_root), link);
+
+ rcu_read_unlock();
+
+ if (prev_root)
+ kvm_tdp_mmu_put_root(kvm, prev_root, shared);
+
return next_root;
}
@@ -75,35 +139,24 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
* This makes it safe to release the MMU lock and yield within the loop, but
* if exiting the loop early, the caller must drop the reference to the most
* recent root. (Unless keeping a live reference is desirable.)
+ *
+ * If shared is set, this function is operating under the MMU lock in read
+ * mode. In the unlikely event that this thread must free a root, the lock
+ * will be temporarily dropped and reacquired in write mode.
*/
-#define for_each_tdp_mmu_root_yield_safe(_kvm, _root) \
- for (_root = list_first_entry(&_kvm->arch.tdp_mmu_roots, \
- typeof(*_root), link); \
- tdp_mmu_next_root_valid(_kvm, _root); \
- _root = tdp_mmu_next_root(_kvm, _root))
-
-#define for_each_tdp_mmu_root(_kvm, _root) \
- list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
-
-static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t start, gfn_t end, bool can_yield);
-
-void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
-{
- gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
-
- lockdep_assert_held_write(&kvm->mmu_lock);
-
- WARN_ON(root->root_count);
- WARN_ON(!root->tdp_mmu_page);
-
- list_del(&root->link);
-
- zap_gfn_range(kvm, root, 0, max_gfn, false);
-
- free_page((unsigned long)root->spt);
- kmem_cache_free(mmu_page_header_cache, root);
-}
+#define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \
+ for (_root = tdp_mmu_next_root(_kvm, NULL, _shared); \
+ _root; \
+ _root = tdp_mmu_next_root(_kvm, _root, _shared)) \
+ if (kvm_mmu_page_as_id(_root) != _as_id) { \
+ } else
+
+#define for_each_tdp_mmu_root(_kvm, _root, _as_id) \
+ list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link, \
+ lockdep_is_held_type(&_kvm->mmu_lock, 0) || \
+ lockdep_is_held(&_kvm->arch.tdp_mmu_pages_lock)) \
+ if (kvm_mmu_page_as_id(_root) != _as_id) { \
+ } else
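
Both macros filter on the address space with an "if (...) { } else" tail rather than a plain "if (match)": the caller's loop body becomes the else branch, so the macro stays safe when that body is a single unbraced statement and a subsequent "else" in caller code cannot bind to the hidden "if". A tiny standalone model of the idiom:

    #include <stdio.h>

    /* Same shape as for_each_tdp_mmu_root(): skip non-matching items with
     * an empty if-body so the caller's statement becomes the else branch. */
    #define for_each_even(_i, _n)               \
        for ((_i) = 0; (_i) < (_n); (_i)++)     \
            if ((_i) % 2 != 0) {                \
            } else

    int main(void)
    {
        int i;

        for_each_even(i, 6)
            printf("%d\n", i);   /* prints 0, 2, 4 */
        return 0;
    }
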
static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
int level)
@@ -137,86 +190,46 @@ static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
return sp;
}
-static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
+hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
union kvm_mmu_page_role role;
struct kvm *kvm = vcpu->kvm;
struct kvm_mmu_page *root;
- role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
+ lockdep_assert_held_write(&kvm->mmu_lock);
- write_lock(&kvm->mmu_lock);
+ role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
/* Check for an existing root before allocating a new one. */
- for_each_tdp_mmu_root(kvm, root) {
- if (root->role.word == role.word) {
- kvm_mmu_get_root(kvm, root);
- write_unlock(&kvm->mmu_lock);
- return root;
- }
+ for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
+ if (root->role.word == role.word &&
+ kvm_tdp_mmu_get_root(kvm, root))
+ goto out;
}
root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
- root->root_count = 1;
+ refcount_set(&root->tdp_mmu_root_count, 1);
- list_add(&root->link, &kvm->arch.tdp_mmu_roots);
-
- write_unlock(&kvm->mmu_lock);
-
- return root;
-}
-
-hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
-{
- struct kvm_mmu_page *root;
-
- root = get_tdp_mmu_vcpu_root(vcpu);
- if (!root)
- return INVALID_PAGE;
+ spin_lock(&kvm->arch.tdp_mmu_pages_lock);
+ list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
+ spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+out:
return __pa(root->spt);
}
-static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
-{
- free_page((unsigned long)sp->spt);
- kmem_cache_free(mmu_page_header_cache, sp);
-}
-
-/*
- * This is called through call_rcu in order to free TDP page table memory
- * safely with respect to other kernel threads that may be operating on
- * the memory.
- * By only accessing TDP MMU page table memory in an RCU read critical
- * section, and freeing it after a grace period, lockless access to that
- * memory won't use it after it is freed.
- */
-static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
-{
- struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
- rcu_head);
-
- tdp_mmu_free_sp(sp);
-}
-
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
u64 old_spte, u64 new_spte, int level,
bool shared);
-static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
-{
- return sp->role.smm ? 1 : 0;
-}
-
static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
- bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
-
if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
return;
if (is_accessed_spte(old_spte) &&
- (!is_accessed_spte(new_spte) || pfn_changed))
+ (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
+ spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}
@@ -301,11 +314,16 @@ static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
*
* Given a page table that has been removed from the TDP paging structure,
* iterates through the page table to clear SPTEs and free child page tables.
+ *
+ * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
+ * protection. Since this thread removed it from the paging structure,
+ * this thread will be responsible for ensuring the page is freed. Hence the
+ * early rcu_dereferences in the function.
*/
-static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
+static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
bool shared)
{
- struct kvm_mmu_page *sp = sptep_to_sp(pt);
+ struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
int level = sp->role.level;
gfn_t base_gfn = sp->gfn;
u64 old_child_spte;
@@ -318,8 +336,8 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
tdp_mmu_unlink_page(kvm, sp, shared);
for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
- sptep = pt + i;
- gfn = base_gfn + (i * KVM_PAGES_PER_HPAGE(level - 1));
+ sptep = rcu_dereference(pt) + i;
+ gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
if (shared) {
/*
@@ -337,7 +355,18 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
cpu_relax();
}
} else {
+ /*
+ * If the SPTE is not MMU-present, there is no backing
+ * page associated with the SPTE and so no side effects
+ * that need to be recorded, and exclusive ownership of
+ * mmu_lock ensures the SPTE can't be made present.
+ * Note, zapping MMIO SPTEs is also unnecessary as they
+ * are guarded by the memslots generation, not by being
+ * unreachable.
+ */
old_child_spte = READ_ONCE(*sptep);
+ if (!is_shadow_present_pte(old_child_spte))
+ continue;
/*
* Marking the SPTE as a removed SPTE is not
@@ -350,18 +379,18 @@ static void handle_removed_tdp_mmu_page(struct kvm *kvm, u64 *pt,
WRITE_ONCE(*sptep, REMOVED_SPTE);
}
handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
- old_child_spte, REMOVED_SPTE, level - 1,
+ old_child_spte, REMOVED_SPTE, level,
shared);
}
kvm_flush_remote_tlbs_with_address(kvm, gfn,
- KVM_PAGES_PER_HPAGE(level));
+ KVM_PAGES_PER_HPAGE(level + 1));
call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
/**
- * handle_changed_spte - handle bookkeeping associated with an SPTE change
+ * __handle_changed_spte - handle bookkeeping associated with an SPTE change
* @kvm: kvm instance
* @as_id: the address space of the paging structure the SPTE was a part of
* @gfn: the base GFN that was mapped by the SPTE
@@ -393,7 +422,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
* If this warning were to trigger it would indicate that there was a
* missing MMU notifier or a race with some notifier handler.
* A present, leaf SPTE should never be directly replaced with another
- * present leaf SPTE pointing to a differnt PFN. A notifier handler
+ * present leaf SPTE pointing to a different PFN. A notifier handler
* should be zapping the SPTE before the main MM's page table is
* changed, or the SPTE should be zeroed, and the TLBs flushed by the
* thread before replacement.
@@ -407,7 +436,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
/*
* Crash the host to prevent error propagation and guest data
- * courruption.
+ * corruption.
*/
BUG();
}
@@ -417,6 +446,13 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
+ if (is_large_pte(old_spte) != is_large_pte(new_spte)) {
+ if (is_large_pte(old_spte))
+ atomic64_sub(1, (atomic64_t *)&kvm->stat.lpages);
+ else
+ atomic64_add(1, (atomic64_t *)&kvm->stat.lpages);
+ }
+
/*
* The only times a SPTE should be changed from a non-present to
* non-present state is when an MMIO entry is installed/modified/
@@ -444,7 +480,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
if (was_leaf && is_dirty_spte(old_spte) &&
- (!is_dirty_spte(new_spte) || pfn_changed))
+ (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
kvm_set_pfn_dirty(spte_to_pfn(old_spte));
/*
@@ -468,8 +504,9 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
}
/*
- * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically and handle the
- * associated bookkeeping
+ * tdp_mmu_set_spte_atomic_no_dirty_log - Set a TDP MMU SPTE atomically
+ * and handle the associated bookkeeping, but do not mark the page dirty
+ * in KVM's dirty bitmaps.
*
* @kvm: kvm instance
* @iter: a tdp_iter instance currently on the SPTE that should be set
@@ -477,30 +514,39 @@ static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
* Returns: true if the SPTE was set, false if it was not. If false is returned,
* this function will have no side-effects.
*/
-static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
- struct tdp_iter *iter,
- u64 new_spte)
+static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm,
+ struct tdp_iter *iter,
+ u64 new_spte)
{
- u64 *root_pt = tdp_iter_root_pt(iter);
- struct kvm_mmu_page *root = sptep_to_sp(root_pt);
- int as_id = kvm_mmu_page_as_id(root);
-
lockdep_assert_held_read(&kvm->mmu_lock);
/*
* Do not change removed SPTEs. Only the thread that froze the SPTE
* may modify it.
*/
- if (iter->old_spte == REMOVED_SPTE)
+ if (is_removed_spte(iter->old_spte))
return false;
if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
new_spte) != iter->old_spte)
return false;
- handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
- iter->level, true);
+ __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
+ new_spte, iter->level, true);
+ handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
+
+ return true;
+}
+
+static inline bool tdp_mmu_set_spte_atomic(struct kvm *kvm,
+ struct tdp_iter *iter,
+ u64 new_spte)
+{
+ if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))
+ return false;
+ handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
+ iter->old_spte, new_spte, iter->level);
return true;
}
@@ -522,12 +568,12 @@ static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
/*
* No other thread can overwrite the removed SPTE as they
* must either wait on the MMU lock or use
- * tdp_mmu_set_spte_atomic which will not overrite the
+ * tdp_mmu_set_spte_atomic which will not overwrite the
* special removed SPTE value. No bookkeeping is needed
* here since the SPTE is going from non-present
* to non-present.
*/
- WRITE_ONCE(*iter->sptep, 0);
+ WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
return true;
}
@@ -553,10 +599,6 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
u64 new_spte, bool record_acc_track,
bool record_dirty_log)
{
- tdp_ptep_t root_pt = tdp_iter_root_pt(iter);
- struct kvm_mmu_page *root = sptep_to_sp(root_pt);
- int as_id = kvm_mmu_page_as_id(root);
-
lockdep_assert_held_write(&kvm->mmu_lock);
/*
@@ -566,17 +608,17 @@ static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
* should be used. If operating under the MMU lock in write mode, the
* use of the removed SPTE should not be necessary.
*/
- WARN_ON(iter->old_spte == REMOVED_SPTE);
+ WARN_ON(is_removed_spte(iter->old_spte));
WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
- __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
- iter->level, false);
+ __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
+ new_spte, iter->level, false);
if (record_acc_track)
handle_changed_spte_acc_track(iter->old_spte, new_spte,
iter->level);
if (record_dirty_log)
- handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
+ handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
iter->old_spte, new_spte,
iter->level);
}
@@ -631,7 +673,8 @@ static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
* Return false if a yield was not needed.
*/
static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
- struct tdp_iter *iter, bool flush)
+ struct tdp_iter *iter, bool flush,
+ bool shared)
{
/* Ensure forward progress has been made before yielding. */
if (iter->next_last_level_gfn == iter->yielded_gfn)
@@ -643,14 +686,16 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
if (flush)
kvm_flush_remote_tlbs(kvm);
- cond_resched_rwlock_write(&kvm->mmu_lock);
+ if (shared)
+ cond_resched_rwlock_read(&kvm->mmu_lock);
+ else
+ cond_resched_rwlock_write(&kvm->mmu_lock);
+
rcu_read_lock();
WARN_ON(iter->gfn > iter->next_last_level_gfn);
- tdp_iter_start(iter, iter->pt_path[iter->root_level - 1],
- iter->root_level, iter->min_level,
- iter->next_last_level_gfn);
+ tdp_iter_restart(iter);
return true;
}
@@ -663,24 +708,33 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
* non-root pages mapping GFNs strictly within that range. Returns true if
* SPTEs have been cleared and a TLB flush is needed before releasing the
* MMU lock.
+ *
* If can_yield is true, will release the MMU lock and reschedule if the
* scheduler needs the CPU or there is contention on the MMU lock. If this
* function cannot yield, it will not release the MMU lock or reschedule and
* the caller must ensure it does not supply too large a GFN range, or the
* operation can cause a soft lockup.
+ *
+ * If shared is true, this thread holds the MMU lock in read mode and must
+ * account for the possibility that other threads are modifying the paging
+ * structures concurrently. If shared is false, this thread should hold the
+ * MMU lock in write mode.
*/
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t start, gfn_t end, bool can_yield)
+ gfn_t start, gfn_t end, bool can_yield, bool flush,
+ bool shared)
{
struct tdp_iter iter;
- bool flush_needed = false;
+
+ kvm_lockdep_assert_mmu_lock_held(kvm, shared);
rcu_read_lock();
tdp_root_for_each_pte(iter, root, start, end) {
+retry:
if (can_yield &&
- tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
- flush_needed = false;
+ tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
+ flush = false;
continue;
}
@@ -697,12 +751,21 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
!is_last_spte(iter.old_spte, iter.level))
continue;
- tdp_mmu_set_spte(kvm, &iter, 0);
- flush_needed = true;
+ if (!shared) {
+ tdp_mmu_set_spte(kvm, &iter, 0);
+ flush = true;
+ } else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
+ /*
+ * The iter must explicitly re-read the SPTE because
+ * the atomic cmpxchg failed.
+ */
+ iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
+ goto retry;
+ }
}
rcu_read_unlock();
- return flush_needed;
+ return flush;
}
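
When mmu_lock is held for read, a failed tdp_mmu_zap_spte_atomic() means another thread changed the SPTE underneath this one, so the iterator must re-read the current value and retry instead of advancing. The same compare-and-swap retry shape, modeled standalone (the kernel version first freezes the entry to REMOVED_SPTE; that intermediate step is elided here):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>

    /* Keep retrying until the zap (write of 0) wins the race; on failure the
     * CAS refreshes 'old' with the observed value, mirroring the explicit
     * re-read of iter.old_spte in the kernel loop. */
    static void zap_spte(_Atomic uint64_t *sptep)
    {
        uint64_t old = atomic_load(sptep);

        while (!atomic_compare_exchange_strong(sptep, &old, 0))
            ;   /* 'old' now holds the current value; try again */
    }

    int main(void)
    {
        _Atomic uint64_t spte = 0xabcd;

        zap_spte(&spte);
        assert(atomic_load(&spte) == 0);
        return 0;
    }
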
/*
@@ -710,14 +773,21 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
* non-root pages mapping GFNs strictly within that range. Returns true if
* SPTEs have been cleared and a TLB flush is needed before releasing the
* MMU lock.
+ *
+ * If shared is true, this thread holds the MMU lock in read mode and must
+ * account for the possibility that other threads are modifying the paging
+ * structures concurrently. If shared is false, this thread should hold the
+ * MMU lock in write mode.
*/
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
+ gfn_t end, bool can_yield, bool flush,
+ bool shared)
{
struct kvm_mmu_page *root;
- bool flush = false;
- for_each_tdp_mmu_root_yield_safe(kvm, root)
- flush |= zap_gfn_range(kvm, root, start, end, true);
+ for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, shared)
+ flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
+ shared);
return flush;
}
@@ -725,14 +795,116 @@ bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
- bool flush;
+ bool flush = false;
+ int i;
+
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
+ flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
+ flush, false);
+
+ if (flush)
+ kvm_flush_remote_tlbs(kvm);
+}
+
+static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
+ struct kvm_mmu_page *prev_root)
+{
+ struct kvm_mmu_page *next_root;
+
+ if (prev_root)
+ next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
+ &prev_root->link,
+ typeof(*prev_root), link);
+ else
+ next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
+ typeof(*next_root), link);
+
+ while (next_root && !(next_root->role.invalid &&
+ refcount_read(&next_root->tdp_mmu_root_count)))
+ next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
+ &next_root->link,
+ typeof(*next_root), link);
+
+ return next_root;
+}
+
+/*
+ * Since kvm_tdp_mmu_zap_all_fast has acquired a reference to each
+ * invalidated root, they will not be freed until this function drops the
+ * reference. Before dropping that reference, tear down the paging
+ * structure so that whichever thread does drop the last reference
+ * only has to do a trivial amount of work. Since the roots are invalid,
+ * no new SPTEs should be created under them.
+ */
+void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
+{
+ gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+ struct kvm_mmu_page *next_root;
+ struct kvm_mmu_page *root;
+ bool flush = false;
+
+ lockdep_assert_held_read(&kvm->mmu_lock);
+
+ rcu_read_lock();
+
+ root = next_invalidated_root(kvm, NULL);
+
+ while (root) {
+ next_root = next_invalidated_root(kvm, root);
+
+ rcu_read_unlock();
+
+ flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush,
+ true);
+
+ /*
+ * Put the reference acquired in
+ * kvm_tdp_mmu_invalidate_roots
+ */
+ kvm_tdp_mmu_put_root(kvm, root, true);
+
+ root = next_root;
+
+ rcu_read_lock();
+ }
+
+ rcu_read_unlock();
- flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn);
if (flush)
kvm_flush_remote_tlbs(kvm);
}
/*
+ * Mark each TDP MMU root as invalid so that other threads
+ * will drop their references and allow the root count to
+ * go to 0.
+ *
+ * Also take a reference on all roots so that this thread
+ * can do the bulk of the work required to free the roots
+ * once they are invalidated. Without this reference, a
+ * vCPU thread might drop the last reference to a root and
+ * get stuck with tearing down the entire paging structure.
+ *
+ * Roots which have a zero refcount should be skipped as
+ * they're already being torn down.
+ * Already invalid roots should be referenced again so that
+ * they aren't freed before kvm_tdp_mmu_zap_all_fast is
+ * done with them.
+ *
+ * This has essentially the same effect for the TDP MMU
+ * as updating mmu_valid_gen does for the shadow MMU.
+ */
+void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
+{
+ struct kvm_mmu_page *root;
+
+ lockdep_assert_held_write(&kvm->mmu_lock);
+ list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
+ if (refcount_inc_not_zero(&root->tdp_mmu_root_count))
+ root->role.invalid = true;
+}
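
Taken together, the intended fast-zap sequence is: flag every root invalid while holding mmu_lock for write (cheap), then drop to read mode and tear the invalidated roots down so vCPUs are excluded only briefly. A hedged sketch of how a caller such as the fast-zap path in mmu.c (not part of this hunk) is expected to pair the two helpers:

    /* Sketch only; assumes the two functions declared in this patch. */
    static void zap_all_fast(struct kvm *kvm)
    {
            write_lock(&kvm->mmu_lock);
            kvm_tdp_mmu_invalidate_all_roots(kvm);  /* quick: flags + refcounts */
            write_unlock(&kvm->mmu_lock);

            read_lock(&kvm->mmu_lock);
            kvm_tdp_mmu_zap_invalidated_roots(kvm); /* slow: may yield, runs shared */
            read_unlock(&kvm->mmu_lock);
    }
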
+
+/*
* Installs a last-level SPTE to handle a TDP page fault.
* (NPT/EPT violation/misconfiguration)
*/
@@ -742,7 +914,7 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
kvm_pfn_t pfn, bool prefault)
{
u64 new_spte;
- int ret = 0;
+ int ret = RET_PF_FIXED;
int make_spte_ret = 0;
if (unlikely(is_noslot_pfn(pfn)))
@@ -774,13 +946,16 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
new_spte);
ret = RET_PF_EMULATE;
- } else
+ } else {
trace_kvm_mmu_set_spte(iter->level, iter->gfn,
rcu_dereference(iter->sptep));
+ }
- trace_kvm_mmu_set_spte(iter->level, iter->gfn,
- rcu_dereference(iter->sptep));
- if (!prefault)
+ /*
+ * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
+ * consistent with legacy MMU behavior.
+ */
+ if (ret != RET_PF_SPURIOUS)
vcpu->stat.pf_fixed++;
return ret;
@@ -808,11 +983,6 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
int level;
int req_level;
- if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
- return RET_PF_RETRY;
- if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
- return RET_PF_RETRY;
-
level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
huge_page_disallowed, &req_level);
@@ -847,7 +1017,15 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
}
if (!is_shadow_present_pte(iter.old_spte)) {
- sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
+ /*
+ * If SPTE has been frozen by another thread, just
+ * give up and retry, avoiding unnecessary page table
+ * allocation and free.
+ */
+ if (is_removed_spte(iter.old_spte))
+ break;
+
+ sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1);
child_pt = sp->spt;
new_spte = make_nonleaf_spte(child_pt,
@@ -879,205 +1057,145 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
return ret;
}
-static __always_inline int
-kvm_tdp_mmu_handle_hva_range(struct kvm *kvm,
- unsigned long start,
- unsigned long end,
- unsigned long data,
- int (*handler)(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- struct kvm_mmu_page *root,
- gfn_t start,
- gfn_t end,
- unsigned long data))
+bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
+ bool flush)
{
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
struct kvm_mmu_page *root;
- int ret = 0;
- int as_id;
-
- for_each_tdp_mmu_root_yield_safe(kvm, root) {
- as_id = kvm_mmu_page_as_id(root);
- slots = __kvm_memslots(kvm, as_id);
- kvm_for_each_memslot(memslot, slots) {
- unsigned long hva_start, hva_end;
- gfn_t gfn_start, gfn_end;
-
- hva_start = max(start, memslot->userspace_addr);
- hva_end = min(end, memslot->userspace_addr +
- (memslot->npages << PAGE_SHIFT));
- if (hva_start >= hva_end)
- continue;
- /*
- * {gfn(page) | page intersects with [hva_start, hva_end)} =
- * {gfn_start, gfn_start+1, ..., gfn_end-1}.
- */
- gfn_start = hva_to_gfn_memslot(hva_start, memslot);
- gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
- ret |= handler(kvm, memslot, root, gfn_start,
- gfn_end, data);
- }
- }
+ for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
+ flush |= zap_gfn_range(kvm, root, range->start, range->end,
+ range->may_block, flush, false);
- return ret;
+ return flush;
}
-static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- struct kvm_mmu_page *root, gfn_t start,
- gfn_t end, unsigned long unused)
-{
- return zap_gfn_range(kvm, root, start, end, false);
-}
+typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
+ struct kvm_gfn_range *range);
-int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
- unsigned long end)
+static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
+ struct kvm_gfn_range *range,
+ tdp_handler_t handler)
{
- return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
- zap_gfn_range_hva_wrapper);
+ struct kvm_mmu_page *root;
+ struct tdp_iter iter;
+ bool ret = false;
+
+ rcu_read_lock();
+
+ /*
+ * Don't support rescheduling: none of the MMU notifiers that funnel
+ * into this helper allow blocking, so it'd be dead, wasteful code.
+ */
+ for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
+ tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
+ ret |= handler(kvm, &iter, range);
+ }
+
+ rcu_read_unlock();
+
+ return ret;
}
/*
* Mark the SPTEs in the range of GFNs [start, end) unaccessed and return non-zero
* if any of the GFNs in the range have been accessed.
*/
-static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot,
- struct kvm_mmu_page *root, gfn_t start, gfn_t end,
- unsigned long unused)
+static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
+ struct kvm_gfn_range *range)
{
- struct tdp_iter iter;
- int young = 0;
u64 new_spte = 0;
- rcu_read_lock();
+ /* If we have a non-accessed entry we don't need to change the pte. */
+ if (!is_accessed_spte(iter->old_spte))
+ return false;
- tdp_root_for_each_leaf_pte(iter, root, start, end) {
+ new_spte = iter->old_spte;
+
+ if (spte_ad_enabled(new_spte)) {
+ new_spte &= ~shadow_accessed_mask;
+ } else {
/*
- * If we have a non-accessed entry we don't need to change the
- * pte.
+ * Capture the dirty status of the page, so that it doesn't get
+ * lost when the SPTE is marked for access tracking.
*/
- if (!is_accessed_spte(iter.old_spte))
- continue;
-
- new_spte = iter.old_spte;
-
- if (spte_ad_enabled(new_spte)) {
- clear_bit((ffs(shadow_accessed_mask) - 1),
- (unsigned long *)&new_spte);
- } else {
- /*
- * Capture the dirty status of the page, so that it doesn't get
- * lost when the SPTE is marked for access tracking.
- */
- if (is_writable_pte(new_spte))
- kvm_set_pfn_dirty(spte_to_pfn(new_spte));
-
- new_spte = mark_spte_for_access_track(new_spte);
- }
- new_spte &= ~shadow_dirty_mask;
+ if (is_writable_pte(new_spte))
+ kvm_set_pfn_dirty(spte_to_pfn(new_spte));
- tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte);
- young = 1;
-
- trace_kvm_age_page(iter.gfn, iter.level, slot, young);
+ new_spte = mark_spte_for_access_track(new_spte);
}
- rcu_read_unlock();
+ tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
- return young;
+ return true;
}
-int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
- unsigned long end)
+bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
- return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
- age_gfn_range);
+ return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
}
-static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
- struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
- unsigned long unused2)
+static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
+ struct kvm_gfn_range *range)
{
- struct tdp_iter iter;
-
- tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1)
- if (is_accessed_spte(iter.old_spte))
- return 1;
-
- return 0;
+ return is_accessed_spte(iter->old_spte);
}
-int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva)
+bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- return kvm_tdp_mmu_handle_hva_range(kvm, hva, hva + 1, 0,
- test_age_gfn);
+ return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
}
-/*
- * Handle the changed_pte MMU notifier for the TDP MMU.
- * data is a pointer to the new pte_t mapping the HVA specified by the MMU
- * notifier.
- * Returns non-zero if a flush is needed before releasing the MMU lock.
- */
-static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
- struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
- unsigned long data)
+static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
+ struct kvm_gfn_range *range)
{
- struct tdp_iter iter;
- pte_t *ptep = (pte_t *)data;
- kvm_pfn_t new_pfn;
u64 new_spte;
- int need_flush = 0;
-
- rcu_read_lock();
- WARN_ON(pte_huge(*ptep));
+ /* Huge pages aren't expected to be modified without first being zapped. */
+ WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
- new_pfn = pte_pfn(*ptep);
-
- tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
- if (iter.level != PG_LEVEL_4K)
- continue;
-
- if (!is_shadow_present_pte(iter.old_spte))
- break;
-
- tdp_mmu_set_spte(kvm, &iter, 0);
-
- kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1);
+ if (iter->level != PG_LEVEL_4K ||
+ !is_shadow_present_pte(iter->old_spte))
+ return false;
- if (!pte_write(*ptep)) {
- new_spte = kvm_mmu_changed_pte_notifier_make_spte(
- iter.old_spte, new_pfn);
+ /*
+ * Note, when changing a read-only SPTE, it's not strictly necessary to
+ * zero the SPTE before setting the new PFN, but doing so preserves the
+ * invariant that the PFN of a present leaf SPTE can never change.
+ * See __handle_changed_spte().
+ */
+ tdp_mmu_set_spte(kvm, iter, 0);
- tdp_mmu_set_spte(kvm, &iter, new_spte);
- }
+ if (!pte_write(range->pte)) {
+ new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
+ pte_pfn(range->pte));
- need_flush = 1;
+ tdp_mmu_set_spte(kvm, iter, new_spte);
}
- if (need_flush)
- kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
-
- rcu_read_unlock();
-
- return 0;
+ return true;
}
-int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
- pte_t *host_ptep)
+/*
+ * Handle the changed_pte MMU notifier for the TDP MMU.
+ * range->pte holds the new PTE mapping the GFN range specified by the MMU
+ * notifier.
+ * The flush is currently performed internally (see the FIXME below), so
+ * this always returns false.
+ */
+bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- return kvm_tdp_mmu_handle_hva_range(kvm, address, address + 1,
- (unsigned long)host_ptep,
- set_tdp_spte);
+ bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
+
+ /* FIXME: return 'flush' instead of flushing here. */
+ if (flush)
+ kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
+
+ return false;
}
/*
- * Remove write access from all the SPTEs mapping GFNs [start, end). If
- * skip_4k is set, SPTEs that map 4k pages, will not be write-protected.
- * Returns true if an SPTE has been changed and the TLBs need to be flushed.
+ * Remove write access from all SPTEs at or above min_level that map GFNs
+ * [start, end). Returns true if an SPTE has been changed and the TLBs need to
+ * be flushed.
*/
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end, int min_level)
@@ -1092,7 +1210,8 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
min_level, start, end) {
- if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+retry:
+ if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
continue;
if (!is_shadow_present_pte(iter.old_spte) ||
@@ -1102,7 +1221,15 @@ static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
- tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
+ if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
+ new_spte)) {
+ /*
+ * The iter must explicitly re-read the SPTE because
+ * the atomic cmpxchg failed.
+ */
+ iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
+ goto retry;
+ }
spte_set = true;
}
@@ -1119,17 +1246,13 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
int min_level)
{
struct kvm_mmu_page *root;
- int root_as_id;
bool spte_set = false;
- for_each_tdp_mmu_root_yield_safe(kvm, root) {
- root_as_id = kvm_mmu_page_as_id(root);
- if (root_as_id != slot->as_id)
- continue;
+ lockdep_assert_held_read(&kvm->mmu_lock);
+ for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages, min_level);
- }
return spte_set;
}
@@ -1151,7 +1274,8 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
rcu_read_lock();
tdp_root_for_each_leaf_pte(iter, root, start, end) {
- if (tdp_mmu_iter_cond_resched(kvm, &iter, false))
+retry:
+ if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
continue;
if (spte_ad_need_write_protect(iter.old_spte)) {
@@ -1166,7 +1290,15 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
continue;
}
- tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
+ if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
+ new_spte)) {
+ /*
+ * The iter must explicitly re-read the SPTE because
+ * the atomic cmpxchg failed.
+ */
+ iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
+ goto retry;
+ }
spte_set = true;
}
@@ -1184,17 +1316,13 @@ static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
struct kvm_mmu_page *root;
- int root_as_id;
bool spte_set = false;
- for_each_tdp_mmu_root_yield_safe(kvm, root) {
- root_as_id = kvm_mmu_page_as_id(root);
- if (root_as_id != slot->as_id)
- continue;
+ lockdep_assert_held_read(&kvm->mmu_lock);
+ for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
slot->base_gfn + slot->npages);
- }
return spte_set;
}
@@ -1256,37 +1384,32 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
bool wrprot)
{
struct kvm_mmu_page *root;
- int root_as_id;
lockdep_assert_held_write(&kvm->mmu_lock);
- for_each_tdp_mmu_root(kvm, root) {
- root_as_id = kvm_mmu_page_as_id(root);
- if (root_as_id != slot->as_id)
- continue;
-
+ for_each_tdp_mmu_root(kvm, root, slot->as_id)
clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
- }
}
/*
* Clear leaf entries which could be replaced by large mappings, for
* GFNs within the slot.
*/
-static void zap_collapsible_spte_range(struct kvm *kvm,
+static bool zap_collapsible_spte_range(struct kvm *kvm,
struct kvm_mmu_page *root,
- struct kvm_memory_slot *slot)
+ const struct kvm_memory_slot *slot,
+ bool flush)
{
gfn_t start = slot->base_gfn;
gfn_t end = start + slot->npages;
struct tdp_iter iter;
kvm_pfn_t pfn;
- bool spte_set = false;
rcu_read_lock();
tdp_root_for_each_pte(iter, root, start, end) {
- if (tdp_mmu_iter_cond_resched(kvm, &iter, spte_set)) {
- spte_set = false;
+retry:
+ if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
+ flush = false;
continue;
}
@@ -1300,55 +1423,67 @@ static void zap_collapsible_spte_range(struct kvm *kvm,
pfn, PG_LEVEL_NUM))
continue;
- tdp_mmu_set_spte(kvm, &iter, 0);
-
- spte_set = true;
+ if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
+ /*
+ * The iter must explicitly re-read the SPTE because
+ * the atomic cmpxchg failed.
+ */
+ iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
+ goto retry;
+ }
+ flush = true;
}
rcu_read_unlock();
- if (spte_set)
- kvm_flush_remote_tlbs(kvm);
+
+ return flush;
}
/*
* Clear non-leaf entries (and free associated page tables) which could
* be replaced by large mappings, for GFNs within the slot.
*/
-void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
- struct kvm_memory_slot *slot)
+bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ const struct kvm_memory_slot *slot,
+ bool flush)
{
struct kvm_mmu_page *root;
- int root_as_id;
- for_each_tdp_mmu_root_yield_safe(kvm, root) {
- root_as_id = kvm_mmu_page_as_id(root);
- if (root_as_id != slot->as_id)
- continue;
+ lockdep_assert_held_read(&kvm->mmu_lock);
- zap_collapsible_spte_range(kvm, root, slot);
- }
+ for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
+ flush = zap_collapsible_spte_range(kvm, root, slot, flush);
+
+ return flush;
}
/*
* Removes write access on the last level SPTE mapping this GFN and unsets the
- * SPTE_MMU_WRITABLE bit to ensure future writes continue to be intercepted.
+ * MMU-writable bit to ensure future writes continue to be intercepted.
* Returns true if an SPTE was set and a TLB flush is needed.
*/
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
- gfn_t gfn)
+ gfn_t gfn, int min_level)
{
struct tdp_iter iter;
u64 new_spte;
bool spte_set = false;
+ BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
+
rcu_read_lock();
- tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
+ for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+ min_level, gfn, gfn + 1) {
+ if (!is_shadow_present_pte(iter.old_spte) ||
+ !is_last_spte(iter.old_spte, iter.level))
+ continue;
+
if (!is_writable_pte(iter.old_spte))
break;
new_spte = iter.old_spte &
- ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
+ ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
tdp_mmu_set_spte(kvm, &iter, new_spte);
spte_set = true;
@@ -1361,24 +1496,20 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
/*
* Removes write access on the last level SPTE mapping this GFN and unsets the
- * SPTE_MMU_WRITABLE bit to ensure future writes continue to be intercepted.
+ * MMU-writable bit to ensure future writes continue to be intercepted.
* Returns true if an SPTE was set and a TLB flush is needed.
*/
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn)
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ int min_level)
{
struct kvm_mmu_page *root;
- int root_as_id;
bool spte_set = false;
lockdep_assert_held_write(&kvm->mmu_lock);
- for_each_tdp_mmu_root(kvm, root) {
- root_as_id = kvm_mmu_page_as_id(root);
- if (root_as_id != slot->as_id)
- continue;
+ for_each_tdp_mmu_root(kvm, root, slot->as_id)
+ spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
- spte_set |= write_protect_gfn(kvm, root, gfn);
- }
return spte_set;
}
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index 3b761c111bff..1cae4485b3bc 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -6,24 +6,60 @@
#include <linux/kvm_host.h>
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
-void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end);
+__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
+ struct kvm_mmu_page *root)
+{
+ if (root->role.invalid)
+ return false;
+
+ return refcount_inc_not_zero(&root->tdp_mmu_root_count);
+}
+
+void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
+ bool shared);
+
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
+ gfn_t end, bool can_yield, bool flush,
+ bool shared);
+static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
+ gfn_t start, gfn_t end, bool flush,
+ bool shared)
+{
+ return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush,
+ shared);
+}
+static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level + 1);
+
+ /*
+ * Don't allow yielding, as the caller may have a flush pending. Note,
+ * if mmu_lock is held for write, zapping will never yield in this case,
+ * but explicitly disallow it for safety. The TDP MMU does not yield
+ * until it has made forward progress (steps sideways), and when zapping
+ * a single shadow page that it's guaranteed to see (thus the mmu_lock
+ * requirement), its "step sideways" will always step beyond the bounds
+ * of the shadow page's gfn range and stop iterating before yielding.
+ */
+ lockdep_assert_held_write(&kvm->mmu_lock);
+ return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
+ sp->gfn, end, false, false, false);
+}
+
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
+void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
+void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
int map_writable, int max_level, kvm_pfn_t pfn,
bool prefault);
-int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
- unsigned long end);
-
-int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
- unsigned long end);
-int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva);
-
-int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
- pte_t *host_ptep);
+bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
+ bool flush);
+bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
+bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
+bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
int min_level);
@@ -33,41 +69,45 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn, unsigned long mask,
bool wrprot);
-void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
- struct kvm_memory_slot *slot);
+bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+ const struct kvm_memory_slot *slot,
+ bool flush);
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
- struct kvm_memory_slot *slot, gfn_t gfn);
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ int min_level);
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
int *root_level);
#ifdef CONFIG_X86_64
-void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
+bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
-#else
-static inline void kvm_mmu_init_tdp_mmu(struct kvm *kvm) {}
-static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
-static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
-static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
-#endif
-static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
+static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
struct kvm_mmu_page *sp;
+ hpa_t hpa = mmu->root_hpa;
- if (!is_tdp_mmu_enabled(kvm))
- return false;
if (WARN_ON(!VALID_PAGE(hpa)))
return false;
+ /*
+ * A NULL shadow page is legal when shadowing a non-paging guest with
+ * PAE paging, as the MMU will be direct with root_hpa pointing at the
+ * pae_root page, not a shadow page.
+ */
sp = to_shadow_page(hpa);
- if (WARN_ON(!sp))
- return false;
-
- return is_tdp_mmu_page(sp) && sp->root_count;
+ return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
+#else
+static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
+static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
+static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
+static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
+static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
+#endif
#endif /* __KVM_X86_MMU_TDP_MMU_H */
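kvm_tdp_mmu_get_root() above takes a reference on a root only while it is still live: refcount_inc_not_zero() refuses to bump a counter that has already reached zero, so a reader can never resurrect a root that a concurrent kvm_tdp_mmu_put_root() is tearing down. A minimal userspace model of the inc-not-zero rule, using a plain atomic counter in place of refcount_t (illustrative only):

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        struct root {
                _Atomic int refcount;
                bool invalid;
        };

        /* Increment the refcount only if it has not already dropped to zero. */
        static bool get_root(struct root *r)
        {
                int old;

                if (r->invalid)
                        return false;

                old = atomic_load(&r->refcount);
                do {
                        if (old == 0)   /* being freed; do not revive it */
                                return false;
                } while (!atomic_compare_exchange_weak(&r->refcount, &old, old + 1));

                return true;
        }

        int main(void)
        {
                struct root live = { .refcount = 1 };
                struct root dead = { .refcount = 0 };

                printf("live: %d, dead: %d\n", get_root(&live), get_root(&dead));
                return 0;
        }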
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 7b30bc967af3..67e753edfa22 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -103,7 +103,7 @@ static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
/* returns general purpose PMC with the specified MSR. Note that it can be
* used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
- * paramenter to tell them apart.
+ * parameter to tell them apart.
*/
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
u32 base)
diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h
new file mode 100644
index 000000000000..a19d473d0184
--- /dev/null
+++ b/arch/x86/kvm/reverse_cpuid.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_X86_KVM_REVERSE_CPUID_H
+#define ARCH_X86_KVM_REVERSE_CPUID_H
+
+#include <uapi/asm/kvm.h>
+#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
+
+/*
+ * Hardware-defined CPUID leafs that are scattered in the kernel, but need to
+ * be directly used by KVM. Note, these word values conflict with the kernel's
+ * "bug" caps, but KVM doesn't use those.
+ */
+enum kvm_only_cpuid_leafs {
+ CPUID_12_EAX = NCAPINTS,
+ NR_KVM_CPU_CAPS,
+
+ NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
+};
+
+#define KVM_X86_FEATURE(w, f) ((w)*32 + (f))
+
+/* Intel-defined SGX sub-features, CPUID level 0x12 (EAX). */
+#define KVM_X86_FEATURE_SGX1 KVM_X86_FEATURE(CPUID_12_EAX, 0)
+#define KVM_X86_FEATURE_SGX2 KVM_X86_FEATURE(CPUID_12_EAX, 1)
+
+struct cpuid_reg {
+ u32 function;
+ u32 index;
+ int reg;
+};
+
+static const struct cpuid_reg reverse_cpuid[] = {
+ [CPUID_1_EDX] = { 1, 0, CPUID_EDX},
+ [CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
+ [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
+ [CPUID_1_ECX] = { 1, 0, CPUID_ECX},
+ [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
+ [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
+ [CPUID_7_0_EBX] = { 7, 0, CPUID_EBX},
+ [CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX},
+ [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
+ [CPUID_6_EAX] = { 6, 0, CPUID_EAX},
+ [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
+ [CPUID_7_ECX] = { 7, 0, CPUID_ECX},
+ [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
+ [CPUID_7_EDX] = { 7, 0, CPUID_EDX},
+ [CPUID_7_1_EAX] = { 7, 1, CPUID_EAX},
+ [CPUID_12_EAX] = {0x00000012, 0, CPUID_EAX},
+ [CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX},
+};
+
+/*
+ * Reverse CPUID and its derivatives can only be used for hardware-defined
+ * feature words, i.e. words whose bits directly correspond to a CPUID leaf.
+ * Retrieving a feature bit or masking guest CPUID from a Linux-defined word
+ * is nonsensical as the bit number/mask is an arbitrary software-defined value
+ * and can't be used by KVM to query/control guest capabilities. And obviously
+ * the leaf being queried must have an entry in the lookup table.
+ */
+static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
+{
+ BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
+ BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
+ BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
+ BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
+ BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
+ BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
+}
+
+/*
+ * Translate feature bits that are scattered in the kernel's cpufeatures word
+ * into KVM feature words that align with hardware's definitions.
+ */
+static __always_inline u32 __feature_translate(int x86_feature)
+{
+ if (x86_feature == X86_FEATURE_SGX1)
+ return KVM_X86_FEATURE_SGX1;
+ else if (x86_feature == X86_FEATURE_SGX2)
+ return KVM_X86_FEATURE_SGX2;
+
+ return x86_feature;
+}
+
+static __always_inline u32 __feature_leaf(int x86_feature)
+{
+ return __feature_translate(x86_feature) / 32;
+}
+
+/*
+ * Retrieve the bit mask from an X86_FEATURE_* definition. Features contain
+ * the hardware defined bit number (stored in bits 4:0) and a software defined
+ * "word" (stored in bits 31:5). The word is used to index into arrays of
+ * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
+ */
+static __always_inline u32 __feature_bit(int x86_feature)
+{
+ x86_feature = __feature_translate(x86_feature);
+
+ reverse_cpuid_check(x86_feature / 32);
+ return 1 << (x86_feature & 31);
+}
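To make the packing described above concrete, here is a small standalone example with an illustrative feature number (word 7, bit 20; not a real leaf): dividing by 32 recovers the word index and masking with 31 recovers the register bit:

        #include <stdio.h>

        int main(void)
        {
                /* Illustrative feature number: word 7, bit 20. */
                unsigned int feature = 7 * 32 + 20;

                unsigned int word = feature / 32;          /* __feature_leaf() */
                unsigned int mask = 1u << (feature & 31);  /* __feature_bit()  */

                printf("word=%u mask=%#x\n", word, mask);  /* word=7 mask=0x100000 */
                return 0;
        }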
+
+#define feature_bit(name) __feature_bit(X86_FEATURE_##name)
+
+static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
+{
+ unsigned int x86_leaf = __feature_leaf(x86_feature);
+
+ reverse_cpuid_check(x86_leaf);
+ return reverse_cpuid[x86_leaf];
+}
+
+static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
+ u32 reg)
+{
+ switch (reg) {
+ case CPUID_EAX:
+ return &entry->eax;
+ case CPUID_EBX:
+ return &entry->ebx;
+ case CPUID_ECX:
+ return &entry->ecx;
+ case CPUID_EDX:
+ return &entry->edx;
+ default:
+ BUILD_BUG();
+ return NULL;
+ }
+}
+
+static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
+ unsigned int x86_feature)
+{
+ const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
+
+ return __cpuid_entry_get_reg(entry, cpuid.reg);
+}
+
+static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
+ unsigned int x86_feature)
+{
+ u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+ return *reg & __feature_bit(x86_feature);
+}
+
+static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
+ unsigned int x86_feature)
+{
+ return cpuid_entry_get(entry, x86_feature);
+}
+
+static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
+ unsigned int x86_feature)
+{
+ u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+ *reg &= ~__feature_bit(x86_feature);
+}
+
+static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
+ unsigned int x86_feature)
+{
+ u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+ *reg |= __feature_bit(x86_feature);
+}
+
+static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
+ unsigned int x86_feature,
+ bool set)
+{
+ u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+ /*
+ * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
+ * compiler into using CMOV instead of Jcc when possible.
+ */
+ if (set)
+ *reg |= __feature_bit(x86_feature);
+ else
+ *reg &= ~__feature_bit(x86_feature);
+}
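The open-coded update above keeps both branches as simple register operations so the compiler is free to lower the conditional to CMOV rather than a branch. A standalone sketch of the same shape (userspace, hypothetical caller):

        #include <stdint.h>
        #include <stdio.h>

        /* Both branches are plain register ops, so the compiler may emit CMOV. */
        static void change_bit(uint32_t *reg, uint32_t mask, int set)
        {
                if (set)
                        *reg |= mask;
                else
                        *reg &= ~mask;
        }

        int main(void)
        {
                uint32_t reg = 0;

                change_bit(&reg, 1u << 3, 1);
                printf("after set:   %#x\n", reg);
                change_bit(&reg, 1u << 3, 0);
                printf("after clear: %#x\n", reg);
                return 0;
        }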
+
+#endif /* ARCH_X86_KVM_REVERSE_CPUID_H */
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 78bdcfac4e40..1d01da64c333 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -27,12 +27,6 @@
#include "irq.h"
#include "svm.h"
-/* enable / disable AVIC */
-int avic;
-#ifdef CONFIG_X86_LOCAL_APIC
-module_param(avic, int, S_IRUGO);
-#endif
-
#define SVM_AVIC_DOORBELL 0xc001011b
#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
@@ -126,7 +120,7 @@ void avic_vm_destroy(struct kvm *kvm)
unsigned long flags;
struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
- if (!avic)
+ if (!enable_apicv)
return;
if (kvm_svm->avic_logical_id_table_page)
@@ -149,7 +143,7 @@ int avic_vm_init(struct kvm *kvm)
struct page *l_page;
u32 vm_id;
- if (!avic)
+ if (!enable_apicv)
return 0;
/* Allocating physical APIC ID table (4KB) */
@@ -223,7 +217,7 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
return &avic_physical_id_table[index];
}
-/**
+/*
* Note:
* AVIC hardware walks the nested page table to check permissions,
* but does not use the SPA address specified in the leaf page
@@ -242,7 +236,7 @@ static int avic_update_access_page(struct kvm *kvm, bool activate)
* APICv mode change, which update APIC_ACCESS_PAGE_PRIVATE_MEMSLOT
* memory region. So, we need to ensure that kvm->mm == current->mm.
*/
- if ((kvm->arch.apic_access_page_done == activate) ||
+ if ((kvm->arch.apic_access_memslot_enabled == activate) ||
(kvm->mm != current->mm))
goto out;
@@ -255,7 +249,7 @@ static int avic_update_access_page(struct kvm *kvm, bool activate)
goto out;
}
- kvm->arch.apic_access_page_done = activate;
+ kvm->arch.apic_access_memslot_enabled = activate;
out:
mutex_unlock(&kvm->slots_lock);
return r;
@@ -270,7 +264,7 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
return -EINVAL;
- if (!svm->vcpu.arch.apic->regs)
+ if (!vcpu->arch.apic->regs)
return -EINVAL;
if (kvm_apicv_activated(vcpu->kvm)) {
@@ -281,7 +275,7 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
return ret;
}
- svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
+ svm->avic_backing_page = virt_to_page(vcpu->arch.apic->regs);
/* Setting AVIC backing page address in the phy APIC ID table */
entry = avic_get_physical_id_entry(vcpu, id);
@@ -315,15 +309,16 @@ static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
}
}
-int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
+int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
u32 icrl = svm->vmcb->control.exit_info_1;
u32 id = svm->vmcb->control.exit_info_2 >> 32;
u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
- struct kvm_lapic *apic = svm->vcpu.arch.apic;
+ struct kvm_lapic *apic = vcpu->arch.apic;
- trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
+ trace_kvm_avic_incomplete_ipi(vcpu->vcpu_id, icrh, icrl, id, index);
switch (id) {
case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
@@ -347,11 +342,11 @@ int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
* set the appropriate IRR bits on the valid target
* vcpus. So, we just need to kick the appropriate vcpu.
*/
- avic_kick_target_vcpus(svm->vcpu.kvm, apic, icrl, icrh);
+ avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
break;
case AVIC_IPI_FAILURE_INVALID_TARGET:
WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
- index, svm->vcpu.vcpu_id, icrh, icrl);
+ index, vcpu->vcpu_id, icrh, icrl);
break;
case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
WARN_ONCE(1, "Invalid backing page\n");
@@ -539,8 +534,9 @@ static bool is_avic_unaccelerated_access_trap(u32 offset)
return ret;
}
-int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
+int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
int ret = 0;
u32 offset = svm->vmcb->control.exit_info_1 &
AVIC_UNACCEL_ACCESS_OFFSET_MASK;
@@ -550,7 +546,7 @@ int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
AVIC_UNACCEL_ACCESS_WRITE_MASK;
bool trap = is_avic_unaccelerated_access_trap(offset);
- trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
+ trace_kvm_avic_unaccelerated_access(vcpu->vcpu_id, offset,
trap, write, vector);
if (trap) {
/* Handling Trap */
@@ -558,7 +554,7 @@ int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
ret = avic_unaccel_trap_write(svm);
} else {
/* Handling Fault */
- ret = kvm_emulate_instruction(&svm->vcpu, 0);
+ ret = kvm_emulate_instruction(vcpu, 0);
}
return ret;
@@ -569,10 +565,10 @@ int avic_init_vcpu(struct vcpu_svm *svm)
int ret;
struct kvm_vcpu *vcpu = &svm->vcpu;
- if (!avic || !irqchip_in_kernel(vcpu->kvm))
+ if (!enable_apicv || !irqchip_in_kernel(vcpu->kvm))
return 0;
- ret = avic_init_backing_page(&svm->vcpu);
+ ret = avic_init_backing_page(vcpu);
if (ret)
return ret;
@@ -593,7 +589,7 @@ void avic_post_state_restore(struct kvm_vcpu *vcpu)
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate)
{
- if (!avic || !lapic_in_kernel(vcpu))
+ if (!enable_apicv || !lapic_in_kernel(vcpu))
return;
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -653,7 +649,7 @@ void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
struct vmcb *vmcb = svm->vmcb;
bool activated = kvm_vcpu_apicv_active(vcpu);
- if (!avic)
+ if (!enable_apicv)
return;
if (activated) {
@@ -727,7 +723,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
struct amd_svm_iommu_ir *ir;
/**
- * In some cases, the existing irte is updaed and re-set,
+ * In some cases, the existing irte is updated and re-set,
* so we need to check here if it's already been added
* to the ir_list.
*/
@@ -764,7 +760,7 @@ out:
return ret;
}
-/**
+/*
* Note:
* The HW cannot support posting multicast/broadcast
* interrupts to a vCPU. So, we still use legacy interrupt
@@ -838,7 +834,7 @@ int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
* Here, we setup with legacy mode in the following cases:
* 1. When cannot target interrupt to a specific vcpu.
* 2. Unsetting posted interrupt.
- * 3. APIC virtialization is disabled for the vcpu.
+ * 3. APIC virtualization is disabled for the vcpu.
* 4. IRQ has incompatible delivery mode (SMI, INIT, etc)
*/
if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
@@ -1005,7 +1001,7 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
}
-/**
+/*
* This function is called during VCPU halt/unhalt.
*/
static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 35891d9a1099..21d03e3a5dfd 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -29,6 +29,8 @@
#include "lapic.h"
#include "svm.h"
+#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
+
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
@@ -92,17 +94,22 @@ static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct vmcb *hsave = svm->nested.hsave;
WARN_ON(mmu_is_nested(vcpu));
vcpu->arch.mmu = &vcpu->arch.guest_mmu;
- kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, hsave->save.cr4, hsave->save.efer,
+
+ /*
+ * The NPT format depends on L1's CR4 and EFER, which is in vmcb01. Note,
+ * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
+ * vCPU state. CR0.WP is explicitly ignored, while CR0.PG is required.
+ */
+ kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
+ svm->vmcb01.ptr->save.efer,
svm->nested.ctl.nested_cr3);
vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
- reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
@@ -123,7 +130,7 @@ void recalc_intercepts(struct vcpu_svm *svm)
return;
c = &svm->vmcb->control;
- h = &svm->nested.hsave->control;
+ h = &svm->vmcb01.ptr->control;
g = &svm->nested.ctl;
for (i = 0; i < MAX_INTERCEPT; i++)
@@ -213,69 +220,92 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
return true;
}
-static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
+/*
+ * Bits 11:0 of the bitmap address are ignored by hardware
+ */
+static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- if (WARN_ON(!is_guest_mode(vcpu)))
- return true;
-
- if (!nested_svm_vmrun_msrpm(svm)) {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror =
- KVM_INTERNAL_ERROR_EMULATION;
- vcpu->run->internal.ndata = 0;
- return false;
- }
+ u64 addr = PAGE_ALIGN(pa);
- return true;
+ return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
+ kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
}
-static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
+static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
+ struct vmcb_control_area *control)
{
- if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
+ if (CC(!vmcb_is_intercept(control, INTERCEPT_VMRUN)))
+ return false;
+
+ if (CC(control->asid == 0))
return false;
- if (control->asid == 0)
+ if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
return false;
- if ((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
- !npt_enabled)
+ if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
+ MSRPM_SIZE)))
+ return false;
+ if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
+ IOPM_SIZE)))
return false;
return true;
}
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
+ struct vmcb_save_area *save)
{
- struct kvm_vcpu *vcpu = &svm->vcpu;
- bool vmcb12_lma;
+ /*
+ * These checks are also performed by KVM_SET_SREGS,
+ * except that EFER.LMA is not checked by SVM against
+ * CR0.PG && EFER.LME.
+ */
+ if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
+ if (CC(!(save->cr4 & X86_CR4_PAE)) ||
+ CC(!(save->cr0 & X86_CR0_PE)) ||
+ CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
+ return false;
+ }
- if ((vmcb12->save.efer & EFER_SVME) == 0)
+ if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
return false;
- if (((vmcb12->save.cr0 & X86_CR0_CD) == 0) && (vmcb12->save.cr0 & X86_CR0_NW))
+ return true;
+}
+
+/* Common checks that apply to both L1 and L2 state. */
+static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
+ struct vmcb_save_area *save)
+{
+ /*
+ * FIXME: these should be done after copying the fields,
+ * to avoid TOC/TOU races. For these save area checks
+ * the possible damage is limited since kvm_set_cr0 and
+ * kvm_set_cr4 handle failure; EFER_SVME is an exception
+ * so it is force-set later in nested_prepare_vmcb_save.
+ */
+ if (CC(!(save->efer & EFER_SVME)))
return false;
- if (!kvm_dr6_valid(vmcb12->save.dr6) || !kvm_dr7_valid(vmcb12->save.dr7))
+ if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
+ CC(save->cr0 & ~0xffffffffULL))
return false;
- vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
+ if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
+ return false;
- if (vmcb12_lma) {
- if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
- !(vmcb12->save.cr0 & X86_CR0_PE) ||
- kvm_vcpu_is_illegal_gpa(vcpu, vmcb12->save.cr3))
- return false;
- }
- if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
+ if (!nested_vmcb_check_cr3_cr4(vcpu, save))
+ return false;
+
+ if (CC(!kvm_valid_efer(vcpu, save->efer)))
return false;
- return nested_vmcb_check_controls(&vmcb12->control);
+ return true;
}
-static void load_nested_vmcb_control(struct vcpu_svm *svm,
- struct vmcb_control_area *control)
+static void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
+ struct vmcb_control_area *control)
{
copy_vmcb_control_area(&svm->nested.ctl, control);
@@ -287,9 +317,9 @@ static void load_nested_vmcb_control(struct vcpu_svm *svm,
/*
* Synchronize fields that are written by the processor, so that
- * they can be copied back into the nested_vmcb.
+ * they can be copied back into the vmcb12.
*/
-void sync_nested_vmcb_control(struct vcpu_svm *svm)
+void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
{
u32 mask;
svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
@@ -317,8 +347,8 @@ void sync_nested_vmcb_control(struct vcpu_svm *svm)
* Transfer any event that L0 or L1 wanted to inject into L2 to
* EXIT_INT_INFO.
*/
-static void nested_vmcb_save_pending_event(struct vcpu_svm *svm,
- struct vmcb *vmcb12)
+static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
+ struct vmcb *vmcb12)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
u32 exit_int_info = 0;
@@ -355,51 +385,101 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm)
return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE;
}
+static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
+{
+ /*
+ * TODO: optimize unconditional TLB flush/MMU sync. A partial list of
+ * things to fix before this can be conditional:
+ *
+ * - Flush TLBs for both L1 and L2 remote TLB flush
+ * - Honor L1's request to flush an ASID on nested VMRUN
+ * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
+ * - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
+ * - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
+ *
+ * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
+ * NPT guest-physical mappings on VMRUN.
+ */
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+}
+
/*
* Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
* if we are emulating VM-Entry into a guest with NPT enabled.
*/
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
- bool nested_npt)
+ bool nested_npt, bool reload_pdptrs)
{
- if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
+ if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
return -EINVAL;
- if (!nested_npt && is_pae_paging(vcpu) &&
- (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
- if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
- return -EINVAL;
- }
+ if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
+ CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
+ return -EINVAL;
- /*
- * TODO: optimize unconditional TLB flush/MMU sync here and in
- * kvm_init_shadow_npt_mmu().
- */
if (!nested_npt)
- kvm_mmu_new_pgd(vcpu, cr3, false, false);
+ kvm_mmu_new_pgd(vcpu, cr3);
vcpu->arch.cr3 = cr3;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
- kvm_init_mmu(vcpu, false);
+ /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
+ kvm_init_mmu(vcpu);
return 0;
}
-static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
+void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
+{
+ if (!svm->nested.vmcb02.ptr)
+ return;
+
+ /* FIXME: merge g_pat from vmcb01 and vmcb12. */
+ svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
+}
+
+static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
+ bool new_vmcb12 = false;
+
+ nested_vmcb02_compute_g_pat(svm);
+
/* Load the nested guest state */
- svm->vmcb->save.es = vmcb12->save.es;
- svm->vmcb->save.cs = vmcb12->save.cs;
- svm->vmcb->save.ss = vmcb12->save.ss;
- svm->vmcb->save.ds = vmcb12->save.ds;
- svm->vmcb->save.gdtr = vmcb12->save.gdtr;
- svm->vmcb->save.idtr = vmcb12->save.idtr;
+ if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
+ new_vmcb12 = true;
+ svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
+ }
+
+ if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
+ svm->vmcb->save.es = vmcb12->save.es;
+ svm->vmcb->save.cs = vmcb12->save.cs;
+ svm->vmcb->save.ss = vmcb12->save.ss;
+ svm->vmcb->save.ds = vmcb12->save.ds;
+ svm->vmcb->save.cpl = vmcb12->save.cpl;
+ vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
+ }
+
+ if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
+ svm->vmcb->save.gdtr = vmcb12->save.gdtr;
+ svm->vmcb->save.idtr = vmcb12->save.idtr;
+ vmcb_mark_dirty(svm->vmcb, VMCB_DT);
+ }
+
kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
- svm_set_efer(&svm->vcpu, vmcb12->save.efer);
+
+ /*
+ * Force-set EFER_SVME even though it is checked earlier on the
+ * VMCB12, because the guest can flip the bit between the check
+ * and now. Clearing EFER_SVME would call svm_free_nested.
+ */
+ svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
+
svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
- svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
+
+ svm->vcpu.arch.cr2 = vmcb12->save.cr2;
+
kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
@@ -408,24 +488,51 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
svm->vmcb->save.rax = vmcb12->save.rax;
svm->vmcb->save.rsp = vmcb12->save.rsp;
svm->vmcb->save.rip = vmcb12->save.rip;
- svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
- svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
- svm->vmcb->save.cpl = vmcb12->save.cpl;
+
+ /* These bits will be set properly on the first execution when new_vmcb12 is true */
+ if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
+ svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
+ svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
+ vmcb_mark_dirty(svm->vmcb, VMCB_DR);
+ }
}
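The caching introduced above avoids reloading vmcb12 state that cannot have changed: segment, descriptor-table, and debug-register fields are copied into vmcb02 only when a different vmcb12 is being run or when the guest has dirtied the corresponding clean bit. A userspace sketch of that gating, with an illustrative three-bit clean field:

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        enum { VMCB_SEG = 1u << 0, VMCB_DT = 1u << 1, VMCB_DR = 1u << 2 };

        struct vmcb {
                uint32_t clean;         /* clean bit set => field unchanged */
                uint64_t seg, dt, dr;
        };

        static void prepare_save(struct vmcb *v02, const struct vmcb *v12,
                                 uint64_t *last_gpa, uint64_t gpa)
        {
                bool new_vmcb12 = (gpa != *last_gpa);

                *last_gpa = gpa;

                if (new_vmcb12 || !(v12->clean & VMCB_SEG))
                        v02->seg = v12->seg;    /* segment registers */
                if (new_vmcb12 || !(v12->clean & VMCB_DT))
                        v02->dt = v12->dt;      /* GDTR/IDTR */
                if (new_vmcb12 || !(v12->clean & VMCB_DR))
                        v02->dr = v12->dr;      /* DR6/DR7 */
        }

        int main(void)
        {
                struct vmcb v02 = { 0 };
                struct vmcb v12 = { .clean = VMCB_SEG | VMCB_DT, .dr = 7 };
                uint64_t last_gpa = 0;

                prepare_save(&v02, &v12, &last_gpa, 0x1000); /* new: copy all */
                prepare_save(&v02, &v12, &last_gpa, 0x1000); /* cached: dirty DR only */
                printf("dr=%llu\n", (unsigned long long)v02.dr);
                return 0;
        }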
-static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
+static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
{
const u32 mask = V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+
+ /*
+ * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
+ * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
+ */
+
+ /*
+ * Also covers avic_vapic_bar, avic_backing_page, avic_logical_id,
+ * avic_physical_id.
+ */
+ WARN_ON(svm->vmcb01.ptr->control.int_ctl & AVIC_ENABLE_MASK);
+
+ /* Copied from vmcb01. msrpm_base can be overwritten later. */
+ svm->vmcb->control.nested_ctl = svm->vmcb01.ptr->control.nested_ctl;
+ svm->vmcb->control.iopm_base_pa = svm->vmcb01.ptr->control.iopm_base_pa;
+ svm->vmcb->control.msrpm_base_pa = svm->vmcb01.ptr->control.msrpm_base_pa;
+
+ /* Done at vmrun: asid. */
+ /* Also overwritten later if necessary. */
+ svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
+
+ /* nested_cr3. */
if (nested_npt_enabled(svm))
- nested_svm_init_mmu_context(&svm->vcpu);
+ nested_svm_init_mmu_context(vcpu);
- svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
- svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
+ svm->vmcb->control.tsc_offset = vcpu->arch.tsc_offset =
+ vcpu->arch.l1_tsc_offset + svm->nested.ctl.tsc_offset;
svm->vmcb->control.int_ctl =
(svm->nested.ctl.int_ctl & ~mask) |
- (svm->nested.hsave->control.int_ctl & mask);
+ (svm->vmcb01.ptr->control.int_ctl & mask);
svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext;
svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
@@ -436,21 +543,34 @@ static void nested_prepare_vmcb_control(struct vcpu_svm *svm)
svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count;
svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh;
+ nested_svm_transition_tlb_flush(vcpu);
+
/* Enter Guest-Mode */
- enter_guest_mode(&svm->vcpu);
+ enter_guest_mode(vcpu);
/*
- * Merge guest and host intercepts - must be called with vcpu in
- * guest-mode to take affect here
+ * Merge guest and host intercepts - must be called with vcpu in
+ * guest-mode to take effect.
*/
recalc_intercepts(svm);
+}
- vmcb_mark_all_dirty(svm->vmcb);
+static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
+{
+ /*
+ * Some VMCB state is shared between L1 and L2 and thus has to be
+ * moved at the time of nested vmrun and vmexit.
+ *
+ * VMLOAD/VMSAVE state would also belong in this category, but KVM
+ * always performs VMLOAD and VMSAVE from the VMCB01.
+ */
+ to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
}
-int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
+int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
struct vmcb *vmcb12)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
int ret;
trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
@@ -468,54 +588,61 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
svm->nested.vmcb12_gpa = vmcb12_gpa;
- load_nested_vmcb_control(svm, &vmcb12->control);
- nested_prepare_vmcb_control(svm);
- nested_prepare_vmcb_save(svm, vmcb12);
+
+ WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
+
+ nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
+
+ svm_switch_vmcb(svm, &svm->nested.vmcb02);
+ nested_vmcb02_prepare_control(svm);
+ nested_vmcb02_prepare_save(svm, vmcb12);
ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
- nested_npt_enabled(svm));
+ nested_npt_enabled(svm), true);
if (ret)
return ret;
if (!npt_enabled)
- svm->vcpu.arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
+ vcpu->arch.mmu->inject_page_fault = svm_inject_page_fault_nested;
svm_set_gif(svm, true);
return 0;
}
-int nested_svm_vmrun(struct vcpu_svm *svm)
+int nested_svm_vmrun(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
int ret;
struct vmcb *vmcb12;
- struct vmcb *hsave = svm->nested.hsave;
- struct vmcb *vmcb = svm->vmcb;
struct kvm_host_map map;
u64 vmcb12_gpa;
- if (is_smm(&svm->vcpu)) {
- kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+ if (is_smm(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
vmcb12_gpa = svm->vmcb->save.rax;
- ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
+ ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
if (ret == -EINVAL) {
- kvm_inject_gp(&svm->vcpu, 0);
+ kvm_inject_gp(vcpu, 0);
return 1;
} else if (ret) {
- return kvm_skip_emulated_instruction(&svm->vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
}
- ret = kvm_skip_emulated_instruction(&svm->vcpu);
+ ret = kvm_skip_emulated_instruction(vcpu);
vmcb12 = map.hva;
if (WARN_ON_ONCE(!svm->nested.initialized))
return -EINVAL;
- if (!nested_vmcb_checks(svm, vmcb12)) {
+ nested_load_control_from_vmcb12(svm, &vmcb12->control);
+
+ if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) ||
+ !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) {
vmcb12->control.exit_code = SVM_EXIT_ERR;
vmcb12->control.exit_code_hi = 0;
vmcb12->control.exit_info_1 = 0;
@@ -525,36 +652,25 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
/* Clear internal status */
- kvm_clear_exception_queue(&svm->vcpu);
- kvm_clear_interrupt_queue(&svm->vcpu);
+ kvm_clear_exception_queue(vcpu);
+ kvm_clear_interrupt_queue(vcpu);
/*
- * Save the old vmcb, so we don't need to pick what we save, but can
- * restore everything when a VMEXIT occurs
+ * Since vmcb01 is not in use, we can use it to store some of the L1
+ * state.
*/
- hsave->save.es = vmcb->save.es;
- hsave->save.cs = vmcb->save.cs;
- hsave->save.ss = vmcb->save.ss;
- hsave->save.ds = vmcb->save.ds;
- hsave->save.gdtr = vmcb->save.gdtr;
- hsave->save.idtr = vmcb->save.idtr;
- hsave->save.efer = svm->vcpu.arch.efer;
- hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
- hsave->save.cr4 = svm->vcpu.arch.cr4;
- hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
- hsave->save.rip = kvm_rip_read(&svm->vcpu);
- hsave->save.rsp = vmcb->save.rsp;
- hsave->save.rax = vmcb->save.rax;
- if (npt_enabled)
- hsave->save.cr3 = vmcb->save.cr3;
- else
- hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
+ svm->vmcb01.ptr->save.efer = vcpu->arch.efer;
+ svm->vmcb01.ptr->save.cr0 = kvm_read_cr0(vcpu);
+ svm->vmcb01.ptr->save.cr4 = vcpu->arch.cr4;
+ svm->vmcb01.ptr->save.rflags = kvm_get_rflags(vcpu);
+ svm->vmcb01.ptr->save.rip = kvm_rip_read(vcpu);
- copy_vmcb_control_area(&hsave->control, &vmcb->control);
+ if (!npt_enabled)
+ svm->vmcb01.ptr->save.cr3 = kvm_read_cr3(vcpu);
svm->nested.nested_run_pending = 1;
- if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12))
+ if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12))
goto out_exit_err;
if (nested_svm_vmrun_msrpm(svm))
@@ -571,7 +687,7 @@ out_exit_err:
nested_svm_vmexit(svm);
out:
- kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ kvm_vcpu_unmap(vcpu, &map, true);
return ret;
}
@@ -594,27 +710,30 @@ void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
int nested_svm_vmexit(struct vcpu_svm *svm)
{
- int rc;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
struct vmcb *vmcb12;
- struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
struct kvm_host_map map;
+ int rc;
+
+ /* Triple faults in L2 should never escape. */
+ WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
- rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
+ rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
if (rc) {
if (rc == -EINVAL)
- kvm_inject_gp(&svm->vcpu, 0);
+ kvm_inject_gp(vcpu, 0);
return 1;
}
vmcb12 = map.hva;
/* Exit Guest-Mode */
- leave_guest_mode(&svm->vcpu);
+ leave_guest_mode(vcpu);
svm->nested.vmcb12_gpa = 0;
WARN_ON_ONCE(svm->nested.nested_run_pending);
- kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
+ kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
/* in case we halted in L2 */
svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -628,14 +747,14 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb12->save.gdtr = vmcb->save.gdtr;
vmcb12->save.idtr = vmcb->save.idtr;
vmcb12->save.efer = svm->vcpu.arch.efer;
- vmcb12->save.cr0 = kvm_read_cr0(&svm->vcpu);
- vmcb12->save.cr3 = kvm_read_cr3(&svm->vcpu);
+ vmcb12->save.cr0 = kvm_read_cr0(vcpu);
+ vmcb12->save.cr3 = kvm_read_cr3(vcpu);
vmcb12->save.cr2 = vmcb->save.cr2;
vmcb12->save.cr4 = svm->vcpu.arch.cr4;
- vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu);
- vmcb12->save.rip = kvm_rip_read(&svm->vcpu);
- vmcb12->save.rsp = kvm_rsp_read(&svm->vcpu);
- vmcb12->save.rax = kvm_rax_read(&svm->vcpu);
+ vmcb12->save.rflags = kvm_get_rflags(vcpu);
+ vmcb12->save.rip = kvm_rip_read(vcpu);
+ vmcb12->save.rsp = kvm_rsp_read(vcpu);
+ vmcb12->save.rax = kvm_rax_read(vcpu);
vmcb12->save.dr7 = vmcb->save.dr7;
vmcb12->save.dr6 = svm->vcpu.arch.dr6;
vmcb12->save.cpl = vmcb->save.cpl;
@@ -647,7 +766,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb12->control.exit_info_2 = vmcb->control.exit_info_2;
if (vmcb12->control.exit_code != SVM_EXIT_ERR)
- nested_vmcb_save_pending_event(svm, vmcb12);
+ nested_save_pending_event_to_vmcb12(svm, vmcb12);
if (svm->nrips_enabled)
vmcb12->control.next_rip = vmcb->control.next_rip;
@@ -662,37 +781,38 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb12->control.pause_filter_thresh =
svm->vmcb->control.pause_filter_thresh;
- /* Restore the original control entries */
- copy_vmcb_control_area(&vmcb->control, &hsave->control);
+ nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
+
+ svm_switch_vmcb(svm, &svm->vmcb01);
- /* On vmexit the GIF is set to false */
+ /*
+ * On vmexit the GIF is set to false and
+ * no event can be injected in L1.
+ */
svm_set_gif(svm, false);
+ svm->vmcb->control.exit_int_info = 0;
- svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset =
- svm->vcpu.arch.l1_tsc_offset;
+ svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
+ if (svm->vmcb->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
+ svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
+ vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+ }
svm->nested.ctl.nested_cr3 = 0;
- /* Restore selected save entries */
- svm->vmcb->save.es = hsave->save.es;
- svm->vmcb->save.cs = hsave->save.cs;
- svm->vmcb->save.ss = hsave->save.ss;
- svm->vmcb->save.ds = hsave->save.ds;
- svm->vmcb->save.gdtr = hsave->save.gdtr;
- svm->vmcb->save.idtr = hsave->save.idtr;
- kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
- kvm_set_rflags(&svm->vcpu, hsave->save.rflags | X86_EFLAGS_FIXED);
- svm_set_efer(&svm->vcpu, hsave->save.efer);
- svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
- svm_set_cr4(&svm->vcpu, hsave->save.cr4);
- kvm_rax_write(&svm->vcpu, hsave->save.rax);
- kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
- kvm_rip_write(&svm->vcpu, hsave->save.rip);
- svm->vmcb->save.dr7 = DR7_FIXED_1;
- svm->vmcb->save.cpl = 0;
- svm->vmcb->control.exit_int_info = 0;
+ /*
+ * Restore processor state that had been saved in vmcb01
+ */
+ kvm_set_rflags(vcpu, svm->vmcb->save.rflags);
+ svm_set_efer(vcpu, svm->vmcb->save.efer);
+ svm_set_cr0(vcpu, svm->vmcb->save.cr0 | X86_CR0_PE);
+ svm_set_cr4(vcpu, svm->vmcb->save.cr4);
+ kvm_rax_write(vcpu, svm->vmcb->save.rax);
+ kvm_rsp_write(vcpu, svm->vmcb->save.rsp);
+ kvm_rip_write(vcpu, svm->vmcb->save.rip);
- vmcb_mark_all_dirty(svm->vmcb);
+ svm->vcpu.arch.dr7 = DR7_FIXED_1;
+ kvm_update_dr7(&svm->vcpu);
trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
vmcb12->control.exit_info_1,
@@ -701,50 +821,64 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
vmcb12->control.exit_int_info_err,
KVM_ISA_SVM);
- kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ kvm_vcpu_unmap(vcpu, &map, true);
+
+ nested_svm_transition_tlb_flush(vcpu);
- nested_svm_uninit_mmu_context(&svm->vcpu);
+ nested_svm_uninit_mmu_context(vcpu);
- rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false);
+ rc = nested_svm_load_cr3(vcpu, svm->vmcb->save.cr3, false, true);
if (rc)
return 1;
- if (npt_enabled)
- svm->vmcb->save.cr3 = hsave->save.cr3;
-
/*
* Drop what we picked up for L2 via svm_complete_interrupts() so it
* doesn't end up in L1.
*/
svm->vcpu.arch.nmi_injected = false;
- kvm_clear_exception_queue(&svm->vcpu);
- kvm_clear_interrupt_queue(&svm->vcpu);
+ kvm_clear_exception_queue(vcpu);
+ kvm_clear_interrupt_queue(vcpu);
+
+ /*
+ * If we are here following the completion of a VMRUN that
+ * is being single-stepped, queue the pending #DB intercept
+ * right now so that it can be accounted for before we execute
+ * L1's next instruction.
+ */
+ if (unlikely(svm->vmcb->save.rflags & X86_EFLAGS_TF))
+ kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
return 0;
}
+static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
+{
+ nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
+}
+
int svm_allocate_nested(struct vcpu_svm *svm)
{
- struct page *hsave_page;
+ struct page *vmcb02_page;
if (svm->nested.initialized)
return 0;
- hsave_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
- if (!hsave_page)
+ vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ if (!vmcb02_page)
return -ENOMEM;
- svm->nested.hsave = page_address(hsave_page);
+ svm->nested.vmcb02.ptr = page_address(vmcb02_page);
+ svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
svm->nested.msrpm = svm_vcpu_alloc_msrpm();
if (!svm->nested.msrpm)
- goto err_free_hsave;
+ goto err_free_vmcb02;
svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
svm->nested.initialized = true;
return 0;
-err_free_hsave:
- __free_page(hsave_page);
+err_free_vmcb02:
+ __free_page(vmcb02_page);
return -ENOMEM;
}
@@ -756,8 +890,17 @@ void svm_free_nested(struct vcpu_svm *svm)
svm_vcpu_free_msrpm(svm->nested.msrpm);
svm->nested.msrpm = NULL;
- __free_page(virt_to_page(svm->nested.hsave));
- svm->nested.hsave = NULL;
+ __free_page(virt_to_page(svm->nested.vmcb02.ptr));
+ svm->nested.vmcb02.ptr = NULL;
+
+ /*
+ * When last_vmcb12_gpa matches the current vmcb12 gpa,
+ * some vmcb12 fields are not loaded if they are marked clean
+ * in the vmcb12, since in this case they are up to date already.
+ *
+ * When the vmcb02 is freed, this optimization becomes invalid.
+ */
+ svm->nested.last_vmcb12_gpa = INVALID_GPA;
svm->nested.initialized = false;
}
@@ -767,18 +910,21 @@ void svm_free_nested(struct vcpu_svm *svm)
*/
void svm_leave_nested(struct vcpu_svm *svm)
{
- if (is_guest_mode(&svm->vcpu)) {
- struct vmcb *hsave = svm->nested.hsave;
- struct vmcb *vmcb = svm->vmcb;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+ if (is_guest_mode(vcpu)) {
svm->nested.nested_run_pending = 0;
- leave_guest_mode(&svm->vcpu);
- copy_vmcb_control_area(&vmcb->control, &hsave->control);
- nested_svm_uninit_mmu_context(&svm->vcpu);
+ svm->nested.vmcb12_gpa = INVALID_GPA;
+
+ leave_guest_mode(vcpu);
+
+ svm_switch_vmcb(svm, &svm->vmcb01);
+
+ nested_svm_uninit_mmu_context(vcpu);
vmcb_mark_all_dirty(svm->vmcb);
}
- kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu);
+ kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
}
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
@@ -887,16 +1033,15 @@ int nested_svm_exit_handled(struct vcpu_svm *svm)
return vmexit;
}
-int nested_svm_check_permissions(struct vcpu_svm *svm)
+int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
{
- if (!(svm->vcpu.arch.efer & EFER_SVME) ||
- !is_paging(&svm->vcpu)) {
- kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+ if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
- if (svm->vmcb->save.cpl) {
- kvm_inject_gp(&svm->vcpu, 0);
+ if (to_svm(vcpu)->vmcb->save.cpl) {
+ kvm_inject_gp(vcpu, 0);
return 1;
}
@@ -944,50 +1089,11 @@ static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
nested_svm_vmexit(svm);
}
-static void nested_svm_smi(struct vcpu_svm *svm)
-{
- svm->vmcb->control.exit_code = SVM_EXIT_SMI;
- svm->vmcb->control.exit_info_1 = 0;
- svm->vmcb->control.exit_info_2 = 0;
-
- nested_svm_vmexit(svm);
-}
-
-static void nested_svm_nmi(struct vcpu_svm *svm)
-{
- svm->vmcb->control.exit_code = SVM_EXIT_NMI;
- svm->vmcb->control.exit_info_1 = 0;
- svm->vmcb->control.exit_info_2 = 0;
-
- nested_svm_vmexit(svm);
-}
-
-static void nested_svm_intr(struct vcpu_svm *svm)
-{
- trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
-
- svm->vmcb->control.exit_code = SVM_EXIT_INTR;
- svm->vmcb->control.exit_info_1 = 0;
- svm->vmcb->control.exit_info_2 = 0;
-
- nested_svm_vmexit(svm);
-}
-
static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}
-static void nested_svm_init(struct vcpu_svm *svm)
-{
- svm->vmcb->control.exit_code = SVM_EXIT_INIT;
- svm->vmcb->control.exit_info_1 = 0;
- svm->vmcb->control.exit_info_2 = 0;
-
- nested_svm_vmexit(svm);
-}
-
-
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -1001,12 +1107,18 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
return -EBUSY;
if (!nested_exit_on_init(svm))
return 0;
- nested_svm_init(svm);
+ nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
return 0;
}
if (vcpu->arch.exception.pending) {
- if (block_nested_events)
+ /*
+ * Only a pending nested run can block a pending exception.
+ * Otherwise an injected NMI/interrupt should either be
+ * lost or delivered to the nested hypervisor in the EXITINTINFO
+ * vmcb field, while delivering the pending exception.
+ */
+ if (svm->nested.nested_run_pending)
return -EBUSY;
if (!nested_exit_on_exception(svm))
return 0;
@@ -1019,7 +1131,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
return -EBUSY;
if (!nested_exit_on_smi(svm))
return 0;
- nested_svm_smi(svm);
+ nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
return 0;
}
@@ -1028,7 +1140,7 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
return -EBUSY;
if (!nested_exit_on_nmi(svm))
return 0;
- nested_svm_nmi(svm);
+ nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
return 0;
}
@@ -1037,7 +1149,8 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
return -EBUSY;
if (!nested_exit_on_intr(svm))
return 0;
- nested_svm_intr(svm);
+ trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
+ nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
return 0;
}
@@ -1056,8 +1169,8 @@ int nested_svm_exit_special(struct vcpu_svm *svm)
case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
- if (get_host_vmcb(svm)->control.intercepts[INTERCEPT_EXCEPTION] &
- excp_bits)
+ if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
+ excp_bits)
return NESTED_EXIT_HOST;
else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
svm->vcpu.arch.apf.host_apf_flags)
@@ -1121,10 +1234,9 @@ static int svm_get_nested_state(struct kvm_vcpu *vcpu,
if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
sizeof(user_vmcb->control)))
return -EFAULT;
- if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save,
+ if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
sizeof(user_vmcb->save)))
return -EFAULT;
-
out:
return kvm_state.size;
}
@@ -1134,13 +1246,12 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
struct kvm_nested_state *kvm_state)
{
struct vcpu_svm *svm = to_svm(vcpu);
- struct vmcb *hsave = svm->nested.hsave;
struct vmcb __user *user_vmcb = (struct vmcb __user *)
&user_kvm_nested_state->data.svm[0];
struct vmcb_control_area *ctl;
struct vmcb_save_area *save;
+ unsigned long cr0;
int ret;
- u32 cr0;
BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
KVM_STATE_NESTED_SVM_VMCB_SIZE);
@@ -1179,8 +1290,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL;
ret = -ENOMEM;
- ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
- save = kzalloc(sizeof(*save), GFP_KERNEL);
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
+ save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
if (!ctl || !save)
goto out_free;
@@ -1191,12 +1302,12 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
goto out_free;
ret = -EINVAL;
- if (!nested_vmcb_check_controls(ctl))
+ if (!nested_vmcb_check_controls(vcpu, ctl))
goto out_free;
/*
* Processor state contains L2 state. Check that it is
- * valid for guest mode (see nested_vmcb_checks).
+ * valid for guest mode (see nested_vmcb_valid_sregs).
*/
cr0 = kvm_read_cr0(vcpu);
if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
@@ -1205,27 +1316,66 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
/*
* Validate host state saved from before VMRUN (see
* nested_svm_check_permissions).
- * TODO: validate reserved bits for all saved state.
*/
- if (!(save->cr0 & X86_CR0_PG))
+ if (!(save->cr0 & X86_CR0_PG) ||
+ !(save->cr0 & X86_CR0_PE) ||
+ (save->rflags & X86_EFLAGS_VM) ||
+ !nested_vmcb_valid_sregs(vcpu, save))
goto out_free;
/*
- * All checks done, we can enter guest mode. L1 control fields
- * come from the nested save state. Guest state is already
- * in the registers, the save area of the nested state instead
- * contains saved L1 state.
+ * While the nested guest CR3 is already checked and set by
+ * KVM_SET_SREGS, it was set before the nested state was loaded,
+ * so the MMU might not be initialized correctly.
+ * Set it again to fix this.
*/
+ ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
+ nested_npt_enabled(svm), false);
+ if (WARN_ON_ONCE(ret))
+ goto out_free;
+
+
+ /*
+ * All checks done, we can enter guest mode. Userspace provides
+ * vmcb12.control, which will be combined with L1 and stored into
+ * vmcb02, and the L1 save state which we store in vmcb01.
+ * L2 registers if needed are moved from the current VMCB to VMCB02.
+ */
+
+ if (is_guest_mode(vcpu))
+ svm_leave_nested(svm);
+ else
+ svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
+
+ svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
+
svm->nested.nested_run_pending =
!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
- copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
- hsave->save = *save;
-
svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
- load_nested_vmcb_control(svm, ctl);
- nested_prepare_vmcb_control(svm);
+
+ svm->vmcb01.ptr->save.es = save->es;
+ svm->vmcb01.ptr->save.cs = save->cs;
+ svm->vmcb01.ptr->save.ss = save->ss;
+ svm->vmcb01.ptr->save.ds = save->ds;
+ svm->vmcb01.ptr->save.gdtr = save->gdtr;
+ svm->vmcb01.ptr->save.idtr = save->idtr;
+ svm->vmcb01.ptr->save.rflags = save->rflags | X86_EFLAGS_FIXED;
+ svm->vmcb01.ptr->save.efer = save->efer;
+ svm->vmcb01.ptr->save.cr0 = save->cr0;
+ svm->vmcb01.ptr->save.cr3 = save->cr3;
+ svm->vmcb01.ptr->save.cr4 = save->cr4;
+ svm->vmcb01.ptr->save.rax = save->rax;
+ svm->vmcb01.ptr->save.rsp = save->rsp;
+ svm->vmcb01.ptr->save.rip = save->rip;
+ svm->vmcb01.ptr->save.cpl = 0;
+
+ nested_load_control_from_vmcb12(svm, ctl);
+
+ svm_switch_vmcb(svm, &svm->nested.vmcb02);
+
+ nested_vmcb02_prepare_control(svm);
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
ret = 0;
@@ -1236,8 +1386,37 @@ out_free:
return ret;
}
+static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ if (WARN_ON(!is_guest_mode(vcpu)))
+ return true;
+
+ if (!vcpu->arch.pdptrs_from_userspace &&
+ !nested_npt_enabled(svm) && is_pae_paging(vcpu))
+ /*
+ * Reload the guest's PDPTRs since after a migration
+ * the guest CR3 might be restored prior to setting the nested
+ * state which can lead to a load of wrong PDPTRs.
+ */
+ if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
+ return false;
+
+ if (!nested_svm_vmrun_msrpm(svm)) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror =
+ KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+ return false;
+ }
+
+ return true;
+}
+
struct kvm_x86_nested_ops svm_nested_ops = {
.check_events = svm_check_nested_events,
+ .triple_fault = nested_svm_triple_fault,
.get_nested_state_pages = svm_get_nested_state_pages,
.get_state = svm_get_nested_state,
.set_state = svm_set_nested_state,
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index 035da07500e8..fdf587f19c5f 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -98,6 +98,8 @@ static enum index msr_to_index(u32 msr)
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
enum pmu_type type)
{
+ struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+
switch (msr) {
case MSR_F15H_PERF_CTL0:
case MSR_F15H_PERF_CTL1:
@@ -105,6 +107,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
case MSR_F15H_PERF_CTL3:
case MSR_F15H_PERF_CTL4:
case MSR_F15H_PERF_CTL5:
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+ return NULL;
+ fallthrough;
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
if (type != PMU_TYPE_EVNTSEL)
return NULL;
@@ -115,6 +120,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
case MSR_F15H_PERF_CTR3:
case MSR_F15H_PERF_CTR4:
case MSR_F15H_PERF_CTR5:
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+ return NULL;
+ fallthrough;
case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
if (type != PMU_TYPE_COUNTER)
return NULL;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 874ea309279f..8d36f0c73071 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -14,6 +14,7 @@
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
+#include <linux/misc_cgroup.h>
#include <linux/processor.h>
#include <linux/trace_events.h>
#include <asm/fpu/internal.h>
@@ -28,12 +29,40 @@
#define __ex(x) __kvm_handle_fault_on_reboot(x)
+#ifndef CONFIG_KVM_AMD_SEV
+/*
+ * When this config is not defined, SEV feature is not supported and APIs in
+ * this file are not used but this file still gets compiled into the KVM AMD
+ * module.
+ *
+ * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum
+ * misc_res_type {} defined in linux/misc_cgroup.h.
+ *
+ * Below macros allow compilation to succeed.
+ */
+#define MISC_CG_RES_SEV MISC_CG_RES_TYPES
+#define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES
+#endif
+
+#ifdef CONFIG_KVM_AMD_SEV
+/* enable/disable SEV support */
+static bool sev_enabled = true;
+module_param_named(sev, sev_enabled, bool, 0444);
+
+/* enable/disable SEV-ES support */
+static bool sev_es_enabled = true;
+module_param_named(sev_es, sev_es_enabled, bool, 0444);
+#else
+#define sev_enabled false
+#define sev_es_enabled false
+#endif /* CONFIG_KVM_AMD_SEV */
+
static u8 sev_enc_bit;
-static int sev_flush_asids(void);
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
+static unsigned long sev_me_mask;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;
@@ -45,9 +74,15 @@ struct enc_region {
unsigned long size;
};
-static int sev_flush_asids(void)
+/* Called with the sev_bitmap_lock held, or on shutdown */
+static int sev_flush_asids(int min_asid, int max_asid)
{
- int ret, error = 0;
+ int ret, pos, error = 0;
+
+ /* Check if there are any ASIDs to reclaim before performing a flush */
+ pos = find_next_bit(sev_reclaim_asid_bitmap, max_asid, min_asid);
+ if (pos >= max_asid)
+ return -EBUSY;
/*
* DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
@@ -66,17 +101,15 @@ static int sev_flush_asids(void)
return ret;
}
+static inline bool is_mirroring_enc_context(struct kvm *kvm)
+{
+ return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
+}
+
/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(int min_asid, int max_asid)
{
- int pos;
-
- /* Check if there are any ASIDs to reclaim before performing a flush */
- pos = find_next_bit(sev_reclaim_asid_bitmap, max_sev_asid, min_asid);
- if (pos >= max_asid)
- return false;
-
- if (sev_flush_asids())
+ if (sev_flush_asids(min_asid, max_asid))
return false;
/* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
@@ -89,8 +122,19 @@ static bool __sev_recycle_asids(int min_asid, int max_asid)
static int sev_asid_new(struct kvm_sev_info *sev)
{
- int pos, min_asid, max_asid;
+ int pos, min_asid, max_asid, ret;
bool retry = true;
+ enum misc_res_type type;
+
+ type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
+ WARN_ON(sev->misc_cg);
+ sev->misc_cg = get_current_misc_cg();
+ ret = misc_cg_try_charge(type, sev->misc_cg, 1);
+ if (ret) {
+ put_misc_cg(sev->misc_cg);
+ sev->misc_cg = NULL;
+ return ret;
+ }
mutex_lock(&sev_bitmap_lock);
@@ -108,7 +152,8 @@ again:
goto again;
}
mutex_unlock(&sev_bitmap_lock);
- return -EBUSY;
+ ret = -EBUSY;
+ goto e_uncharge;
}
__set_bit(pos, sev_asid_bitmap);
@@ -116,6 +161,11 @@ again:
mutex_unlock(&sev_bitmap_lock);
return pos + 1;
+e_uncharge:
+ misc_cg_uncharge(type, sev->misc_cg, 1);
+ put_misc_cg(sev->misc_cg);
+ sev->misc_cg = NULL;
+ return ret;
}
static int sev_get_asid(struct kvm *kvm)
@@ -125,14 +175,15 @@ static int sev_get_asid(struct kvm *kvm)
return sev->asid;
}
-static void sev_asid_free(int asid)
+static void sev_asid_free(struct kvm_sev_info *sev)
{
struct svm_cpu_data *sd;
int cpu, pos;
+ enum misc_res_type type;
mutex_lock(&sev_bitmap_lock);
- pos = asid - 1;
+ pos = sev->asid - 1;
__set_bit(pos, sev_reclaim_asid_bitmap);
for_each_possible_cpu(cpu) {
@@ -141,53 +192,59 @@ static void sev_asid_free(int asid)
}
mutex_unlock(&sev_bitmap_lock);
+
+ type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
+ misc_cg_uncharge(type, sev->misc_cg, 1);
+ put_misc_cg(sev->misc_cg);
+ sev->misc_cg = NULL;
}
-static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+static void sev_decommission(unsigned int handle)
{
- struct sev_data_decommission *decommission;
- struct sev_data_deactivate *data;
+ struct sev_data_decommission decommission;
if (!handle)
return;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data)
+ decommission.handle = handle;
+ sev_guest_decommission(&decommission, NULL);
+}
+
+static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+{
+ struct sev_data_deactivate deactivate;
+
+ if (!handle)
return;
- /* deactivate handle */
- data->handle = handle;
+ deactivate.handle = handle;
/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
down_read(&sev_deactivate_lock);
- sev_guest_deactivate(data, NULL);
+ sev_guest_deactivate(&deactivate, NULL);
up_read(&sev_deactivate_lock);
- kfree(data);
-
- decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
- if (!decommission)
- return;
-
- /* decommission handle */
- decommission->handle = handle;
- sev_guest_decommission(decommission, NULL);
-
- kfree(decommission);
+ sev_decommission(handle);
}
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ bool es_active = argp->id == KVM_SEV_ES_INIT;
int asid, ret;
+ if (kvm->created_vcpus)
+ return -EINVAL;
+
ret = -EBUSY;
if (unlikely(sev->active))
return ret;
+ sev->es_active = es_active;
asid = sev_asid_new(sev);
if (asid < 0)
- return ret;
+ goto e_no_asid;
+ sev->asid = asid;
ret = sev_platform_init(&argp->error);
if (ret)
@@ -200,35 +257,23 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
return 0;
e_free:
- sev_asid_free(asid);
+ sev_asid_free(sev);
+ sev->asid = 0;
+e_no_asid:
+ sev->es_active = false;
return ret;
}
-static int sev_es_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
-{
- if (!sev_es)
- return -ENOTTY;
-
- to_kvm_svm(kvm)->sev_info.es_active = true;
-
- return sev_guest_init(kvm, argp);
-}
-
static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
- struct sev_data_activate *data;
+ struct sev_data_activate activate;
int asid = sev_get_asid(kvm);
int ret;
- data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
- if (!data)
- return -ENOMEM;
-
/* activate ASID on the given handle */
- data->handle = handle;
- data->asid = asid;
- ret = sev_guest_activate(data, error);
- kfree(data);
+ activate.handle = handle;
+ activate.asid = asid;
+ ret = sev_guest_activate(&activate, error);
return ret;
}
@@ -258,7 +303,7 @@ static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
- struct sev_data_launch_start *start;
+ struct sev_data_launch_start start;
struct kvm_sev_launch_start params;
void *dh_blob, *session_blob;
int *error = &argp->error;
@@ -270,20 +315,16 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
return -EFAULT;
- start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
- if (!start)
- return -ENOMEM;
+ memset(&start, 0, sizeof(start));
dh_blob = NULL;
if (params.dh_uaddr) {
dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
- if (IS_ERR(dh_blob)) {
- ret = PTR_ERR(dh_blob);
- goto e_free;
- }
+ if (IS_ERR(dh_blob))
+ return PTR_ERR(dh_blob);
- start->dh_cert_address = __sme_set(__pa(dh_blob));
- start->dh_cert_len = params.dh_len;
+ start.dh_cert_address = __sme_set(__pa(dh_blob));
+ start.dh_cert_len = params.dh_len;
}
session_blob = NULL;
@@ -294,40 +335,40 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
goto e_free_dh;
}
- start->session_address = __sme_set(__pa(session_blob));
- start->session_len = params.session_len;
+ start.session_address = __sme_set(__pa(session_blob));
+ start.session_len = params.session_len;
}
- start->handle = params.handle;
- start->policy = params.policy;
+ start.handle = params.handle;
+ start.policy = params.policy;
/* create memory encryption context */
- ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
+ ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
if (ret)
goto e_free_session;
/* Bind ASID to this guest */
- ret = sev_bind_asid(kvm, start->handle, error);
- if (ret)
+ ret = sev_bind_asid(kvm, start.handle, error);
+ if (ret) {
+ sev_decommission(start.handle);
goto e_free_session;
+ }
/* return handle to userspace */
- params.handle = start->handle;
+ params.handle = start.handle;
if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
- sev_unbind_asid(kvm, start->handle);
+ sev_unbind_asid(kvm, start.handle);
ret = -EFAULT;
goto e_free_session;
}
- sev->handle = start->handle;
+ sev->handle = start.handle;
sev->fd = argp->sev_fd;
e_free_session:
kfree(session_blob);
e_free_dh:
kfree(dh_blob);
-e_free:
- kfree(start);
return ret;
}
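sev_launch_start() is the first of many functions in this file converted from a kzalloc()'d sev_data_* command buffer to a zeroed stack local: the structs are tiny, so dropping the allocation removes both an -ENOMEM failure path and a cleanup label. The shape of the transformation, distilled from the sev_launch_finish() hunk further down:

/* Before: heap-allocated command buffer plus allocation-failure handling. */
struct sev_data_launch_finish *data;

data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
if (!data)
	return -ENOMEM;
data->handle = sev->handle;
ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
kfree(data);
return ret;

/* After: a stack local; no -ENOMEM path, no kfree(). */
struct sev_data_launch_finish data;

data.handle = sev->handle;
return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);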
@@ -446,7 +487,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct kvm_sev_launch_update_data params;
- struct sev_data_launch_update_data *data;
+ struct sev_data_launch_update_data data;
struct page **inpages;
int ret;
@@ -456,20 +497,14 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
return -EFAULT;
- data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
- if (!data)
- return -ENOMEM;
-
vaddr = params.uaddr;
size = params.len;
vaddr_end = vaddr + size;
/* Lock the user memory. */
inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
- if (IS_ERR(inpages)) {
- ret = PTR_ERR(inpages);
- goto e_free;
- }
+ if (IS_ERR(inpages))
+ return PTR_ERR(inpages);
/*
* Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
@@ -477,6 +512,9 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
*/
sev_clflush_pages(inpages, npages);
+ data.reserved = 0;
+ data.handle = sev->handle;
+
for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
int offset, len;
@@ -491,10 +529,9 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
- data->handle = sev->handle;
- data->len = len;
- data->address = __sme_page_pa(inpages[i]) + offset;
- ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
+ data.len = len;
+ data.address = __sme_page_pa(inpages[i]) + offset;
+ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
if (ret)
goto e_unpin;
@@ -510,8 +547,6 @@ e_unpin:
}
/* unlock the user pages */
sev_unpin_memory(kvm, inpages, npages);
-e_free:
- kfree(data);
return ret;
}
@@ -563,23 +598,22 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
- struct sev_data_launch_update_vmsa *vmsa;
+ struct sev_data_launch_update_vmsa vmsa;
+ struct kvm_vcpu *vcpu;
int i, ret;
if (!sev_es_guest(kvm))
return -ENOTTY;
- vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL);
- if (!vmsa)
- return -ENOMEM;
+ vmsa.reserved = 0;
- for (i = 0; i < kvm->created_vcpus; i++) {
- struct vcpu_svm *svm = to_svm(kvm->vcpus[i]);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ struct vcpu_svm *svm = to_svm(vcpu);
/* Perform some pre-encryption checks against the VMSA */
ret = sev_es_sync_vmsa(svm);
if (ret)
- goto e_free;
+ return ret;
/*
* The LAUNCH_UPDATE_VMSA command will perform in-place
@@ -589,27 +623,25 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
*/
clflush_cache_range(svm->vmsa, PAGE_SIZE);
- vmsa->handle = sev->handle;
- vmsa->address = __sme_pa(svm->vmsa);
- vmsa->len = PAGE_SIZE;
- ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, vmsa,
+ vmsa.handle = sev->handle;
+ vmsa.address = __sme_pa(svm->vmsa);
+ vmsa.len = PAGE_SIZE;
+ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa,
&argp->error);
if (ret)
- goto e_free;
+ return ret;
svm->vcpu.arch.guest_state_protected = true;
}
-e_free:
- kfree(vmsa);
- return ret;
+ return 0;
}
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
void __user *measure = (void __user *)(uintptr_t)argp->data;
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
- struct sev_data_launch_measure *data;
+ struct sev_data_launch_measure data;
struct kvm_sev_launch_measure params;
void __user *p = NULL;
void *blob = NULL;
@@ -621,9 +653,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (copy_from_user(&params, measure, sizeof(params)))
return -EFAULT;
- data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
- if (!data)
- return -ENOMEM;
+ memset(&data, 0, sizeof(data));
/* User wants to query the blob length */
if (!params.len)
@@ -631,23 +661,20 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
p = (void __user *)(uintptr_t)params.uaddr;
if (p) {
- if (params.len > SEV_FW_BLOB_MAX_SIZE) {
- ret = -EINVAL;
- goto e_free;
- }
+ if (params.len > SEV_FW_BLOB_MAX_SIZE)
+ return -EINVAL;
- ret = -ENOMEM;
- blob = kmalloc(params.len, GFP_KERNEL);
+ blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
if (!blob)
- goto e_free;
+ return -ENOMEM;
- data->address = __psp_pa(blob);
- data->len = params.len;
+ data.address = __psp_pa(blob);
+ data.len = params.len;
}
cmd:
- data->handle = sev->handle;
- ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
+ data.handle = sev->handle;
+ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);
/*
* If we query the session length, FW responded with expected data.
@@ -664,63 +691,50 @@ cmd:
}
done:
- params.len = data->len;
+ params.len = data.len;
if (copy_to_user(measure, &params, sizeof(params)))
ret = -EFAULT;
e_free_blob:
kfree(blob);
-e_free:
- kfree(data);
return ret;
}
static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
- struct sev_data_launch_finish *data;
- int ret;
+ struct sev_data_launch_finish data;
if (!sev_guest(kvm))
return -ENOTTY;
- data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
- if (!data)
- return -ENOMEM;
-
- data->handle = sev->handle;
- ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
-
- kfree(data);
- return ret;
+ data.handle = sev->handle;
+ return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
}
static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct kvm_sev_guest_status params;
- struct sev_data_guest_status *data;
+ struct sev_data_guest_status data;
int ret;
if (!sev_guest(kvm))
return -ENOTTY;
- data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
- if (!data)
- return -ENOMEM;
+ memset(&data, 0, sizeof(data));
- data->handle = sev->handle;
- ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
+ data.handle = sev->handle;
+ ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
if (ret)
- goto e_free;
+ return ret;
- params.policy = data->policy;
- params.state = data->state;
- params.handle = data->handle;
+ params.policy = data.policy;
+ params.state = data.state;
+ params.handle = data.handle;
if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
ret = -EFAULT;
-e_free:
- kfree(data);
+
return ret;
}
@@ -729,23 +743,17 @@ static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
int *error, bool enc)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
- struct sev_data_dbg *data;
- int ret;
-
- data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
- if (!data)
- return -ENOMEM;
+ struct sev_data_dbg data;
- data->handle = sev->handle;
- data->dst_addr = dst;
- data->src_addr = src;
- data->len = size;
+ data.reserved = 0;
+ data.handle = sev->handle;
+ data.dst_addr = dst;
+ data.src_addr = src;
+ data.len = size;
- ret = sev_issue_cmd(kvm,
- enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
- data, error);
- kfree(data);
- return ret;
+ return sev_issue_cmd(kvm,
+ enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
+ &data, error);
}
static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
@@ -765,7 +773,7 @@ static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
}
static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
- unsigned long __user dst_uaddr,
+ void __user *dst_uaddr,
unsigned long dst_paddr,
int size, int *err)
{
@@ -789,8 +797,7 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
if (tpage) {
offset = paddr & 15;
- if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
- page_address(tpage) + offset, size))
+ if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
ret = -EFAULT;
}
@@ -802,9 +809,9 @@ e_free:
}
static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
- unsigned long __user vaddr,
+ void __user *vaddr,
unsigned long dst_paddr,
- unsigned long __user dst_vaddr,
+ void __user *dst_vaddr,
int size, int *error)
{
struct page *src_tpage = NULL;
@@ -812,13 +819,12 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
int ret, len = size;
/* If source buffer is not aligned then use an intermediate buffer */
- if (!IS_ALIGNED(vaddr, 16)) {
+ if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
src_tpage = alloc_page(GFP_KERNEL);
if (!src_tpage)
return -ENOMEM;
- if (copy_from_user(page_address(src_tpage),
- (void __user *)(uintptr_t)vaddr, size)) {
+ if (copy_from_user(page_address(src_tpage), vaddr, size)) {
__free_page(src_tpage);
return -EFAULT;
}
@@ -832,7 +838,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
* - copy the source buffer in an intermediate buffer
* - use the intermediate buffer as source buffer
*/
- if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
+ if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
int dst_offset;
dst_tpage = alloc_page(GFP_KERNEL);
@@ -857,7 +863,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
page_address(src_tpage), size);
else {
if (copy_from_user(page_address(dst_tpage) + dst_offset,
- (void __user *)(uintptr_t)vaddr, size)) {
+ vaddr, size)) {
ret = -EFAULT;
goto e_free;
}
@@ -937,15 +943,15 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
if (dec)
ret = __sev_dbg_decrypt_user(kvm,
__sme_page_pa(src_p[0]) + s_off,
- dst_vaddr,
+ (void __user *)dst_vaddr,
__sme_page_pa(dst_p[0]) + d_off,
len, &argp->error);
else
ret = __sev_dbg_encrypt_user(kvm,
__sme_page_pa(src_p[0]) + s_off,
- vaddr,
+ (void __user *)vaddr,
__sme_page_pa(dst_p[0]) + d_off,
- dst_vaddr,
+ (void __user *)dst_vaddr,
len, &argp->error);
sev_unpin_memory(kvm, src_p, n);
@@ -965,7 +971,7 @@ err:
static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
- struct sev_data_launch_secret *data;
+ struct sev_data_launch_secret data;
struct kvm_sev_launch_secret params;
struct page **pages;
void *blob, *hdr;
@@ -997,41 +1003,36 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
goto e_unpin_memory;
}
- ret = -ENOMEM;
- data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
- if (!data)
- goto e_unpin_memory;
+ memset(&data, 0, sizeof(data));
offset = params.guest_uaddr & (PAGE_SIZE - 1);
- data->guest_address = __sme_page_pa(pages[0]) + offset;
- data->guest_len = params.guest_len;
+ data.guest_address = __sme_page_pa(pages[0]) + offset;
+ data.guest_len = params.guest_len;
blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
if (IS_ERR(blob)) {
ret = PTR_ERR(blob);
- goto e_free;
+ goto e_unpin_memory;
}
- data->trans_address = __psp_pa(blob);
- data->trans_len = params.trans_len;
+ data.trans_address = __psp_pa(blob);
+ data.trans_len = params.trans_len;
hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
if (IS_ERR(hdr)) {
ret = PTR_ERR(hdr);
goto e_free_blob;
}
- data->hdr_address = __psp_pa(hdr);
- data->hdr_len = params.hdr_len;
+ data.hdr_address = __psp_pa(hdr);
+ data.hdr_len = params.hdr_len;
- data->handle = sev->handle;
- ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
+ data.handle = sev->handle;
+ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);
kfree(hdr);
e_free_blob:
kfree(blob);
-e_free:
- kfree(data);
e_unpin_memory:
/* content of memory is updated, mark pages dirty */
for (i = 0; i < n; i++) {
@@ -1046,7 +1047,7 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
void __user *report = (void __user *)(uintptr_t)argp->data;
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
- struct sev_data_attestation_report *data;
+ struct sev_data_attestation_report data;
struct kvm_sev_attestation_report params;
void __user *p;
void *blob = NULL;
@@ -1058,9 +1059,7 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
return -EFAULT;
- data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
- if (!data)
- return -ENOMEM;
+ memset(&data, 0, sizeof(data));
/* User wants to query the blob length */
if (!params.len)
@@ -1068,23 +1067,20 @@ static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
p = (void __user *)(uintptr_t)params.uaddr;
if (p) {
- if (params.len > SEV_FW_BLOB_MAX_SIZE) {
- ret = -EINVAL;
- goto e_free;
- }
+ if (params.len > SEV_FW_BLOB_MAX_SIZE)
+ return -EINVAL;
- ret = -ENOMEM;
- blob = kmalloc(params.len, GFP_KERNEL);
+ blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
if (!blob)
- goto e_free;
+ return -ENOMEM;
- data->address = __psp_pa(blob);
- data->len = params.len;
- memcpy(data->mnonce, params.mnonce, sizeof(params.mnonce));
+ data.address = __psp_pa(blob);
+ data.len = params.len;
+ memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
}
cmd:
- data->handle = sev->handle;
- ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, data, &argp->error);
+ data.handle = sev->handle;
+ ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
/*
* If we query the session length, FW responded with expected data.
*/
@@ -1100,22 +1096,415 @@ cmd:
}
done:
- params.len = data->len;
+ params.len = data.len;
if (copy_to_user(report, &params, sizeof(params)))
ret = -EFAULT;
e_free_blob:
kfree(blob);
-e_free:
- kfree(data);
return ret;
}
+/* Userspace wants to query session length. */
+static int
+__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
+ struct kvm_sev_send_start *params)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_send_start data;
+ int ret;
+
+ memset(&data, 0, sizeof(data));
+ data.handle = sev->handle;
+ ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
+
+ params->session_len = data.session_len;
+ if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
+ sizeof(struct kvm_sev_send_start)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_send_start data;
+ struct kvm_sev_send_start params;
+ void *amd_certs, *session_data;
+ void *pdh_cert, *plat_certs;
+ int ret;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+ sizeof(struct kvm_sev_send_start)))
+ return -EFAULT;
+
+ /* if session_len is zero, userspace wants to query the session length */
+ if (!params.session_len)
+ return __sev_send_start_query_session_length(kvm, argp,
+ &params);
+
+ /* some sanity checks */
+ if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
+ !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
+ return -EINVAL;
+
+ /* allocate the memory to hold the session data blob */
+ session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
+ if (!session_data)
+ return -ENOMEM;
+
+ /* copy the certificate blobs from userspace */
+ pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
+ params.pdh_cert_len);
+ if (IS_ERR(pdh_cert)) {
+ ret = PTR_ERR(pdh_cert);
+ goto e_free_session;
+ }
+
+ plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
+ params.plat_certs_len);
+ if (IS_ERR(plat_certs)) {
+ ret = PTR_ERR(plat_certs);
+ goto e_free_pdh;
+ }
+
+ amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
+ params.amd_certs_len);
+ if (IS_ERR(amd_certs)) {
+ ret = PTR_ERR(amd_certs);
+ goto e_free_plat_cert;
+ }
+
+ /* populate the FW SEND_START fields with the system physical addresses */
+ memset(&data, 0, sizeof(data));
+ data.pdh_cert_address = __psp_pa(pdh_cert);
+ data.pdh_cert_len = params.pdh_cert_len;
+ data.plat_certs_address = __psp_pa(plat_certs);
+ data.plat_certs_len = params.plat_certs_len;
+ data.amd_certs_address = __psp_pa(amd_certs);
+ data.amd_certs_len = params.amd_certs_len;
+ data.session_address = __psp_pa(session_data);
+ data.session_len = params.session_len;
+ data.handle = sev->handle;
+
+ ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);
+
+ if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
+ session_data, params.session_len)) {
+ ret = -EFAULT;
+ goto e_free_amd_cert;
+ }
+
+ params.policy = data.policy;
+ params.session_len = data.session_len;
+ if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
+ sizeof(struct kvm_sev_send_start)))
+ ret = -EFAULT;
+
+e_free_amd_cert:
+ kfree(amd_certs);
+e_free_plat_cert:
+ kfree(plat_certs);
+e_free_pdh:
+ kfree(pdh_cert);
+e_free_session:
+ kfree(session_data);
+ return ret;
+}
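sev_send_start() follows the query-then-call convention: issuing the command with session_len == 0 makes the kernel report the required buffer size back through the same struct. A hedged userspace sketch of that two-pass flow — it assumes the struct kvm_sev_cmd / struct kvm_sev_send_start layouts and the KVM_MEMORY_ENCRYPT_OP ioctl from <linux/kvm.h>, with error handling and the certificate blobs elided:

#include <linux/kvm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

static int do_send_start(int vm_fd, int sev_fd)
{
	struct kvm_sev_send_start params;
	struct kvm_sev_cmd cmd;
	void *session;

	memset(&params, 0, sizeof(params));
	memset(&cmd, 0, sizeof(cmd));
	cmd.id = KVM_SEV_SEND_START;
	cmd.data = (unsigned long)&params;
	cmd.sev_fd = sev_fd;

	/* Pass 1: session_len == 0 asks the kernel for the buffer size.
	 * The firmware may flag this probe as an error while still
	 * reporting the length, so only params.session_len matters here. */
	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
	if (!params.session_len)
		return -1;

	session = malloc(params.session_len);
	if (!session)
		return -1;
	params.session_uaddr = (unsigned long)session;
	/* pdh_cert_uaddr/len, plat_certs_*, amd_certs_* must be set here,
	 * or the sanity checks above reject the call with -EINVAL. */

	/* Pass 2: the real SEND_START. */
	return ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
}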
+
+/* Userspace wants to query either header or trans length. */
+static int
+__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
+ struct kvm_sev_send_update_data *params)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_send_update_data data;
+ int ret;
+
+ memset(&data, 0, sizeof(data));
+ data.handle = sev->handle;
+ ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
+
+ params->hdr_len = data.hdr_len;
+ params->trans_len = data.trans_len;
+
+ if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
+ sizeof(struct kvm_sev_send_update_data)))
+ ret = -EFAULT;
+
+ return ret;
+}
+
+static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_send_update_data data;
+ struct kvm_sev_send_update_data params;
+ void *hdr, *trans_data;
+ struct page **guest_page;
+ unsigned long n;
+ int ret, offset;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+ sizeof(struct kvm_sev_send_update_data)))
+ return -EFAULT;
+
+ /* userspace wants to query either header or trans length */
+ if (!params.trans_len || !params.hdr_len)
+ return __sev_send_update_data_query_lengths(kvm, argp, &params);
+
+ if (!params.trans_uaddr || !params.guest_uaddr ||
+ !params.guest_len || !params.hdr_uaddr)
+ return -EINVAL;
+
+ /* Check if we are crossing the page boundary */
+ offset = params.guest_uaddr & (PAGE_SIZE - 1);
+ if ((params.guest_len + offset > PAGE_SIZE))
+ return -EINVAL;
+
+ /* Pin guest memory */
+ guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
+ PAGE_SIZE, &n, 0);
+ if (!guest_page)
+ return -EFAULT;
+
+ /* allocate memory for header and transport buffer */
+ ret = -ENOMEM;
+ hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
+ if (!hdr)
+ goto e_unpin;
+
+ trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
+ if (!trans_data)
+ goto e_free_hdr;
+
+ memset(&data, 0, sizeof(data));
+ data.hdr_address = __psp_pa(hdr);
+ data.hdr_len = params.hdr_len;
+ data.trans_address = __psp_pa(trans_data);
+ data.trans_len = params.trans_len;
+
+ /* The SEND_UPDATE_DATA command requires the C-bit to always be set. */
+ data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
+ data.guest_address |= sev_me_mask;
+ data.guest_len = params.guest_len;
+ data.handle = sev->handle;
+
+ ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);
+
+ if (ret)
+ goto e_free_trans_data;
+
+ /* copy transport buffer to user space */
+ if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
+ trans_data, params.trans_len)) {
+ ret = -EFAULT;
+ goto e_free_trans_data;
+ }
+
+ /* Copy packet header to userspace. */
+ ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
+ params.hdr_len);
+
+e_free_trans_data:
+ kfree(trans_data);
+e_free_hdr:
+ kfree(hdr);
+e_unpin:
+ sev_unpin_memory(kvm, guest_page, n);
+
+ return ret;
+}
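Both SEND_UPDATE_DATA above and RECEIVE_UPDATE_DATA below OR sev_me_mask into the guest page's physical address because the firmware expects the C-bit (encryption bit) set. The mask is computed later in this patch, in sev_hardware_setup(), from CPUID 0x8000001F EBX[5:0]. A quick arithmetic sketch with an assumed C-bit position of 47:

unsigned int ebx = 47;				/* assumed CPUID 0x8000001F EBX value */
unsigned long sev_me_mask = 1UL << (ebx & 0x3f);/* == 0x800000000000 for bit 47 */
unsigned long gpa = 0x12345000UL;		/* hypothetical guest page address */
unsigned long fw_addr = gpa | sev_me_mask;	/* address as handed to the firmware */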
+
+static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_send_finish data;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ data.handle = sev->handle;
+ return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
+}
+
+static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_send_cancel data;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ data.handle = sev->handle;
+ return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
+}
+
+static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_receive_start start;
+ struct kvm_sev_receive_start params;
+ int *error = &argp->error;
+ void *session_data;
+ void *pdh_data;
+ int ret;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ /* Get the parameters from userspace */
+ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+ sizeof(struct kvm_sev_receive_start)))
+ return -EFAULT;
+
+ /* some sanity checks */
+ if (!params.pdh_uaddr || !params.pdh_len ||
+ !params.session_uaddr || !params.session_len)
+ return -EINVAL;
+
+ pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
+ if (IS_ERR(pdh_data))
+ return PTR_ERR(pdh_data);
+
+ session_data = psp_copy_user_blob(params.session_uaddr,
+ params.session_len);
+ if (IS_ERR(session_data)) {
+ ret = PTR_ERR(session_data);
+ goto e_free_pdh;
+ }
+
+ memset(&start, 0, sizeof(start));
+ start.handle = params.handle;
+ start.policy = params.policy;
+ start.pdh_cert_address = __psp_pa(pdh_data);
+ start.pdh_cert_len = params.pdh_len;
+ start.session_address = __psp_pa(session_data);
+ start.session_len = params.session_len;
+
+ /* create memory encryption context */
+ ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
+ error);
+ if (ret)
+ goto e_free_session;
+
+ /* Bind ASID to this guest */
+ ret = sev_bind_asid(kvm, start.handle, error);
+ if (ret)
+ goto e_free_session;
+
+ params.handle = start.handle;
+ if (copy_to_user((void __user *)(uintptr_t)argp->data,
+ &params, sizeof(struct kvm_sev_receive_start))) {
+ ret = -EFAULT;
+ sev_unbind_asid(kvm, start.handle);
+ goto e_free_session;
+ }
+
+ sev->handle = start.handle;
+ sev->fd = argp->sev_fd;
+
+e_free_session:
+ kfree(session_data);
+e_free_pdh:
+ kfree(pdh_data);
+
+ return ret;
+}
+
+static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_receive_update_data params;
+ struct sev_data_receive_update_data data;
+ void *hdr = NULL, *trans = NULL;
+ struct page **guest_page;
+ unsigned long n;
+ int ret, offset;
+
+ if (!sev_guest(kvm))
+ return -EINVAL;
+
+ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+ sizeof(struct kvm_sev_receive_update_data)))
+ return -EFAULT;
+
+ if (!params.hdr_uaddr || !params.hdr_len ||
+ !params.guest_uaddr || !params.guest_len ||
+ !params.trans_uaddr || !params.trans_len)
+ return -EINVAL;
+
+ /* Check if we are crossing the page boundary */
+ offset = params.guest_uaddr & (PAGE_SIZE - 1);
+ if ((params.guest_len + offset > PAGE_SIZE))
+ return -EINVAL;
+
+ hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
+ if (IS_ERR(hdr))
+ return PTR_ERR(hdr);
+
+ trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto e_free_hdr;
+ }
+
+ memset(&data, 0, sizeof(data));
+ data.hdr_address = __psp_pa(hdr);
+ data.hdr_len = params.hdr_len;
+ data.trans_address = __psp_pa(trans);
+ data.trans_len = params.trans_len;
+
+ /* Pin guest memory */
+ ret = -EFAULT;
+ guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
+ PAGE_SIZE, &n, 0);
+ if (!guest_page)
+ goto e_free_trans;
+
+ /* The RECEIVE_UPDATE_DATA command requires the C-bit to always be set. */
+ data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
+ data.guest_address |= sev_me_mask;
+ data.guest_len = params.guest_len;
+ data.handle = sev->handle;
+
+ ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
+ &argp->error);
+
+ sev_unpin_memory(kvm, guest_page, n);
+
+e_free_trans:
+ kfree(trans);
+e_free_hdr:
+ kfree(hdr);
+
+ return ret;
+}
+
+static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_receive_finish data;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+ data.handle = sev->handle;
+ return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
+}
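The receive-side handlers mirror the send side one-for-one; a migration walks the two command sequences in lockstep. A compact sketch of the pairing:

/* Source VM command          Destination VM command */
static const struct { const char *src, *dst; } sev_migration_phases[] = {
	{ "KVM_SEV_SEND_START",       "KVM_SEV_RECEIVE_START"       },
	{ "KVM_SEV_SEND_UPDATE_DATA", "KVM_SEV_RECEIVE_UPDATE_DATA" }, /* per page */
	{ "KVM_SEV_SEND_FINISH",      "KVM_SEV_RECEIVE_FINISH"      },
};
/* KVM_SEV_SEND_CANCEL aborts an in-progress send on the source side. */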
+
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;
int r;
- if (!svm_sev_enabled() || !sev)
+ if (!sev_enabled)
return -ENOTTY;
if (!argp)
@@ -1126,13 +1515,22 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
mutex_lock(&kvm->lock);
+ /* enc_context_owner handles all memory enc operations */
+ if (is_mirroring_enc_context(kvm)) {
+ r = -EINVAL;
+ goto out;
+ }
+
switch (sev_cmd.id) {
+ case KVM_SEV_ES_INIT:
+ if (!sev_es_enabled) {
+ r = -ENOTTY;
+ goto out;
+ }
+ fallthrough;
case KVM_SEV_INIT:
r = sev_guest_init(kvm, &sev_cmd);
break;
- case KVM_SEV_ES_INIT:
- r = sev_es_guest_init(kvm, &sev_cmd);
- break;
case KVM_SEV_LAUNCH_START:
r = sev_launch_start(kvm, &sev_cmd);
break;
@@ -1163,6 +1561,27 @@ int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
case KVM_SEV_GET_ATTESTATION_REPORT:
r = sev_get_attestation_report(kvm, &sev_cmd);
break;
+ case KVM_SEV_SEND_START:
+ r = sev_send_start(kvm, &sev_cmd);
+ break;
+ case KVM_SEV_SEND_UPDATE_DATA:
+ r = sev_send_update_data(kvm, &sev_cmd);
+ break;
+ case KVM_SEV_SEND_FINISH:
+ r = sev_send_finish(kvm, &sev_cmd);
+ break;
+ case KVM_SEV_SEND_CANCEL:
+ r = sev_send_cancel(kvm, &sev_cmd);
+ break;
+ case KVM_SEV_RECEIVE_START:
+ r = sev_receive_start(kvm, &sev_cmd);
+ break;
+ case KVM_SEV_RECEIVE_UPDATE_DATA:
+ r = sev_receive_update_data(kvm, &sev_cmd);
+ break;
+ case KVM_SEV_RECEIVE_FINISH:
+ r = sev_receive_finish(kvm, &sev_cmd);
+ break;
default:
r = -EINVAL;
goto out;
@@ -1186,6 +1605,10 @@ int svm_register_enc_region(struct kvm *kvm,
if (!sev_guest(kvm))
return -ENOTTY;
+ /* If kvm is mirroring encryption context it isn't responsible for it */
+ if (is_mirroring_enc_context(kvm))
+ return -EINVAL;
+
if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
return -EINVAL;
@@ -1252,6 +1675,10 @@ int svm_unregister_enc_region(struct kvm *kvm,
struct enc_region *region;
int ret;
+ /* If kvm is mirroring encryption context it isn't responsible for it */
+ if (is_mirroring_enc_context(kvm))
+ return -EINVAL;
+
mutex_lock(&kvm->lock);
if (!sev_guest(kvm)) {
@@ -1282,6 +1709,72 @@ failed:
return ret;
}
+int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
+{
+ struct file *source_kvm_file;
+ struct kvm *source_kvm;
+ struct kvm_sev_info *mirror_sev;
+ unsigned int asid;
+ int ret;
+
+ source_kvm_file = fget(source_fd);
+ if (!file_is_kvm(source_kvm_file)) {
+ ret = -EBADF;
+ goto e_source_put;
+ }
+
+ source_kvm = source_kvm_file->private_data;
+ mutex_lock(&source_kvm->lock);
+
+ if (!sev_guest(source_kvm)) {
+ ret = -EINVAL;
+ goto e_source_unlock;
+ }
+
+ /* Mirrors of mirrors should work, but let's not get silly */
+ if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
+ ret = -EINVAL;
+ goto e_source_unlock;
+ }
+
+ asid = to_kvm_svm(source_kvm)->sev_info.asid;
+
+ /*
+ * The mirror kvm holds an enc_context_owner ref so its asid can't
+ * disappear until we're done with it.
+ */
+ kvm_get_kvm(source_kvm);
+
+ fput(source_kvm_file);
+ mutex_unlock(&source_kvm->lock);
+ mutex_lock(&kvm->lock);
+
+ if (sev_guest(kvm)) {
+ ret = -EINVAL;
+ goto e_mirror_unlock;
+ }
+
+ /* Set enc_context_owner and copy its encryption context over */
+ mirror_sev = &to_kvm_svm(kvm)->sev_info;
+ mirror_sev->enc_context_owner = source_kvm;
+ mirror_sev->asid = asid;
+ mirror_sev->active = true;
+
+ mutex_unlock(&kvm->lock);
+ return 0;
+
+e_mirror_unlock:
+ mutex_unlock(&kvm->lock);
+ kvm_put_kvm(source_kvm);
+ return ret;
+e_source_unlock:
+ mutex_unlock(&source_kvm->lock);
+e_source_put:
+ if (source_kvm_file)
+ fput(source_kvm_file);
+ return ret;
+}
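svm_vm_copy_asid_from() is the kernel half of SEV intra-host "mirror" VMs: a second VM shares the source's ASID and encryption context instead of creating its own. Userspace reaches it through a VM capability that takes the source VM's file descriptor; the capability name below matches mainline (KVM_CAP_VM_COPY_ENC_CONTEXT_FROM), but treat it as an assumption against your headers. Hedged sketch:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Make mirror_vm_fd share the encryption context of source_vm_fd. */
static int mirror_enc_context(int mirror_vm_fd, int source_vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM;	/* assumed name */
	cap.args[0] = source_vm_fd;

	return ioctl(mirror_vm_fd, KVM_ENABLE_CAP, &cap);
}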
+
void sev_vm_destroy(struct kvm *kvm)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
@@ -1291,6 +1784,12 @@ void sev_vm_destroy(struct kvm *kvm)
if (!sev_guest(kvm))
return;
+ /* If this is a mirror_kvm, release the enc_context_owner and skip SEV cleanup */
+ if (is_mirroring_enc_context(kvm)) {
+ kvm_put_kvm(sev->enc_context_owner);
+ return;
+ }
+
mutex_lock(&kvm->lock);
/*
@@ -1315,15 +1814,27 @@ void sev_vm_destroy(struct kvm *kvm)
mutex_unlock(&kvm->lock);
sev_unbind_asid(kvm, sev->handle);
- sev_asid_free(sev->asid);
+ sev_asid_free(sev);
+}
+
+void __init sev_set_cpu_caps(void)
+{
+ if (!sev_enabled)
+ kvm_cpu_cap_clear(X86_FEATURE_SEV);
+ if (!sev_es_enabled)
+ kvm_cpu_cap_clear(X86_FEATURE_SEV_ES);
}
void __init sev_hardware_setup(void)
{
- unsigned int eax, ebx, ecx, edx;
+#ifdef CONFIG_KVM_AMD_SEV
+ unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
bool sev_es_supported = false;
bool sev_supported = false;
+ if (!sev_enabled || !npt_enabled)
+ goto out;
+
/* Does the CPU support SEV? */
if (!boot_cpu_has(X86_FEATURE_SEV))
goto out;
@@ -1336,12 +1847,12 @@ void __init sev_hardware_setup(void)
/* Maximum number of encrypted guests supported simultaneously */
max_sev_asid = ecx;
-
- if (!svm_sev_enabled())
+ if (!max_sev_asid)
goto out;
/* Minimum ASID value that should be used for SEV guest */
min_sev_asid = edx;
+ sev_me_mask = 1UL << (ebx & 0x3f);
/* Initialize SEV ASID bitmaps */
sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
@@ -1349,14 +1860,21 @@ void __init sev_hardware_setup(void)
goto out;
sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
- if (!sev_reclaim_asid_bitmap)
+ if (!sev_reclaim_asid_bitmap) {
+ bitmap_free(sev_asid_bitmap);
+ sev_asid_bitmap = NULL;
+ goto out;
+ }
+
+ sev_asid_count = max_sev_asid - min_sev_asid + 1;
+ if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count))
goto out;
- pr_info("SEV supported: %u ASIDs\n", max_sev_asid - min_sev_asid + 1);
+ pr_info("SEV supported: %u ASIDs\n", sev_asid_count);
sev_supported = true;
/* SEV-ES support requested? */
- if (!sev_es)
+ if (!sev_es_enabled)
goto out;
/* Does the CPU support SEV-ES? */
@@ -1367,23 +1885,44 @@ void __init sev_hardware_setup(void)
if (min_sev_asid == 1)
goto out;
- pr_info("SEV-ES supported: %u ASIDs\n", min_sev_asid - 1);
+ sev_es_asid_count = min_sev_asid - 1;
+ if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count))
+ goto out;
+
+ pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count);
sev_es_supported = true;
out:
- sev = sev_supported;
- sev_es = sev_es_supported;
+ sev_enabled = sev_supported;
+ sev_es_enabled = sev_es_supported;
+#endif
}
void sev_hardware_teardown(void)
{
- if (!svm_sev_enabled())
+ if (!sev_enabled)
return;
+ /* No need to take sev_bitmap_lock, all VMs have been destroyed. */
+ sev_flush_asids(0, max_sev_asid);
+
bitmap_free(sev_asid_bitmap);
bitmap_free(sev_reclaim_asid_bitmap);
- sev_flush_asids();
+ misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
+ misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
+}
+
+int sev_cpu_init(struct svm_cpu_data *sd)
+{
+ if (!sev_enabled)
+ return 0;
+
+ sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL);
+ if (!sd->sev_vmcbs)
+ return -ENOMEM;
+
+ return 0;
}
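sev_cpu_init() sizes the per-CPU sev_vmcbs array as max_sev_asid + 1 because SEV ASIDs are 1-based — sev_asid_new() above returns pos + 1 — and the array is indexed directly by ASID:

/* Indexed directly by ASID; slot 0 is never used by an SEV guest,
 * valid entries are sev_vmcbs[1] .. sev_vmcbs[max_sev_asid]. */
sd->sev_vmcbs = kcalloc(max_sev_asid + 1, sizeof(void *), GFP_KERNEL);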
/*
@@ -1666,7 +2205,7 @@ vmgexit_err:
return -EINVAL;
}
-static void pre_sev_es_run(struct vcpu_svm *svm)
+void sev_es_unmap_ghcb(struct vcpu_svm *svm)
{
if (!svm->ghcb)
return;
@@ -1702,9 +2241,6 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu)
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
int asid = sev_get_asid(svm->vcpu.kvm);
- /* Perform any SEV-ES pre-run actions */
- pre_sev_es_run(svm);
-
/* Assign the asid allocated with this SEV guest */
svm->asid = asid;
@@ -1775,7 +2311,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
len, GHCB_SCRATCH_AREA_LIMIT);
return false;
}
- scratch_va = kzalloc(len, GFP_KERNEL);
+ scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
if (!scratch_va)
return false;
@@ -1849,7 +2385,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
vcpu->arch.regs[VCPU_REGS_RCX] = 0;
- ret = svm_invoke_exit_handler(svm, SVM_EXIT_CPUID);
+ ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
if (!ret) {
ret = -EINVAL;
break;
@@ -1899,8 +2435,9 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
return ret;
}
-int sev_handle_vmgexit(struct vcpu_svm *svm)
+int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_control_area *control = &svm->vmcb->control;
u64 ghcb_gpa, exit_code;
struct ghcb *ghcb;
@@ -1912,13 +2449,13 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
return sev_handle_vmgexit_msr_protocol(svm);
if (!ghcb_gpa) {
- vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB gpa is not set\n");
+ vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
return -EINVAL;
}
- if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
+ if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
/* Unable to map GHCB from guest */
- vcpu_unimpl(&svm->vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
+ vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
ghcb_gpa);
return -EINVAL;
}
@@ -1926,7 +2463,7 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
svm->ghcb = svm->ghcb_map.hva;
ghcb = svm->ghcb_map.hva;
- trace_kvm_vmgexit_enter(svm->vcpu.vcpu_id, ghcb);
+ trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);
exit_code = ghcb_get_sw_exit_code(ghcb);
@@ -1944,7 +2481,7 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
break;
- ret = kvm_sev_es_mmio_read(&svm->vcpu,
+ ret = kvm_sev_es_mmio_read(vcpu,
control->exit_info_1,
control->exit_info_2,
svm->ghcb_sa);
@@ -1953,19 +2490,19 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
break;
- ret = kvm_sev_es_mmio_write(&svm->vcpu,
+ ret = kvm_sev_es_mmio_write(vcpu,
control->exit_info_1,
control->exit_info_2,
svm->ghcb_sa);
break;
case SVM_VMGEXIT_NMI_COMPLETE:
- ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
+ ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
break;
case SVM_VMGEXIT_AP_HLT_LOOP:
- ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
+ ret = kvm_emulate_ap_reset_hold(vcpu);
break;
case SVM_VMGEXIT_AP_JUMP_TABLE: {
- struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
+ struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
switch (control->exit_info_1) {
case 0:
@@ -1990,12 +2527,12 @@ int sev_handle_vmgexit(struct vcpu_svm *svm)
break;
}
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
- vcpu_unimpl(&svm->vcpu,
+ vcpu_unimpl(vcpu,
"vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
control->exit_info_1, control->exit_info_2);
break;
default:
- ret = svm_invoke_exit_handler(svm, exit_code);
+ ret = svm_invoke_exit_handler(vcpu, exit_code);
}
return ret;
@@ -2082,7 +2619,7 @@ void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
- /* PKRU is restored on VMEXIT, save the curent host value */
+ /* PKRU is restored on VMEXIT, save the current host value */
hostsa->pkru = read_pkru();
/* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
@@ -2104,5 +2641,8 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
* the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
* non-zero value.
*/
+ if (!svm->ghcb)
+ return;
+
ghcb_set_sw_exit_info_2(svm->ghcb, 1);
}
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index baee91c1e936..8834822c00cd 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -43,6 +43,9 @@
#include "svm.h"
#include "svm_ops.h"
+#include "kvm_onhyperv.h"
+#include "svm_onhyperv.h"
+
#define __ex(x) __kvm_handle_fault_on_reboot(x)
MODULE_AUTHOR("Qumranet");
@@ -56,9 +59,6 @@ static const struct x86_cpu_id svm_cpu_id[] = {
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif
-#define IOPM_ALLOC_ORDER 2
-#define MSRPM_ALLOC_ORDER 1
-
#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3
@@ -95,6 +95,8 @@ static const struct svm_direct_access_msrs {
} direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
{ .index = MSR_STAR, .always = true },
{ .index = MSR_IA32_SYSENTER_CS, .always = true },
+ { .index = MSR_IA32_SYSENTER_EIP, .always = false },
+ { .index = MSR_IA32_SYSENTER_ESP, .always = false },
#ifdef CONFIG_X86_64
{ .index = MSR_GS_BASE, .always = true },
{ .index = MSR_FS_BASE, .always = true },
@@ -115,13 +117,6 @@ static const struct svm_direct_access_msrs {
{ .index = MSR_INVALID, .always = false },
};
-/* enable NPT for AMD64 and X86 with PAE */
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-bool npt_enabled = true;
-#else
-bool npt_enabled;
-#endif
-
/*
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
* pause_filter_count: On processors that support Pause filtering(indicated
@@ -170,9 +165,12 @@ module_param(pause_filter_count_shrink, ushort, 0444);
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);
-/* allow nested paging (virtualized MMU) for all guests */
-static int npt = true;
-module_param(npt, int, S_IRUGO);
+/*
+ * Use nested page tables by default. Note, NPT may get forced off by
+ * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
+ */
+bool npt_enabled = true;
+module_param_named(npt, npt_enabled, bool, 0444);
/* allow nested virtualization in KVM/SVM */
static int nested = true;
@@ -190,13 +188,12 @@ module_param(vls, int, 0444);
static int vgif = true;
module_param(vgif, int, 0444);
-/* enable/disable SEV support */
-int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
-module_param(sev, int, 0444);
-
-/* enable/disable SEV-ES support */
-int sev_es = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
-module_param(sev_es, int, 0444);
+/*
+ * Enable/disable AVIC. Because the defaults for APICv support differ
+ * between VMX and SVM, we cannot use module_param_named.
+ */
+static bool avic;
+module_param(avic, bool, 0444);
bool __read_mostly dump_invalid_vmcb;
module_param(dump_invalid_vmcb, bool, 0644);
@@ -218,6 +215,15 @@ struct kvm_ldttss_desc {
DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
+/*
+ * Only MSR_TSC_AUX is switched via the user return hook. EFER is switched via
+ * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
+ *
+ * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
+ * defer the restoration of TSC_AUX until the CPU returns to userspace.
+ */
+static int tsc_aux_uret_slot __read_mostly = -1;
+
static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
@@ -283,7 +289,7 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
* In this case we will return to the nested guest
* as soon as we leave SMM.
*/
- if (!is_smm(&svm->vcpu))
+ if (!is_smm(vcpu))
svm_free_nested(svm);
} else {
@@ -367,10 +373,10 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu)
bool has_error_code = vcpu->arch.exception.has_error_code;
u32 error_code = vcpu->arch.exception.error_code;
- kvm_deliver_exception_payload(&svm->vcpu);
+ kvm_deliver_exception_payload(vcpu);
if (nr == BP_VECTOR && !nrips) {
- unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
+ unsigned long rip, old_rip = kvm_rip_read(vcpu);
/*
* For guest debugging where we have to reinject #BP if some
@@ -379,8 +385,8 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu)
* raises a fault that is not intercepted. Still better than
* failing in all cases.
*/
- (void)skip_emulated_instruction(&svm->vcpu);
- rip = kvm_rip_read(&svm->vcpu);
+ (void)skip_emulated_instruction(vcpu);
+ rip = kvm_rip_read(vcpu);
svm->int3_rip = rip + svm->vmcb->save.cs.base;
svm->int3_injected = rip - old_rip;
}
@@ -451,6 +457,11 @@ static int has_svm(void)
return 0;
}
+ if (pgtable_l5_enabled()) {
+ pr_info("KVM doesn't yet support 5-level paging on AMD SVM\n");
+ return 0;
+ }
+
return 1;
}
@@ -557,23 +568,21 @@ static void svm_cpu_uninit(int cpu)
static int svm_cpu_init(int cpu)
{
struct svm_cpu_data *sd;
+ int ret = -ENOMEM;
sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
if (!sd)
- return -ENOMEM;
+ return ret;
sd->cpu = cpu;
sd->save_area = alloc_page(GFP_KERNEL);
if (!sd->save_area)
goto free_cpu_data;
+
clear_page(page_address(sd->save_area));
- if (svm_sev_enabled()) {
- sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
- sizeof(void *),
- GFP_KERNEL);
- if (!sd->sev_vmcbs)
- goto free_save_area;
- }
+ ret = sev_cpu_init(sd);
+ if (ret)
+ goto free_save_area;
per_cpu(svm_data, cpu) = sd;
@@ -583,7 +592,7 @@ free_save_area:
__free_page(sd->save_area);
free_cpu_data:
kfree(sd);
- return -ENOMEM;
+ return ret;
}
@@ -674,6 +683,9 @@ static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
msrpm[offset] = tmp;
+
+ svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
+
}
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
@@ -685,14 +697,15 @@ void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
u32 *svm_vcpu_alloc_msrpm(void)
{
- struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
+ unsigned int order = get_order(MSRPM_SIZE);
+ struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);
u32 *msrpm;
if (!pages)
return NULL;
msrpm = page_address(pages);
- memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
+ memset(msrpm, 0xff, PAGE_SIZE * (1 << order));
return msrpm;
}
@@ -711,7 +724,7 @@ void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
void svm_vcpu_free_msrpm(u32 *msrpm)
{
- __free_pages(virt_to_page(msrpm), MSRPM_ALLOC_ORDER);
+ __free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
}
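The msrpm (and, below, the iopm) allocations now derive their page order from a byte size via get_order() rather than hard-coded *_ALLOC_ORDER constants; get_order(n) returns the smallest k with PAGE_SIZE << k >= n. Sanity-checking the arithmetic for 4 KiB pages:

/* With PAGE_SIZE == 4096:
 *   get_order(4096)  == 0   -> 1 page
 *   get_order(8192)  == 1   -> 2 pages (the old MSRPM_ALLOC_ORDER was also 1)
 *   get_order(12288) == 2   -> rounds up to 4 pages
 */
unsigned int order = get_order(MSRPM_SIZE);	/* bytes -> page order */
struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);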
static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
@@ -863,8 +876,8 @@ static __init void svm_adjust_mmio_mask(void)
return;
/* If memory encryption is not enabled, use existing mask */
- rdmsrl(MSR_K8_SYSCFG, msr);
- if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ rdmsrl(MSR_AMD64_SYSCFG, msr);
+ if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
return;
enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
@@ -885,20 +898,20 @@ static __init void svm_adjust_mmio_mask(void)
*/
mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
- kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK);
+ kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
}
static void svm_hardware_teardown(void)
{
int cpu;
- if (svm_sev_enabled())
- sev_hardware_teardown();
+ sev_hardware_teardown();
for_each_possible_cpu(cpu)
svm_cpu_uninit(cpu);
- __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
+ __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
+ get_order(IOPM_SIZE));
iopm_base = 0;
}
@@ -926,6 +939,9 @@ static __init void svm_set_cpu_caps(void)
if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
boot_cpu_has(X86_FEATURE_AMD_SSBD))
kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
+
+ /* CPUID 0x8000001F (SME/SEV features) */
+ sev_set_cpu_caps();
}
static __init int svm_hardware_setup(void)
@@ -934,23 +950,31 @@ static __init int svm_hardware_setup(void)
struct page *iopm_pages;
void *iopm_va;
int r;
+ unsigned int order = get_order(IOPM_SIZE);
+
+ /*
+ * NX is required for shadow paging and for NPT if the NX huge pages
+ * mitigation is enabled.
+ */
+ if (!boot_cpu_has(X86_FEATURE_NX)) {
+ pr_err_ratelimited("NX (Execute Disable) not supported\n");
+ return -EOPNOTSUPP;
+ }
+ kvm_enable_efer_bits(EFER_NX);
- iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
+ iopm_pages = alloc_pages(GFP_KERNEL, order);
if (!iopm_pages)
return -ENOMEM;
iopm_va = page_address(iopm_pages);
- memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
+ memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
init_msrpm_offsets();
supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
- if (boot_cpu_has(X86_FEATURE_NX))
- kvm_enable_efer_bits(EFER_NX);
-
if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
kvm_enable_efer_bits(EFER_FFXSR);
@@ -960,6 +984,8 @@ static __init int svm_hardware_setup(void)
kvm_tsc_scaling_ratio_frac_bits = 32;
}
+ tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
+
/* Check for pause filtering support */
if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
pause_filter_count = 0;
@@ -973,12 +999,24 @@ static __init int svm_hardware_setup(void)
kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
}
- if (IS_ENABLED(CONFIG_KVM_AMD_SEV) && sev) {
- sev_hardware_setup();
- } else {
- sev = false;
- sev_es = false;
- }
+ /*
+ * KVM's MMU doesn't support using 2-level paging for itself, and thus
+ * NPT isn't supported if the host is using 2-level paging since host
+ * CR4 is unchanged on VMRUN.
+ */
+ if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
+ npt_enabled = false;
+
+ if (!boot_cpu_has(X86_FEATURE_NPT))
+ npt_enabled = false;
+
+ kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
+ pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
+
+ /* Note, SEV setup consumes npt_enabled. */
+ sev_hardware_setup();
+
+ svm_hv_hardware_setup();
svm_adjust_mmio_mask();
@@ -988,30 +1026,17 @@ static __init int svm_hardware_setup(void)
goto err;
}
- if (!boot_cpu_has(X86_FEATURE_NPT))
- npt_enabled = false;
-
- if (npt_enabled && !npt)
- npt_enabled = false;
-
- kvm_configure_mmu(npt_enabled, get_max_npt_level(), PG_LEVEL_1G);
- pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
-
if (nrips) {
if (!boot_cpu_has(X86_FEATURE_NRIPS))
nrips = false;
}
- if (avic) {
- if (!npt_enabled ||
- !boot_cpu_has(X86_FEATURE_AVIC) ||
- !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
- avic = false;
- } else {
- pr_info("AVIC enabled\n");
+ enable_apicv = avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC);
- amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
- }
+ if (enable_apicv) {
+ pr_info("AVIC enabled\n");
+
+ amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
}
if (vls) {
@@ -1075,29 +1100,35 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
seg->base = 0;
}
-static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- u64 g_tsc_offset = 0;
- if (is_guest_mode(vcpu)) {
- /* Write L1's TSC offset. */
- g_tsc_offset = svm->vmcb->control.tsc_offset -
- svm->nested.hsave->control.tsc_offset;
- svm->nested.hsave->control.tsc_offset = offset;
- }
+ return svm->nested.ctl.tsc_offset;
+}
- trace_kvm_write_tsc_offset(vcpu->vcpu_id,
- svm->vmcb->control.tsc_offset - g_tsc_offset,
- offset);
+static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+ return kvm_default_tsc_scaling_ratio;
+}
- svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
+static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
+ svm->vmcb->control.tsc_offset = offset;
vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
- return svm->vmcb->control.tsc_offset;
}
-static void svm_check_invpcid(struct vcpu_svm *svm)
+static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+{
+ wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
+}
+
+/* Evaluate instruction intercepts that depend on guest CPUID features. */
+static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
+ struct vcpu_svm *svm)
{
/*
* Intercept INVPCID if shadow paging is enabled to sync/free shadow
@@ -1110,14 +1141,22 @@ static void svm_check_invpcid(struct vcpu_svm *svm)
else
svm_clr_intercept(svm, INTERCEPT_INVPCID);
}
+
+ if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
+ if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
+ svm_clr_intercept(svm, INTERCEPT_RDTSCP);
+ else
+ svm_set_intercept(svm, INTERCEPT_RDTSCP);
+ }
}
-static void init_vmcb(struct vcpu_svm *svm)
+static void init_vmcb(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_control_area *control = &svm->vmcb->control;
struct vmcb_save_area *save = &svm->vmcb->save;
- svm->vcpu.arch.hflags = 0;
+ vcpu->arch.hflags = 0;
svm_set_intercept(svm, INTERCEPT_CR0_READ);
svm_set_intercept(svm, INTERCEPT_CR3_READ);
@@ -1125,7 +1164,7 @@ static void init_vmcb(struct vcpu_svm *svm)
svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
- if (!kvm_vcpu_apicv_active(&svm->vcpu))
+ if (!kvm_vcpu_apicv_active(vcpu))
svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
set_dr_intercepts(svm);
@@ -1169,12 +1208,12 @@ static void init_vmcb(struct vcpu_svm *svm)
svm_set_intercept(svm, INTERCEPT_RDPRU);
svm_set_intercept(svm, INTERCEPT_RSM);
- if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
+ if (!kvm_mwait_in_guest(vcpu->kvm)) {
svm_set_intercept(svm, INTERCEPT_MONITOR);
svm_set_intercept(svm, INTERCEPT_MWAIT);
}
- if (!kvm_hlt_in_guest(svm->vcpu.kvm))
+ if (!kvm_hlt_in_guest(vcpu->kvm))
svm_set_intercept(svm, INTERCEPT_HLT);
control->iopm_base_pa = __sme_set(iopm_base);
@@ -1200,19 +1239,19 @@ static void init_vmcb(struct vcpu_svm *svm)
init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
- svm_set_cr4(&svm->vcpu, 0);
- svm_set_efer(&svm->vcpu, 0);
+ svm_set_cr4(vcpu, 0);
+ svm_set_efer(vcpu, 0);
save->dr6 = 0xffff0ff0;
- kvm_set_rflags(&svm->vcpu, X86_EFLAGS_FIXED);
+ kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
save->rip = 0x0000fff0;
- svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
+ vcpu->arch.regs[VCPU_REGS_RIP] = save->rip;
/*
* svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
* It also updates the guest-visible cr0 value.
*/
- svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
- kvm_mmu_reset_context(&svm->vcpu);
+ svm_set_cr0(vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
+ kvm_mmu_reset_context(vcpu);
save->cr4 = X86_CR4_PAE;
/* rdx = ?? */
@@ -1224,17 +1263,18 @@ static void init_vmcb(struct vcpu_svm *svm)
clr_exception_intercept(svm, PF_VECTOR);
svm_clr_intercept(svm, INTERCEPT_CR3_READ);
svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
- save->g_pat = svm->vcpu.arch.pat;
+ save->g_pat = vcpu->arch.pat;
save->cr3 = 0;
save->cr4 = 0;
}
- svm->asid_generation = 0;
+ svm->current_vmcb->asid_generation = 0;
svm->asid = 0;
- svm->nested.vmcb12_gpa = 0;
- svm->vcpu.arch.hflags = 0;
+ svm->nested.vmcb12_gpa = INVALID_GPA;
+ svm->nested.last_vmcb12_gpa = INVALID_GPA;
+ vcpu->arch.hflags = 0;
- if (!kvm_pause_in_guest(svm->vcpu.kvm)) {
+ if (!kvm_pause_in_guest(vcpu->kvm)) {
control->pause_filter_count = pause_filter_count;
if (pause_filter_thresh)
control->pause_filter_thresh = pause_filter_thresh;
@@ -1243,20 +1283,17 @@ static void init_vmcb(struct vcpu_svm *svm)
svm_clr_intercept(svm, INTERCEPT_PAUSE);
}
- svm_check_invpcid(svm);
-
- if (kvm_vcpu_apicv_active(&svm->vcpu))
- avic_init_vmcb(svm);
+ svm_recalc_instruction_intercepts(vcpu, svm);
/*
- * If hardware supports Virtual VMLOAD VMSAVE then enable it
- * in VMCB and clear intercepts to avoid #VMEXIT.
+ * If the host supports V_SPEC_CTRL then disable the interception
+ * of MSR_IA32_SPEC_CTRL.
*/
- if (vls) {
- svm_clr_intercept(svm, INTERCEPT_VMLOAD);
- svm_clr_intercept(svm, INTERCEPT_VMSAVE);
- svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
- }
+ if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
+
+ if (kvm_vcpu_apicv_active(vcpu))
+ avic_init_vmcb(svm);
if (vgif) {
svm_clr_intercept(svm, INTERCEPT_STGI);
@@ -1264,16 +1301,18 @@ static void init_vmcb(struct vcpu_svm *svm)
svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
}
- if (sev_guest(svm->vcpu.kvm)) {
+ if (sev_guest(vcpu->kvm)) {
svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
clr_exception_intercept(svm, UD_VECTOR);
- if (sev_es_guest(svm->vcpu.kvm)) {
+ if (sev_es_guest(vcpu->kvm)) {
/* Perform SEV-ES specific VMCB updates */
sev_es_init_vmcb(svm);
}
}
+ svm_hv_init_vmcb(svm->vmcb);
+
vmcb_mark_all_dirty(svm->vmcb);
enable_gif(svm);
@@ -1290,12 +1329,12 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
svm->virt_spec_ctrl = 0;
if (!init_event) {
- svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
- MSR_IA32_APICBASE_ENABLE;
- if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
- svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
+ vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE |
+ MSR_IA32_APICBASE_ENABLE;
+ if (kvm_vcpu_is_reset_bsp(vcpu))
+ vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
}
- init_vmcb(svm);
+ init_vmcb(vcpu);
kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, false);
kvm_rdx_write(vcpu, eax);
@@ -1304,10 +1343,16 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
}
+void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
+{
+ svm->current_vmcb = target_vmcb;
+ svm->vmcb = target_vmcb->ptr;
+}
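/*
 * Usage sketch (vmcb02 being the nested VMCB this series introduces):
 * nested code flips between the two VMCBs around VMRUN and #VMEXIT,
 * e.g. svm_switch_vmcb(svm, &svm->nested.vmcb02) to enter L2 and
 * svm_switch_vmcb(svm, &svm->vmcb01) on the way back; svm->vmcb and
 * svm->current_vmcb always track the active one afterwards.
 */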
+
static int svm_create_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm;
- struct page *vmcb_page;
+ struct page *vmcb01_page;
struct page *vmsa_page = NULL;
int err;
@@ -1315,11 +1360,11 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
svm = to_svm(vcpu);
err = -ENOMEM;
- vmcb_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
- if (!vmcb_page)
+ vmcb01_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
+ if (!vmcb01_page)
goto out;
- if (sev_es_guest(svm->vcpu.kvm)) {
+ if (sev_es_guest(vcpu->kvm)) {
/*
* SEV-ES guests require a separate VMSA page used to contain
* the encrypted register state of the guest.
@@ -1355,20 +1400,21 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
svm_vcpu_init_msrpm(vcpu, svm->msrpm);
- svm->vmcb = page_address(vmcb_page);
- svm->vmcb_pa = __sme_set(page_to_pfn(vmcb_page) << PAGE_SHIFT);
+ svm->vmcb01.ptr = page_address(vmcb01_page);
+ svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
if (vmsa_page)
svm->vmsa = page_address(vmsa_page);
- svm->asid_generation = 0;
svm->guest_state_loaded = false;
- init_vmcb(svm);
+
+ svm_switch_vmcb(svm, &svm->vmcb01);
+ init_vmcb(vcpu);
svm_init_osvw(vcpu);
vcpu->arch.microcode_version = 0x01000065;
- if (sev_es_guest(svm->vcpu.kvm))
+ if (sev_es_guest(vcpu->kvm))
/* Perform SEV-ES specific VMCB creation updates */
sev_es_create_vcpu(svm);
@@ -1378,7 +1424,7 @@ error_free_vmsa_page:
if (vmsa_page)
__free_page(vmsa_page);
error_free_vmcb_page:
- __free_page(vmcb_page);
+ __free_page(vmcb01_page);
out:
return err;
}
@@ -1406,32 +1452,26 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
sev_free_vcpu(vcpu);
- __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
- __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
+ __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT));
+ __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
}
static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
- unsigned int i;
+
+ if (sev_es_guest(vcpu->kvm))
+ sev_es_unmap_ghcb(svm);
if (svm->guest_state_loaded)
return;
/*
- * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save
- * area (non-sev-es). Save ones that aren't so we can restore them
- * individually later.
- */
- for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
- rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
-
- /*
* Save additional host state that will be restored on VMEXIT (sev-es)
* or subsequent vmload of host save area.
*/
- if (sev_es_guest(svm->vcpu.kvm)) {
+ if (sev_es_guest(vcpu->kvm)) {
sev_es_prepare_guest_switch(svm, vcpu->cpu);
} else {
vmsave(__sme_page_pa(sd->save_area));
@@ -1445,29 +1485,15 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
}
}
- /* This assumes that the kernel never uses MSR_TSC_AUX */
- if (static_cpu_has(X86_FEATURE_RDTSCP))
- wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
+ if (likely(tsc_aux_uret_slot >= 0))
+ kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
svm->guest_state_loaded = true;
}
static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
{
- struct vcpu_svm *svm = to_svm(vcpu);
- unsigned int i;
-
- if (!svm->guest_state_loaded)
- return;
-
- /*
- * Certain MSRs are restored on VMEXIT (sev-es), or vmload of host save
- * area (non-sev-es). Restore the ones that weren't.
- */
- for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
- wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
-
- svm->guest_state_loaded = false;
+ to_svm(vcpu)->guest_state_loaded = false;
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -1475,11 +1501,6 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
struct vcpu_svm *svm = to_svm(vcpu);
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
- if (unlikely(cpu != vcpu->cpu)) {
- svm->asid_generation = 0;
- vmcb_mark_all_dirty(svm->vmcb);
- }
-
if (sd->current_vmcb != svm->vmcb) {
sd->current_vmcb = svm->vmcb;
indirect_branch_prediction_barrier();
@@ -1563,7 +1584,7 @@ static void svm_clear_vintr(struct vcpu_svm *svm)
/* Drop int_ctl fields related to VINTR injection. */
svm->vmcb->control.int_ctl &= mask;
if (is_guest_mode(&svm->vcpu)) {
- svm->nested.hsave->control.int_ctl &= mask;
+ svm->vmcb01.ptr->control.int_ctl &= mask;
WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
(svm->nested.ctl.int_ctl & V_TPR_MASK));
@@ -1576,16 +1597,17 @@ static void svm_clear_vintr(struct vcpu_svm *svm)
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
+ struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save;
switch (seg) {
case VCPU_SREG_CS: return &save->cs;
case VCPU_SREG_DS: return &save->ds;
case VCPU_SREG_ES: return &save->es;
- case VCPU_SREG_FS: return &save->fs;
- case VCPU_SREG_GS: return &save->gs;
+ case VCPU_SREG_FS: return &save01->fs;
+ case VCPU_SREG_GS: return &save01->gs;
case VCPU_SREG_SS: return &save->ss;
- case VCPU_SREG_TR: return &save->tr;
- case VCPU_SREG_LDTR: return &save->ldtr;
+ case VCPU_SREG_TR: return &save01->tr;
+ case VCPU_SREG_LDTR: return &save01->ldtr;
}
BUG();
return NULL;
@@ -1708,37 +1730,10 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
vmcb_mark_dirty(svm->vmcb, VMCB_DT);
}
-static void update_cr0_intercept(struct vcpu_svm *svm)
-{
- ulong gcr0;
- u64 *hcr0;
-
- /*
- * SEV-ES guests must always keep the CR intercepts cleared. CR
- * tracking is done using the CR write traps.
- */
- if (sev_es_guest(svm->vcpu.kvm))
- return;
-
- gcr0 = svm->vcpu.arch.cr0;
- hcr0 = &svm->vmcb->save.cr0;
- *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
- | (gcr0 & SVM_CR0_SELECTIVE_MASK);
-
- vmcb_mark_dirty(svm->vmcb, VMCB_CR);
-
- if (gcr0 == *hcr0) {
- svm_clr_intercept(svm, INTERCEPT_CR0_READ);
- svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
- } else {
- svm_set_intercept(svm, INTERCEPT_CR0_READ);
- svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
- }
-}
-
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ u64 hcr0 = cr0;
#ifdef CONFIG_X86_64
if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
@@ -1756,7 +1751,7 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
vcpu->arch.cr0 = cr0;
if (!npt_enabled)
- cr0 |= X86_CR0_PG | X86_CR0_WP;
+ hcr0 |= X86_CR0_PG | X86_CR0_WP;
/*
* re-enable caching here because the QEMU bios
@@ -1764,10 +1759,26 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
* reboot
*/
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
- cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
- svm->vmcb->save.cr0 = cr0;
+ hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);
+
+ svm->vmcb->save.cr0 = hcr0;
vmcb_mark_dirty(svm->vmcb, VMCB_CR);
- update_cr0_intercept(svm);
+
+ /*
+ * SEV-ES guests must always keep the CR intercepts cleared. CR
+ * tracking is done using the CR write traps.
+ */
+ if (sev_es_guest(vcpu->kvm))
+ return;
+
+ if (hcr0 == cr0) {
+ /* Selective CR0 write remains on. */
+ svm_clr_intercept(svm, INTERCEPT_CR0_READ);
+ svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
+ } else {
+ svm_set_intercept(svm, INTERCEPT_CR0_READ);
+ svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
+ }
}
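/*
 * Worked example of the hcr0/cr0 split above: without NPT, hcr0 gains
 * X86_CR0_PG | X86_CR0_WP, so hcr0 != cr0 and CR0 reads/writes stay
 * intercepted to preserve the guest's view; with NPT and no
 * quirk-driven CD/NW clearing, hcr0 == cr0 and the full intercepts can
 * be dropped, leaving only the selective CR0 write intercept armed.
 */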
static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -1846,7 +1857,7 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
}
- svm->asid_generation = sd->asid_generation;
+ svm->current_vmcb->asid_generation = sd->asid_generation;
svm->asid = sd->next_asid++;
}
@@ -1895,39 +1906,43 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
vmcb_mark_dirty(svm->vmcb, VMCB_DR);
}
-static int pf_interception(struct vcpu_svm *svm)
+static int pf_interception(struct kvm_vcpu *vcpu)
{
- u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ u64 fault_address = svm->vmcb->control.exit_info_2;
u64 error_code = svm->vmcb->control.exit_info_1;
- return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
+ return kvm_handle_page_fault(vcpu, error_code, fault_address,
static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
svm->vmcb->control.insn_bytes : NULL,
svm->vmcb->control.insn_len);
}
-static int npf_interception(struct vcpu_svm *svm)
+static int npf_interception(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
u64 error_code = svm->vmcb->control.exit_info_1;
trace_kvm_page_fault(fault_address, error_code);
- return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
+ return kvm_mmu_page_fault(vcpu, fault_address, error_code,
static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
svm->vmcb->control.insn_bytes : NULL,
svm->vmcb->control.insn_len);
}
-static int db_interception(struct vcpu_svm *svm)
+static int db_interception(struct kvm_vcpu *vcpu)
{
- struct kvm_run *kvm_run = svm->vcpu.run;
- struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct kvm_run *kvm_run = vcpu->run;
+ struct vcpu_svm *svm = to_svm(vcpu);
- if (!(svm->vcpu.guest_debug &
+ if (!(vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
!svm->nmi_singlestep) {
u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
- kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload);
+ kvm_queue_exception_p(vcpu, DB_VECTOR, payload);
return 1;
}
@@ -1937,7 +1952,7 @@ static int db_interception(struct vcpu_svm *svm)
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
- if (svm->vcpu.guest_debug &
+ if (vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
kvm_run->exit_reason = KVM_EXIT_DEBUG;
kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
@@ -1951,9 +1966,10 @@ static int db_interception(struct vcpu_svm *svm)
return 1;
}
-static int bp_interception(struct vcpu_svm *svm)
+static int bp_interception(struct kvm_vcpu *vcpu)
{
- struct kvm_run *kvm_run = svm->vcpu.run;
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct kvm_run *kvm_run = vcpu->run;
kvm_run->exit_reason = KVM_EXIT_DEBUG;
kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
@@ -1961,14 +1977,14 @@ static int bp_interception(struct vcpu_svm *svm)
return 0;
}
-static int ud_interception(struct vcpu_svm *svm)
+static int ud_interception(struct kvm_vcpu *vcpu)
{
- return handle_ud(&svm->vcpu);
+ return handle_ud(vcpu);
}
-static int ac_interception(struct vcpu_svm *svm)
+static int ac_interception(struct kvm_vcpu *vcpu)
{
- kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
+ kvm_queue_exception_e(vcpu, AC_VECTOR, 0);
return 1;
}
@@ -2011,7 +2027,7 @@ static bool is_erratum_383(void)
return true;
}
-static void svm_handle_mce(struct vcpu_svm *svm)
+static void svm_handle_mce(struct kvm_vcpu *vcpu)
{
if (is_erratum_383()) {
/*
@@ -2020,7 +2036,7 @@ static void svm_handle_mce(struct vcpu_svm *svm)
*/
pr_err("KVM: Guest triggered AMD Erratum 383\n");
- kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
return;
}
@@ -2032,20 +2048,21 @@ static void svm_handle_mce(struct vcpu_svm *svm)
kvm_machine_check();
}
-static int mc_interception(struct vcpu_svm *svm)
+static int mc_interception(struct kvm_vcpu *vcpu)
{
return 1;
}
-static int shutdown_interception(struct vcpu_svm *svm)
+static int shutdown_interception(struct kvm_vcpu *vcpu)
{
- struct kvm_run *kvm_run = svm->vcpu.run;
+ struct kvm_run *kvm_run = vcpu->run;
+ struct vcpu_svm *svm = to_svm(vcpu);
/*
* The VM save area has already been encrypted so it
* cannot be reinitialized; just terminate.
*/
- if (sev_es_guest(svm->vcpu.kvm))
+ if (sev_es_guest(vcpu->kvm))
return -EINVAL;
/*
@@ -2053,20 +2070,20 @@ static int shutdown_interception(struct vcpu_svm *svm)
* so reinitialize it.
*/
clear_page(svm->vmcb);
- init_vmcb(svm);
+ init_vmcb(vcpu);
kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
return 0;
}
-static int io_interception(struct vcpu_svm *svm)
+static int io_interception(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct vcpu_svm *svm = to_svm(vcpu);
u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
int size, in, string;
unsigned port;
- ++svm->vcpu.stat.io_exits;
+ ++vcpu->stat.io_exits;
string = (io_info & SVM_IOIO_STR_MASK) != 0;
in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
port = io_info >> 16;
@@ -2081,93 +2098,69 @@ static int io_interception(struct vcpu_svm *svm)
svm->next_rip = svm->vmcb->control.exit_info_2;
- return kvm_fast_pio(&svm->vcpu, size, port, in);
-}
-
-static int nmi_interception(struct vcpu_svm *svm)
-{
- return 1;
+ return kvm_fast_pio(vcpu, size, port, in);
}
-static int intr_interception(struct vcpu_svm *svm)
+static int nmi_interception(struct kvm_vcpu *vcpu)
{
- ++svm->vcpu.stat.irq_exits;
return 1;
}
-static int nop_on_interception(struct vcpu_svm *svm)
+static int intr_interception(struct kvm_vcpu *vcpu)
{
+ ++vcpu->stat.irq_exits;
return 1;
}
-static int halt_interception(struct vcpu_svm *svm)
-{
- return kvm_emulate_halt(&svm->vcpu);
-}
-
-static int vmmcall_interception(struct vcpu_svm *svm)
+static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
{
- return kvm_emulate_hypercall(&svm->vcpu);
-}
-
-static int vmload_interception(struct vcpu_svm *svm)
-{
- struct vmcb *nested_vmcb;
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct vmcb *vmcb12;
struct kvm_host_map map;
int ret;
- if (nested_svm_check_permissions(svm))
+ if (nested_svm_check_permissions(vcpu))
return 1;
- ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
+ ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
if (ret) {
if (ret == -EINVAL)
- kvm_inject_gp(&svm->vcpu, 0);
+ kvm_inject_gp(vcpu, 0);
return 1;
}
- nested_vmcb = map.hva;
+ vmcb12 = map.hva;
- ret = kvm_skip_emulated_instruction(&svm->vcpu);
+ ret = kvm_skip_emulated_instruction(vcpu);
- nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
- kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ if (vmload) {
+ nested_svm_vmloadsave(vmcb12, svm->vmcb);
+ svm->sysenter_eip_hi = 0;
+ svm->sysenter_esp_hi = 0;
+ } else
+ nested_svm_vmloadsave(svm->vmcb, vmcb12);
+
+ kvm_vcpu_unmap(vcpu, &map, true);
return ret;
}
-static int vmsave_interception(struct vcpu_svm *svm)
+static int vmload_interception(struct kvm_vcpu *vcpu)
{
- struct vmcb *nested_vmcb;
- struct kvm_host_map map;
- int ret;
-
- if (nested_svm_check_permissions(svm))
- return 1;
-
- ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
- if (ret) {
- if (ret == -EINVAL)
- kvm_inject_gp(&svm->vcpu, 0);
- return 1;
- }
-
- nested_vmcb = map.hva;
-
- ret = kvm_skip_emulated_instruction(&svm->vcpu);
-
- nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
- kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ return vmload_vmsave_interception(vcpu, true);
+}
- return ret;
+static int vmsave_interception(struct kvm_vcpu *vcpu)
+{
+ return vmload_vmsave_interception(vcpu, false);
}
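/*
 * The two thin wrappers above replace the former copy-pasted bodies of
 * vmload_interception() and vmsave_interception(); the bool selects the
 * copy direction (vmcb12 -> current VMCB for VMLOAD, the reverse for
 * VMSAVE), and VMLOAD also clears the Intel-only SYSENTER high halves
 * because the VMCB fields it loads are only 32 bits wide on AMD.
 */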
-static int vmrun_interception(struct vcpu_svm *svm)
+static int vmrun_interception(struct kvm_vcpu *vcpu)
{
- if (nested_svm_check_permissions(svm))
+ if (nested_svm_check_permissions(vcpu))
return 1;
- return nested_svm_vmrun(svm);
+ return nested_svm_vmrun(vcpu);
}
enum {
@@ -2206,7 +2199,7 @@ static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
[SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
[SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
};
- int (*const svm_instr_handlers[])(struct vcpu_svm *svm) = {
+ int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = {
[SVM_INSTR_VMRUN] = vmrun_interception,
[SVM_INSTR_VMLOAD] = vmload_interception,
[SVM_INSTR_VMSAVE] = vmsave_interception,
@@ -2215,17 +2208,13 @@ static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
int ret;
if (is_guest_mode(vcpu)) {
- svm->vmcb->control.exit_code = guest_mode_exit_codes[opcode];
- svm->vmcb->control.exit_info_1 = 0;
- svm->vmcb->control.exit_info_2 = 0;
-
/* Returns '1' or -errno on failure, '0' on success. */
- ret = nested_svm_vmexit(svm);
+ ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
if (ret)
return ret;
return 1;
}
- return svm_instr_handlers[opcode](svm);
+ return svm_instr_handlers[opcode](vcpu);
}
/*
@@ -2236,9 +2225,9 @@ static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
* regions (e.g. SMM memory on host).
* 2) VMware backdoor
*/
-static int gp_interception(struct vcpu_svm *svm)
+static int gp_interception(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct vcpu_svm *svm = to_svm(vcpu);
u32 error_code = svm->vmcb->control.exit_info_1;
int opcode;
@@ -2303,73 +2292,58 @@ void svm_set_gif(struct vcpu_svm *svm, bool value)
}
}
-static int stgi_interception(struct vcpu_svm *svm)
+static int stgi_interception(struct kvm_vcpu *vcpu)
{
int ret;
- if (nested_svm_check_permissions(svm))
+ if (nested_svm_check_permissions(vcpu))
return 1;
- ret = kvm_skip_emulated_instruction(&svm->vcpu);
- svm_set_gif(svm, true);
+ ret = kvm_skip_emulated_instruction(vcpu);
+ svm_set_gif(to_svm(vcpu), true);
return ret;
}
-static int clgi_interception(struct vcpu_svm *svm)
+static int clgi_interception(struct kvm_vcpu *vcpu)
{
int ret;
- if (nested_svm_check_permissions(svm))
+ if (nested_svm_check_permissions(vcpu))
return 1;
- ret = kvm_skip_emulated_instruction(&svm->vcpu);
- svm_set_gif(svm, false);
+ ret = kvm_skip_emulated_instruction(vcpu);
+ svm_set_gif(to_svm(vcpu), false);
return ret;
}
-static int invlpga_interception(struct vcpu_svm *svm)
+static int invlpga_interception(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = &svm->vcpu;
-
- trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
- kvm_rax_read(&svm->vcpu));
-
- /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
- kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
+ gva_t gva = kvm_rax_read(vcpu);
+ u32 asid = kvm_rcx_read(vcpu);
- return kvm_skip_emulated_instruction(&svm->vcpu);
-}
+ /* FIXME: Handle an address size prefix. */
+ if (!is_long_mode(vcpu))
+ gva = (u32)gva;
-static int skinit_interception(struct vcpu_svm *svm)
-{
- trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
+ trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva);
- kvm_queue_exception(&svm->vcpu, UD_VECTOR);
- return 1;
-}
+ /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
+ kvm_mmu_invlpg(vcpu, gva);
-static int wbinvd_interception(struct vcpu_svm *svm)
-{
- return kvm_emulate_wbinvd(&svm->vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
}
-static int xsetbv_interception(struct vcpu_svm *svm)
+static int skinit_interception(struct kvm_vcpu *vcpu)
{
- u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
- u32 index = kvm_rcx_read(&svm->vcpu);
-
- int err = kvm_set_xcr(&svm->vcpu, index, new_bv);
- return kvm_complete_insn_gp(&svm->vcpu, err);
-}
+ trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu));
-static int rdpru_interception(struct vcpu_svm *svm)
-{
- kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+ kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
-static int task_switch_interception(struct vcpu_svm *svm)
+static int task_switch_interception(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
u16 tss_selector;
int reason;
int int_type = svm->vmcb->control.exit_int_info &
@@ -2398,7 +2372,7 @@ static int task_switch_interception(struct vcpu_svm *svm)
if (reason == TASK_SWITCH_GATE) {
switch (type) {
case SVM_EXITINTINFO_TYPE_NMI:
- svm->vcpu.arch.nmi_injected = false;
+ vcpu->arch.nmi_injected = false;
break;
case SVM_EXITINTINFO_TYPE_EXEPT:
if (svm->vmcb->control.exit_info_2 &
@@ -2407,10 +2381,10 @@ static int task_switch_interception(struct vcpu_svm *svm)
error_code =
(u32)svm->vmcb->control.exit_info_2;
}
- kvm_clear_exception_queue(&svm->vcpu);
+ kvm_clear_exception_queue(vcpu);
break;
case SVM_EXITINTINFO_TYPE_INTR:
- kvm_clear_interrupt_queue(&svm->vcpu);
+ kvm_clear_interrupt_queue(vcpu);
break;
default:
break;
@@ -2421,77 +2395,58 @@ static int task_switch_interception(struct vcpu_svm *svm)
int_type == SVM_EXITINTINFO_TYPE_SOFT ||
(int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
(int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
- if (!skip_emulated_instruction(&svm->vcpu))
+ if (!skip_emulated_instruction(vcpu))
return 0;
}
if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
int_vec = -1;
- return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
+ return kvm_task_switch(vcpu, tss_selector, int_vec, reason,
has_error_code, error_code);
}
-static int cpuid_interception(struct vcpu_svm *svm)
+static int iret_interception(struct kvm_vcpu *vcpu)
{
- return kvm_emulate_cpuid(&svm->vcpu);
-}
+ struct vcpu_svm *svm = to_svm(vcpu);
-static int iret_interception(struct vcpu_svm *svm)
-{
- ++svm->vcpu.stat.nmi_window_exits;
- svm->vcpu.arch.hflags |= HF_IRET_MASK;
- if (!sev_es_guest(svm->vcpu.kvm)) {
+ ++vcpu->stat.nmi_window_exits;
+ vcpu->arch.hflags |= HF_IRET_MASK;
+ if (!sev_es_guest(vcpu->kvm)) {
svm_clr_intercept(svm, INTERCEPT_IRET);
- svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
+ svm->nmi_iret_rip = kvm_rip_read(vcpu);
}
- kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
return 1;
}
-static int invd_interception(struct vcpu_svm *svm)
-{
- /* Treat an INVD instruction as a NOP and just skip it. */
- return kvm_skip_emulated_instruction(&svm->vcpu);
-}
-
-static int invlpg_interception(struct vcpu_svm *svm)
+static int invlpg_interception(struct kvm_vcpu *vcpu)
{
if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
- return kvm_emulate_instruction(&svm->vcpu, 0);
+ return kvm_emulate_instruction(vcpu, 0);
- kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
- return kvm_skip_emulated_instruction(&svm->vcpu);
+ kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1);
+ return kvm_skip_emulated_instruction(vcpu);
}
-static int emulate_on_interception(struct vcpu_svm *svm)
+static int emulate_on_interception(struct kvm_vcpu *vcpu)
{
- return kvm_emulate_instruction(&svm->vcpu, 0);
+ return kvm_emulate_instruction(vcpu, 0);
}
-static int rsm_interception(struct vcpu_svm *svm)
+static int rsm_interception(struct kvm_vcpu *vcpu)
{
- return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2);
+ return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2);
}
-static int rdpmc_interception(struct vcpu_svm *svm)
-{
- int err;
-
- if (!nrips)
- return emulate_on_interception(svm);
-
- err = kvm_rdpmc(&svm->vcpu);
- return kvm_complete_insn_gp(&svm->vcpu, err);
-}
-
-static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
+static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
unsigned long val)
{
- unsigned long cr0 = svm->vcpu.arch.cr0;
+ struct vcpu_svm *svm = to_svm(vcpu);
+ unsigned long cr0 = vcpu->arch.cr0;
bool ret = false;
- if (!is_guest_mode(&svm->vcpu) ||
+ if (!is_guest_mode(vcpu) ||
(!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
return false;
@@ -2508,17 +2463,18 @@ static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
#define CR_VALID (1ULL << 63)
-static int cr_interception(struct vcpu_svm *svm)
+static int cr_interception(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
int reg, cr;
unsigned long val;
int err;
if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
- return emulate_on_interception(svm);
+ return emulate_on_interception(vcpu);
if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
- return emulate_on_interception(svm);
+ return emulate_on_interception(vcpu);
reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
@@ -2529,61 +2485,61 @@ static int cr_interception(struct vcpu_svm *svm)
err = 0;
if (cr >= 16) { /* mov to cr */
cr -= 16;
- val = kvm_register_read(&svm->vcpu, reg);
+ val = kvm_register_read(vcpu, reg);
trace_kvm_cr_write(cr, val);
switch (cr) {
case 0:
- if (!check_selective_cr0_intercepted(svm, val))
- err = kvm_set_cr0(&svm->vcpu, val);
+ if (!check_selective_cr0_intercepted(vcpu, val))
+ err = kvm_set_cr0(vcpu, val);
else
return 1;
break;
case 3:
- err = kvm_set_cr3(&svm->vcpu, val);
+ err = kvm_set_cr3(vcpu, val);
break;
case 4:
- err = kvm_set_cr4(&svm->vcpu, val);
+ err = kvm_set_cr4(vcpu, val);
break;
case 8:
- err = kvm_set_cr8(&svm->vcpu, val);
+ err = kvm_set_cr8(vcpu, val);
break;
default:
WARN(1, "unhandled write to CR%d", cr);
- kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+ kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
} else { /* mov from cr */
switch (cr) {
case 0:
- val = kvm_read_cr0(&svm->vcpu);
+ val = kvm_read_cr0(vcpu);
break;
case 2:
- val = svm->vcpu.arch.cr2;
+ val = vcpu->arch.cr2;
break;
case 3:
- val = kvm_read_cr3(&svm->vcpu);
+ val = kvm_read_cr3(vcpu);
break;
case 4:
- val = kvm_read_cr4(&svm->vcpu);
+ val = kvm_read_cr4(vcpu);
break;
case 8:
- val = kvm_get_cr8(&svm->vcpu);
+ val = kvm_get_cr8(vcpu);
break;
default:
WARN(1, "unhandled read from CR%d", cr);
- kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+ kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
- kvm_register_write(&svm->vcpu, reg, val);
+ kvm_register_write(vcpu, reg, val);
trace_kvm_cr_read(cr, val);
}
- return kvm_complete_insn_gp(&svm->vcpu, err);
+ return kvm_complete_insn_gp(vcpu, err);
}
-static int cr_trap(struct vcpu_svm *svm)
+static int cr_trap(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct vcpu_svm *svm = to_svm(vcpu);
unsigned long old_value, new_value;
unsigned int cr;
int ret = 0;
@@ -2605,7 +2561,7 @@ static int cr_trap(struct vcpu_svm *svm)
kvm_post_set_cr4(vcpu, old_value, new_value);
break;
case 8:
- ret = kvm_set_cr8(&svm->vcpu, new_value);
+ ret = kvm_set_cr8(vcpu, new_value);
break;
default:
WARN(1, "unhandled CR%d write trap", cr);
@@ -2616,57 +2572,57 @@ static int cr_trap(struct vcpu_svm *svm)
return kvm_complete_insn_gp(vcpu, ret);
}
-static int dr_interception(struct vcpu_svm *svm)
+static int dr_interception(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
int reg, dr;
unsigned long val;
int err = 0;
- if (svm->vcpu.guest_debug == 0) {
+ if (vcpu->guest_debug == 0) {
/*
* No more DR vmexits; force a reload of the debug registers
* and reenter on this instruction. The next vmexit will
* retrieve the full state of the debug registers.
*/
clr_dr_intercepts(svm);
- svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
+ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
return 1;
}
if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
- return emulate_on_interception(svm);
+ return emulate_on_interception(vcpu);
reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
if (dr >= 16) { /* mov to DRn */
dr -= 16;
- val = kvm_register_read(&svm->vcpu, reg);
- err = kvm_set_dr(&svm->vcpu, dr, val);
+ val = kvm_register_read(vcpu, reg);
+ err = kvm_set_dr(vcpu, dr, val);
} else {
- kvm_get_dr(&svm->vcpu, dr, &val);
- kvm_register_write(&svm->vcpu, reg, val);
+ kvm_get_dr(vcpu, dr, &val);
+ kvm_register_write(vcpu, reg, val);
}
- return kvm_complete_insn_gp(&svm->vcpu, err);
+ return kvm_complete_insn_gp(vcpu, err);
}
-static int cr8_write_interception(struct vcpu_svm *svm)
+static int cr8_write_interception(struct kvm_vcpu *vcpu)
{
- struct kvm_run *kvm_run = svm->vcpu.run;
int r;
- u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
+ u8 cr8_prev = kvm_get_cr8(vcpu);
/* instruction emulation calls kvm_set_cr8() */
- r = cr_interception(svm);
- if (lapic_in_kernel(&svm->vcpu))
+ r = cr_interception(vcpu);
+ if (lapic_in_kernel(vcpu))
return r;
- if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
+ if (cr8_prev <= kvm_get_cr8(vcpu))
return r;
- kvm_run->exit_reason = KVM_EXIT_SET_TPR;
+ vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
return 0;
}
-static int efer_trap(struct vcpu_svm *svm)
+static int efer_trap(struct kvm_vcpu *vcpu)
{
struct msr_data msr_info;
int ret;
@@ -2679,10 +2635,10 @@ static int efer_trap(struct vcpu_svm *svm)
*/
msr_info.host_initiated = false;
msr_info.index = MSR_EFER;
- msr_info.data = svm->vmcb->control.exit_info_1 & ~EFER_SVME;
- ret = kvm_set_msr_common(&svm->vcpu, &msr_info);
+ msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;
+ ret = kvm_set_msr_common(vcpu, &msr_info);
- return kvm_complete_insn_gp(&svm->vcpu, ret);
+ return kvm_complete_insn_gp(vcpu, ret);
}
static int svm_get_msr_feature(struct kvm_msr_entry *msr)
@@ -2709,34 +2665,36 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
switch (msr_info->index) {
case MSR_STAR:
- msr_info->data = svm->vmcb->save.star;
+ msr_info->data = svm->vmcb01.ptr->save.star;
break;
#ifdef CONFIG_X86_64
case MSR_LSTAR:
- msr_info->data = svm->vmcb->save.lstar;
+ msr_info->data = svm->vmcb01.ptr->save.lstar;
break;
case MSR_CSTAR:
- msr_info->data = svm->vmcb->save.cstar;
+ msr_info->data = svm->vmcb01.ptr->save.cstar;
break;
case MSR_KERNEL_GS_BASE:
- msr_info->data = svm->vmcb->save.kernel_gs_base;
+ msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
break;
case MSR_SYSCALL_MASK:
- msr_info->data = svm->vmcb->save.sfmask;
+ msr_info->data = svm->vmcb01.ptr->save.sfmask;
break;
#endif
case MSR_IA32_SYSENTER_CS:
- msr_info->data = svm->vmcb->save.sysenter_cs;
+ msr_info->data = svm->vmcb01.ptr->save.sysenter_cs;
break;
case MSR_IA32_SYSENTER_EIP:
- msr_info->data = svm->sysenter_eip;
+ msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
+ if (guest_cpuid_is_intel(vcpu))
+ msr_info->data |= (u64)svm->sysenter_eip_hi << 32;
break;
case MSR_IA32_SYSENTER_ESP:
- msr_info->data = svm->sysenter_esp;
+ msr_info->data = svm->vmcb01.ptr->save.sysenter_esp;
+ if (guest_cpuid_is_intel(vcpu))
+ msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
break;
case MSR_TSC_AUX:
- if (!boot_cpu_has(X86_FEATURE_RDTSCP))
- return 1;
msr_info->data = svm->tsc_aux;
break;
/*
@@ -2770,7 +2728,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
!guest_has_spec_ctrl_msr(vcpu))
return 1;
- msr_info->data = svm->spec_ctrl;
+ if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
+ msr_info->data = svm->vmcb->save.spec_ctrl;
+ else
+ msr_info->data = svm->spec_ctrl;
break;
case MSR_AMD64_VIRT_SPEC_CTRL:
if (!msr_info->host_initiated &&
@@ -2808,8 +2769,8 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
{
struct vcpu_svm *svm = to_svm(vcpu);
- if (!sev_es_guest(svm->vcpu.kvm) || !err)
- return kvm_complete_insn_gp(&svm->vcpu, err);
+ if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->ghcb))
+ return kvm_complete_insn_gp(vcpu, err);
ghcb_set_sw_exit_info_1(svm->ghcb, 1);
ghcb_set_sw_exit_info_2(svm->ghcb,
@@ -2819,11 +2780,6 @@ static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
return 1;
}
-static int rdmsr_interception(struct vcpu_svm *svm)
-{
- return kvm_emulate_rdmsr(&svm->vcpu);
-}
-
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -2852,6 +2808,7 @@ static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ int r;
u32 ecx = msr->index;
u64 data = msr->data;
@@ -2860,7 +2817,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
return 1;
vcpu->arch.pat = data;
- svm->vmcb->save.g_pat = data;
+ svm->vmcb01.ptr->save.g_pat = data;
+ if (is_guest_mode(vcpu))
+ nested_vmcb02_compute_g_pat(svm);
vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
break;
case MSR_IA32_SPEC_CTRL:
@@ -2871,7 +2830,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
if (kvm_spec_ctrl_test_value(data))
return 1;
- svm->spec_ctrl = data;
+ if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
+ svm->vmcb->save.spec_ctrl = data;
+ else
+ svm->spec_ctrl = data;
if (!data)
break;
@@ -2914,44 +2876,53 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
svm->virt_spec_ctrl = data;
break;
case MSR_STAR:
- svm->vmcb->save.star = data;
+ svm->vmcb01.ptr->save.star = data;
break;
#ifdef CONFIG_X86_64
case MSR_LSTAR:
- svm->vmcb->save.lstar = data;
+ svm->vmcb01.ptr->save.lstar = data;
break;
case MSR_CSTAR:
- svm->vmcb->save.cstar = data;
+ svm->vmcb01.ptr->save.cstar = data;
break;
case MSR_KERNEL_GS_BASE:
- svm->vmcb->save.kernel_gs_base = data;
+ svm->vmcb01.ptr->save.kernel_gs_base = data;
break;
case MSR_SYSCALL_MASK:
- svm->vmcb->save.sfmask = data;
+ svm->vmcb01.ptr->save.sfmask = data;
break;
#endif
case MSR_IA32_SYSENTER_CS:
- svm->vmcb->save.sysenter_cs = data;
+ svm->vmcb01.ptr->save.sysenter_cs = data;
break;
case MSR_IA32_SYSENTER_EIP:
- svm->sysenter_eip = data;
- svm->vmcb->save.sysenter_eip = data;
+ svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
+ /*
+ * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} MSRs
+ * when we spoof an Intel vendor ID (for cross-vendor migration).
+ * In that case we use this intercept to track the high
+ * 32-bit halves of these MSRs to support Intel's
+ * implementation of SYSENTER/SYSEXIT.
+ */
+ svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
break;
case MSR_IA32_SYSENTER_ESP:
- svm->sysenter_esp = data;
- svm->vmcb->save.sysenter_esp = data;
+ svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
+ svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
break;
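/*
 * Illustrative reconstruction, mirroring svm_get_msr() above:
 *
 *	data = svm->vmcb01.ptr->save.sysenter_esp |
 *	       ((u64)svm->sysenter_esp_hi << 32);
 *
 * The high half is preserved only when the guest spoofs an Intel
 * vendor ID; AMD hardware treats the SYSENTER MSRs as 32-bit.
 */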
case MSR_TSC_AUX:
- if (!boot_cpu_has(X86_FEATURE_RDTSCP))
- return 1;
-
/*
- * This is rare, so we update the MSR here instead of using
- * direct_access_msrs. Doing that would require a rdmsr in
- * svm_vcpu_put.
+ * TSC_AUX is usually changed only during boot and never read
+ * directly. Intercept TSC_AUX instead of exposing it to the
+ * guest via direct_access_msrs, and switch it via the user-return
+ * MSR mechanism.
*/
+ preempt_disable();
+ r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
+ preempt_enable();
+ if (r)
+ return 1;
+
svm->tsc_aux = data;
- wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
break;
case MSR_IA32_DEBUGCTLMSR:
if (!boot_cpu_has(X86_FEATURE_LBRV)) {
@@ -3005,38 +2976,32 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
return 0;
}
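/*
 * Note on the TSC_AUX handling above (kvm_set_user_return_msr() is the
 * real mechanism; this only restates its contract): the guest value is
 * loaded into the MSR before returning to the guest and the host value
 * is restored lazily on return to userspace, so svm_vcpu_put() no
 * longer needs an rdmsr/wrmsr pair.
 */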
-static int wrmsr_interception(struct vcpu_svm *svm)
+static int msr_interception(struct kvm_vcpu *vcpu)
{
- return kvm_emulate_wrmsr(&svm->vcpu);
-}
-
-static int msr_interception(struct vcpu_svm *svm)
-{
- if (svm->vmcb->control.exit_info_1)
- return wrmsr_interception(svm);
+ if (to_svm(vcpu)->vmcb->control.exit_info_1)
+ return kvm_emulate_wrmsr(vcpu);
else
- return rdmsr_interception(svm);
+ return kvm_emulate_rdmsr(vcpu);
}
-static int interrupt_window_interception(struct vcpu_svm *svm)
+static int interrupt_window_interception(struct kvm_vcpu *vcpu)
{
- kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
- svm_clear_vintr(svm);
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ svm_clear_vintr(to_svm(vcpu));
/*
* For AVIC, the only reason to end up here is ExtINTs.
* In this case AVIC was temporarily disabled for
* requesting the IRQ window and we have to re-enable it.
*/
- svm_toggle_avic_for_irq_window(&svm->vcpu, true);
+ svm_toggle_avic_for_irq_window(vcpu, true);
- ++svm->vcpu.stat.irq_window_exits;
+ ++vcpu->stat.irq_window_exits;
return 1;
}
-static int pause_interception(struct vcpu_svm *svm)
+static int pause_interception(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = &svm->vcpu;
bool in_kernel;
/*
@@ -3044,35 +3009,18 @@ static int pause_interception(struct vcpu_svm *svm)
* vcpu->arch.preempted_in_kernel can never be true. Just
* set in_kernel to false as well.
*/
- in_kernel = !sev_es_guest(svm->vcpu.kvm) && svm_get_cpl(vcpu) == 0;
+ in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0;
if (!kvm_pause_in_guest(vcpu->kvm))
grow_ple_window(vcpu);
kvm_vcpu_on_spin(vcpu, in_kernel);
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
-static int nop_interception(struct vcpu_svm *svm)
+static int invpcid_interception(struct kvm_vcpu *vcpu)
{
- return kvm_skip_emulated_instruction(&(svm->vcpu));
-}
-
-static int monitor_interception(struct vcpu_svm *svm)
-{
- printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
- return nop_interception(svm);
-}
-
-static int mwait_interception(struct vcpu_svm *svm)
-{
- printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
- return nop_interception(svm);
-}
-
-static int invpcid_interception(struct vcpu_svm *svm)
-{
- struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct vcpu_svm *svm = to_svm(vcpu);
unsigned long type;
gva_t gva;
@@ -3097,7 +3045,7 @@ static int invpcid_interception(struct vcpu_svm *svm)
return kvm_handle_invpcid(vcpu, type, gva);
}
-static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
+static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[SVM_EXIT_READ_CR0] = cr_interception,
[SVM_EXIT_READ_CR3] = cr_interception,
[SVM_EXIT_READ_CR4] = cr_interception,
@@ -3132,15 +3080,15 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_EXCP_BASE + GP_VECTOR] = gp_interception,
[SVM_EXIT_INTR] = intr_interception,
[SVM_EXIT_NMI] = nmi_interception,
- [SVM_EXIT_SMI] = nop_on_interception,
- [SVM_EXIT_INIT] = nop_on_interception,
+ [SVM_EXIT_SMI] = kvm_emulate_as_nop,
+ [SVM_EXIT_INIT] = kvm_emulate_as_nop,
[SVM_EXIT_VINTR] = interrupt_window_interception,
- [SVM_EXIT_RDPMC] = rdpmc_interception,
- [SVM_EXIT_CPUID] = cpuid_interception,
+ [SVM_EXIT_RDPMC] = kvm_emulate_rdpmc,
+ [SVM_EXIT_CPUID] = kvm_emulate_cpuid,
[SVM_EXIT_IRET] = iret_interception,
- [SVM_EXIT_INVD] = invd_interception,
+ [SVM_EXIT_INVD] = kvm_emulate_invd,
[SVM_EXIT_PAUSE] = pause_interception,
- [SVM_EXIT_HLT] = halt_interception,
+ [SVM_EXIT_HLT] = kvm_emulate_halt,
[SVM_EXIT_INVLPG] = invlpg_interception,
[SVM_EXIT_INVLPGA] = invlpga_interception,
[SVM_EXIT_IOIO] = io_interception,
@@ -3148,17 +3096,18 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
[SVM_EXIT_SHUTDOWN] = shutdown_interception,
[SVM_EXIT_VMRUN] = vmrun_interception,
- [SVM_EXIT_VMMCALL] = vmmcall_interception,
+ [SVM_EXIT_VMMCALL] = kvm_emulate_hypercall,
[SVM_EXIT_VMLOAD] = vmload_interception,
[SVM_EXIT_VMSAVE] = vmsave_interception,
[SVM_EXIT_STGI] = stgi_interception,
[SVM_EXIT_CLGI] = clgi_interception,
[SVM_EXIT_SKINIT] = skinit_interception,
- [SVM_EXIT_WBINVD] = wbinvd_interception,
- [SVM_EXIT_MONITOR] = monitor_interception,
- [SVM_EXIT_MWAIT] = mwait_interception,
- [SVM_EXIT_XSETBV] = xsetbv_interception,
- [SVM_EXIT_RDPRU] = rdpru_interception,
+ [SVM_EXIT_RDTSCP] = kvm_handle_invalid_op,
+ [SVM_EXIT_WBINVD] = kvm_emulate_wbinvd,
+ [SVM_EXIT_MONITOR] = kvm_emulate_monitor,
+ [SVM_EXIT_MWAIT] = kvm_emulate_mwait,
+ [SVM_EXIT_XSETBV] = kvm_emulate_xsetbv,
+ [SVM_EXIT_RDPRU] = kvm_handle_invalid_op,
[SVM_EXIT_EFER_WRITE_TRAP] = efer_trap,
[SVM_EXIT_CR0_WRITE_TRAP] = cr_trap,
[SVM_EXIT_CR4_WRITE_TRAP] = cr_trap,
@@ -3176,12 +3125,15 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_control_area *control = &svm->vmcb->control;
struct vmcb_save_area *save = &svm->vmcb->save;
+ struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save;
if (!dump_invalid_vmcb) {
pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
return;
}
+ pr_err("VMCB %p, last attempted VMRUN on CPU %d\n",
+ svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
pr_err("VMCB Control Area:\n");
pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
@@ -3238,28 +3190,28 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
save->ds.limit, save->ds.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"fs:",
- save->fs.selector, save->fs.attrib,
- save->fs.limit, save->fs.base);
+ save01->fs.selector, save01->fs.attrib,
+ save01->fs.limit, save01->fs.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"gs:",
- save->gs.selector, save->gs.attrib,
- save->gs.limit, save->gs.base);
+ save01->gs.selector, save01->gs.attrib,
+ save01->gs.limit, save01->gs.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"gdtr:",
save->gdtr.selector, save->gdtr.attrib,
save->gdtr.limit, save->gdtr.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"ldtr:",
- save->ldtr.selector, save->ldtr.attrib,
- save->ldtr.limit, save->ldtr.base);
+ save01->ldtr.selector, save01->ldtr.attrib,
+ save01->ldtr.limit, save01->ldtr.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"idtr:",
save->idtr.selector, save->idtr.attrib,
save->idtr.limit, save->idtr.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"tr:",
- save->tr.selector, save->tr.attrib,
- save->tr.limit, save->tr.base);
+ save01->tr.selector, save01->tr.attrib,
+ save01->tr.limit, save01->tr.base);
pr_err("cpl: %d efer: %016llx\n",
save->cpl, save->efer);
pr_err("%-15s %016llx %-13s %016llx\n",
@@ -3273,15 +3225,15 @@ static void dump_vmcb(struct kvm_vcpu *vcpu)
pr_err("%-15s %016llx %-13s %016llx\n",
"rsp:", save->rsp, "rax:", save->rax);
pr_err("%-15s %016llx %-13s %016llx\n",
- "star:", save->star, "lstar:", save->lstar);
+ "star:", save01->star, "lstar:", save01->lstar);
pr_err("%-15s %016llx %-13s %016llx\n",
- "cstar:", save->cstar, "sfmask:", save->sfmask);
+ "cstar:", save01->cstar, "sfmask:", save01->sfmask);
pr_err("%-15s %016llx %-13s %016llx\n",
- "kernel_gs_base:", save->kernel_gs_base,
- "sysenter_cs:", save->sysenter_cs);
+ "kernel_gs_base:", save01->kernel_gs_base,
+ "sysenter_cs:", save01->sysenter_cs);
pr_err("%-15s %016llx %-13s %016llx\n",
- "sysenter_esp:", save->sysenter_esp,
- "sysenter_eip:", save->sysenter_eip);
+ "sysenter_esp:", save01->sysenter_esp,
+ "sysenter_eip:", save01->sysenter_eip);
pr_err("%-15s %016llx %-13s %016llx\n",
"gpat:", save->g_pat, "dbgctl:", save->dbgctl);
pr_err("%-15s %016llx %-13s %016llx\n",
@@ -3308,24 +3260,24 @@ static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
return -EINVAL;
}
-int svm_invoke_exit_handler(struct vcpu_svm *svm, u64 exit_code)
+int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
{
- if (svm_handle_invalid_exit(&svm->vcpu, exit_code))
+ if (svm_handle_invalid_exit(vcpu, exit_code))
return 0;
#ifdef CONFIG_RETPOLINE
if (exit_code == SVM_EXIT_MSR)
- return msr_interception(svm);
+ return msr_interception(vcpu);
else if (exit_code == SVM_EXIT_VINTR)
- return interrupt_window_interception(svm);
+ return interrupt_window_interception(vcpu);
else if (exit_code == SVM_EXIT_INTR)
- return intr_interception(svm);
+ return intr_interception(vcpu);
else if (exit_code == SVM_EXIT_HLT)
- return halt_interception(svm);
+ return kvm_emulate_halt(vcpu);
else if (exit_code == SVM_EXIT_NPF)
- return npf_interception(svm);
+ return npf_interception(vcpu);
#endif
- return svm_exit_handlers[exit_code](svm);
+ return svm_exit_handlers[exit_code](vcpu);
}
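/*
 * Note on the CONFIG_RETPOLINE block above: the hottest exit reasons
 * are dispatched through direct calls so that, when retpolines are
 * enabled, the expensive indirect call through svm_exit_handlers[] is
 * taken only for the cold paths.
 */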
static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
@@ -3394,7 +3346,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
if (exit_fastpath != EXIT_FASTPATH_NONE)
return 1;
- return svm_invoke_exit_handler(svm, exit_code);
+ return svm_invoke_exit_handler(vcpu, exit_code);
}
static void reload_tss(struct kvm_vcpu *vcpu)
@@ -3405,15 +3357,27 @@ static void reload_tss(struct kvm_vcpu *vcpu)
load_TR_desc();
}
-static void pre_svm_run(struct vcpu_svm *svm)
+static void pre_svm_run(struct kvm_vcpu *vcpu)
{
- struct svm_cpu_data *sd = per_cpu(svm_data, svm->vcpu.cpu);
+ struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
+ struct vcpu_svm *svm = to_svm(vcpu);
- if (sev_guest(svm->vcpu.kvm))
- return pre_sev_run(svm, svm->vcpu.cpu);
+ /*
+ * If the previous vmrun of the vmcb occurred on a different physical
+ * cpu, then mark the vmcb dirty and assign a new asid. Hardware's
+ * vmcb clean bits are per logical CPU, as are KVM's asid assignments.
+ */
+ if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
+ svm->current_vmcb->asid_generation = 0;
+ vmcb_mark_all_dirty(svm->vmcb);
+ svm->current_vmcb->cpu = vcpu->cpu;
+ }
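+	/*
+	 * Zeroing asid_generation above forces the generation check at
+	 * the bottom of this function to fail, so non-SEV guests pick up
+	 * a fresh ASID for the new physical CPU before the next VMRUN.
+	 */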
+
+ if (sev_guest(vcpu->kvm))
+ return pre_sev_run(svm, vcpu->cpu);
/* FIXME: handle wraparound of asid_generation */
- if (svm->asid_generation != sd->asid_generation)
+ if (svm->current_vmcb->asid_generation != sd->asid_generation)
new_asid(svm, sd);
}
@@ -3423,7 +3387,7 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
vcpu->arch.hflags |= HF_NMI_MASK;
- if (!sev_es_guest(svm->vcpu.kvm))
+ if (!sev_es_guest(vcpu->kvm))
svm_set_intercept(svm, INTERCEPT_IRET);
++vcpu->stat.nmi_injections;
}
@@ -3477,7 +3441,7 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
return false;
ret = (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
- (svm->vcpu.arch.hflags & HF_NMI_MASK);
+ (vcpu->arch.hflags & HF_NMI_MASK);
return ret;
}
@@ -3497,9 +3461,7 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
- struct vcpu_svm *svm = to_svm(vcpu);
-
- return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
+ return !!(vcpu->arch.hflags & HF_NMI_MASK);
}
static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
@@ -3507,12 +3469,12 @@ static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
struct vcpu_svm *svm = to_svm(vcpu);
if (masked) {
- svm->vcpu.arch.hflags |= HF_NMI_MASK;
- if (!sev_es_guest(svm->vcpu.kvm))
+ vcpu->arch.hflags |= HF_NMI_MASK;
+ if (!sev_es_guest(vcpu->kvm))
svm_set_intercept(svm, INTERCEPT_IRET);
} else {
- svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
- if (!sev_es_guest(svm->vcpu.kvm))
+ vcpu->arch.hflags &= ~HF_NMI_MASK;
+ if (!sev_es_guest(vcpu->kvm))
svm_clr_intercept(svm, INTERCEPT_IRET);
}
}
@@ -3525,7 +3487,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
if (!gif_set(svm))
return true;
- if (sev_es_guest(svm->vcpu.kvm)) {
+ if (sev_es_guest(vcpu->kvm)) {
/*
* SEV-ES guests do not expose RFLAGS. Use the VMCB interrupt mask
* bit to determine the state of the IF flag.
@@ -3535,7 +3497,7 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
} else if (is_guest_mode(vcpu)) {
/* As long as interrupts are being delivered... */
if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
- ? !(svm->nested.hsave->save.rflags & X86_EFLAGS_IF)
+ ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
: !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
return true;
@@ -3594,8 +3556,7 @@ static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
- == HF_NMI_MASK)
+ if ((vcpu->arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) == HF_NMI_MASK)
return; /* IRET will cause a vm exit */
if (!gif_set(svm)) {
@@ -3637,7 +3598,7 @@ void svm_flush_tlb(struct kvm_vcpu *vcpu)
if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
else
- svm->asid_generation--;
+ svm->current_vmcb->asid_generation--;
}
static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
@@ -3674,8 +3635,9 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}
-static void svm_complete_interrupts(struct vcpu_svm *svm)
+static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
{
+ struct vcpu_svm *svm = to_svm(vcpu);
u8 vector;
int type;
u32 exitintinfo = svm->vmcb->control.exit_int_info;
@@ -3687,28 +3649,28 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
* If we've made progress since setting HF_IRET_MASK, we've
* executed an IRET and can allow NMI injection.
*/
- if ((svm->vcpu.arch.hflags & HF_IRET_MASK) &&
- (sev_es_guest(svm->vcpu.kvm) ||
- kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip)) {
- svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
- kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+ if ((vcpu->arch.hflags & HF_IRET_MASK) &&
+ (sev_es_guest(vcpu->kvm) ||
+ kvm_rip_read(vcpu) != svm->nmi_iret_rip)) {
+ vcpu->arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
}
- svm->vcpu.arch.nmi_injected = false;
- kvm_clear_exception_queue(&svm->vcpu);
- kvm_clear_interrupt_queue(&svm->vcpu);
+ vcpu->arch.nmi_injected = false;
+ kvm_clear_exception_queue(vcpu);
+ kvm_clear_interrupt_queue(vcpu);
if (!(exitintinfo & SVM_EXITINTINFO_VALID))
return;
- kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
switch (type) {
case SVM_EXITINTINFO_TYPE_NMI:
- svm->vcpu.arch.nmi_injected = true;
+ vcpu->arch.nmi_injected = true;
break;
case SVM_EXITINTINFO_TYPE_EXEPT:
/*
@@ -3724,21 +3686,20 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
*/
if (kvm_exception_is_soft(vector)) {
if (vector == BP_VECTOR && int3_injected &&
- kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
- kvm_rip_write(&svm->vcpu,
- kvm_rip_read(&svm->vcpu) -
- int3_injected);
+ kvm_is_linear_rip(vcpu, svm->int3_rip))
+ kvm_rip_write(vcpu,
+ kvm_rip_read(vcpu) - int3_injected);
break;
}
if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
u32 err = svm->vmcb->control.exit_int_info_err;
- kvm_requeue_exception_e(&svm->vcpu, vector, err);
+ kvm_requeue_exception_e(vcpu, vector, err);
} else
- kvm_requeue_exception(&svm->vcpu, vector);
+ kvm_requeue_exception(vcpu, vector);
break;
case SVM_EXITINTINFO_TYPE_INTR:
- kvm_queue_interrupt(&svm->vcpu, vector, false);
+ kvm_queue_interrupt(vcpu, vector, false);
break;
default:
break;
@@ -3753,7 +3714,7 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
control->exit_int_info = control->event_inj;
control->exit_int_info_err = control->event_inj_err;
control->event_inj = 0;
- svm_complete_interrupts(svm);
+ svm_complete_interrupts(vcpu);
}
static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
@@ -3765,57 +3726,32 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
return EXIT_FASTPATH_NONE;
}
-static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu,
- struct vcpu_svm *svm)
+static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
- /*
- * VMENTER enables interrupts (host state), but the kernel state is
- * interrupts disabled when this is invoked. Also tell RCU about
- * it. This is the same logic as for exit_to_user_mode().
- *
- * This ensures that e.g. latency analysis on the host observes
- * guest mode as interrupt enabled.
- *
- * guest_enter_irqoff() informs context tracking about the
- * transition to guest mode and if enabled adjusts RCU state
- * accordingly.
- */
- instrumentation_begin();
- trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
- instrumentation_end();
+ struct vcpu_svm *svm = to_svm(vcpu);
+ unsigned long vmcb_pa = svm->current_vmcb->pa;
- guest_enter_irqoff();
- lockdep_hardirqs_on(CALLER_ADDR0);
+ kvm_guest_enter_irqoff();
- if (sev_es_guest(svm->vcpu.kvm)) {
- __svm_sev_es_vcpu_run(svm->vmcb_pa);
+ if (sev_es_guest(vcpu->kvm)) {
+ __svm_sev_es_vcpu_run(vmcb_pa);
} else {
struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
- __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
+ /*
+ * Use a single vmcb (vmcb01 because it's always valid) for
+ * context switching guest state via VMLOAD/VMSAVE, that way
+ * the state doesn't need to be copied between vmcb01 and
+ * vmcb02 when switching vmcbs for nested virtualization.
+ */
+ vmload(svm->vmcb01.pa);
+ __svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs);
+ vmsave(svm->vmcb01.pa);
vmload(__sme_page_pa(sd->save_area));
}
- /*
- * VMEXIT disables interrupts (host state), but tracing and lockdep
- * have them in state 'on' as recorded before entering guest mode.
- * Same as enter_from_user_mode().
- *
- * guest_exit_irqoff() restores host context and reinstates RCU if
- * enabled and required.
- *
- * This needs to be done before the below as native_read_msr()
- * contains a tracepoint and x86_spec_ctrl_restore_host() calls
- * into world and some more.
- */
- lockdep_hardirqs_off(CALLER_ADDR0);
- guest_exit_irqoff();
-
- instrumentation_begin();
- trace_hardirqs_off_finish();
- instrumentation_end();
+ kvm_guest_exit_irqoff();
}
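/*
 * Ordering note for the non-SEV-ES path above: the VMLOAD/VMSAVE-managed
 * state (FS/GS/TR/LDTR, KernelGSbase, STAR/LSTAR/CSTAR/SFMASK, SYSENTER)
 * always lives in vmcb01, even while vmcb02 is the one being run, so
 * vmload(vmcb01) installs guest state, __svm_vcpu_run() executes the
 * current VMCB, vmsave(vmcb01) stashes guest state again, and the final
 * vmload() restores the host save area.
 */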
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
@@ -3844,7 +3780,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
smp_send_reschedule(vcpu->cpu);
}
- pre_svm_run(svm);
+ pre_svm_run(vcpu);
sync_lapic_to_cr8(vcpu);
@@ -3854,11 +3790,13 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
}
svm->vmcb->save.cr2 = vcpu->arch.cr2;
+ svm_hv_update_vp_id(svm->vmcb, vcpu);
+
/*
* Run with all-zero DR6 unless needed, so that we can get the exact cause
* of a #DB.
*/
- if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
+ if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
svm_set_dr6(svm, vcpu->arch.dr6);
else
svm_set_dr6(svm, DR6_ACTIVE_LOW);
@@ -3874,9 +3812,10 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
* is no need to worry about the conditional branch over the wrmsr
* being speculatively taken.
*/
- x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
+ if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
+ x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
- svm_vcpu_enter_exit(vcpu, svm);
+ svm_vcpu_enter_exit(vcpu);
/*
* We do not use IBRS in the kernel. If this vCPU has used the
@@ -3893,15 +3832,17 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
* If the L02 MSR bitmap does not intercept the MSR, then we need to
* save it.
*/
- if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+ if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL) &&
+ unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
- if (!sev_es_guest(svm->vcpu.kvm))
+ if (!sev_es_guest(vcpu->kvm))
reload_tss(vcpu);
- x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
+ if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
+ x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
- if (!sev_es_guest(svm->vcpu.kvm)) {
+ if (!sev_es_guest(vcpu->kvm)) {
vcpu->arch.cr2 = svm->vmcb->save.cr2;
vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
@@ -3909,7 +3850,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
}
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
- kvm_before_interrupt(&svm->vcpu);
+ kvm_before_interrupt(vcpu);
kvm_load_host_xsave_state(vcpu);
stgi();
@@ -3917,13 +3858,19 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
/* Any pending NMI will happen here */
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
- kvm_after_interrupt(&svm->vcpu);
+ kvm_after_interrupt(vcpu);
sync_cr8_to_lapic(vcpu);
svm->next_rip = 0;
- if (is_guest_mode(&svm->vcpu)) {
- sync_nested_vmcb_control(svm);
+ if (is_guest_mode(vcpu)) {
+ nested_sync_control_from_vmcb02(svm);
+
+ /* Track VMRUNs that have made it past consistency checking */
+ if (svm->nested.nested_run_pending &&
+ svm->vmcb->control.exit_code != SVM_EXIT_ERR)
+ ++vcpu->stat.nested_run;
+
svm->nested.nested_run_pending = 0;
}
@@ -3932,13 +3879,11 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
/* if exit due to PF check for async PF */
if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
- svm->vcpu.arch.apf.host_apf_flags =
+ vcpu->arch.apf.host_apf_flags =
kvm_read_and_reset_apf_flags();
- if (npt_enabled) {
- vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
- vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
- }
+ if (npt_enabled)
+ kvm_register_clear_available(vcpu, VCPU_EXREG_PDPTR);
/*
* We need to handle MC intercepts here before the vcpu has a chance to
@@ -3946,9 +3891,9 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
*/
if (unlikely(svm->vmcb->control.exit_code ==
SVM_EXIT_EXCP_BASE + MC_VECTOR))
- svm_handle_mce(svm);
+ svm_handle_mce(vcpu);
- svm_complete_interrupts(svm);
+ svm_complete_interrupts(vcpu);
if (is_guest_mode(vcpu))
return EXIT_FASTPATH_NONE;
@@ -3956,21 +3901,28 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
return svm_exit_handlers_fastpath(vcpu);
}
-static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long root,
+static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
int root_level)
{
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long cr3;
- cr3 = __sme_set(root);
if (npt_enabled) {
- svm->vmcb->control.nested_cr3 = cr3;
+ svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
+ hv_track_root_tdp(vcpu, root_hpa);
+
/* Loading L2's CR3 is handled by enter_svm_guest_mode. */
if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
return;
cr3 = vcpu->arch.cr3;
+ } else if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
+ cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu);
+ } else {
+ /* PCID in the guest should be impossible with a 32-bit MMU. */
+ WARN_ON_ONCE(kvm_get_active_pcid(vcpu));
+ cr3 = root_hpa;
}
svm->vmcb->save.cr3 = cr3;
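
The shadow-paging branch above composes CR3 from the root HPA and, on a 4/5-level MMU, the active PCID in bits 11:0. A minimal standalone sketch of that composition (userspace C; the SME C-bit set by __sme_set() is omitted here, and the values are illustrative — kvm_get_active_pcid() is the kernel helper this stands in for):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t root_hpa = 0x1a2b3c000ULL; /* page-aligned root table */
        uint64_t pcid = 0x005;              /* guest CR3 bits 11:0 when CR4.PCIDE=1 */

        /* 4/5-level MMU: root plus PCID; a 32-bit MMU must have PCID == 0 */
        uint64_t cr3 = root_hpa | pcid;

        printf("cr3=%#llx root=%#llx pcid=%#llx\n",
               (unsigned long long)cr3,
               (unsigned long long)(cr3 & ~0xfffULL),
               (unsigned long long)(cr3 & 0xfffULL));
        return 0;
}
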
@@ -4047,10 +3999,9 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
/* Update nrips enabled cache */
svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
- guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
+ guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
- /* Check again if INVPCID interception if required */
- svm_check_invpcid(svm);
+ svm_recalc_instruction_intercepts(vcpu, svm);
/* For sev guests, the memory encryption bit is not reserved in CR3. */
if (sev_guest(vcpu->kvm)) {
@@ -4059,24 +4010,50 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
}
- if (!kvm_vcpu_apicv_active(vcpu))
- return;
+ if (kvm_vcpu_apicv_active(vcpu)) {
+ /*
+ * AVIC does not work with an x2APIC mode guest. If the X2APIC feature
+ * is exposed to the guest, disable AVIC.
+ */
+ if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
+ kvm_request_apicv_update(vcpu->kvm, false,
+ APICV_INHIBIT_REASON_X2APIC);
- /*
- * AVIC does not work with an x2APIC mode guest. If the X2APIC feature
- * is exposed to the guest, disable AVIC.
- */
- if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC))
- kvm_request_apicv_update(vcpu->kvm, false,
- APICV_INHIBIT_REASON_X2APIC);
+ /*
+ * Currently, AVIC does not work with nested virtualization.
+ * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
+ */
+ if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+ kvm_request_apicv_update(vcpu->kvm, false,
+ APICV_INHIBIT_REASON_NESTED);
+ }
- /*
- * Currently, AVIC does not work with nested virtualization.
- * So, we disable AVIC when cpuid for SVM is set in the L1 guest.
- */
- if (nested && guest_cpuid_has(vcpu, X86_FEATURE_SVM))
- kvm_request_apicv_update(vcpu->kvm, false,
- APICV_INHIBIT_REASON_NESTED);
+ if (guest_cpuid_is_intel(vcpu)) {
+ /*
+ * We must intercept SYSENTER_EIP and SYSENTER_ESP
+ * accesses because the processor only stores 32 bits.
+ * For the same reason we cannot use virtual VMLOAD/VMSAVE.
+ */
+ svm_set_intercept(svm, INTERCEPT_VMLOAD);
+ svm_set_intercept(svm, INTERCEPT_VMSAVE);
+ svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
+
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0);
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0);
+ } else {
+ /*
+ * If hardware supports Virtual VMLOAD VMSAVE then enable it
+ * in VMCB and clear intercepts to avoid #VMEXIT.
+ */
+ if (vls) {
+ svm_clr_intercept(svm, INTERCEPT_VMLOAD);
+ svm_clr_intercept(svm, INTERCEPT_VMSAVE);
+ svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
+ }
+ /* No need to intercept these MSRs */
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
+ set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
+ }
}
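
The Intel-guest branch above exists because AMD hardware keeps only the low 32 bits of the SYSENTER_EIP/ESP MSRs, so KVM must intercept writes and track the high halves itself (the sysenter_eip_hi/sysenter_esp_hi fields added to struct vcpu_svm in svm.h below). A standalone sketch of the split and reassembly, with illustrative values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t guest_write = 0xffffffff81000000ULL; /* 64-bit MSR write */

        uint32_t lo = (uint32_t)guest_write;            /* what the VMCB field keeps */
        uint32_t sysenter_eip_hi = (uint32_t)(guest_write >> 32); /* tracked by KVM */

        /* reassemble on MSR read */
        uint64_t read_back = ((uint64_t)sysenter_eip_hi << 32) | lo;

        assert(read_back == guest_write);
        printf("reassembled %#llx\n", (unsigned long long)read_back);
        return 0;
}
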
static bool svm_has_wbinvd_exit(void)
@@ -4308,7 +4285,7 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
return !svm_smi_blocked(vcpu);
}
-static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
int ret;
@@ -4330,7 +4307,7 @@ static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
return 0;
}
-static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_host_map map;
@@ -4348,15 +4325,15 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
if (!(saved_efer & EFER_SVME))
return 1;
- if (kvm_vcpu_map(&svm->vcpu,
+ if (kvm_vcpu_map(vcpu,
gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
return 1;
if (svm_allocate_nested(svm))
return 1;
- ret = enter_svm_guest_mode(svm, vmcb12_gpa, map.hva);
- kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, map.hva);
+ kvm_vcpu_unmap(vcpu, &map, true);
}
}
@@ -4399,7 +4376,7 @@ static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int i
*
* This happens because CPU microcode reading instruction bytes
* uses a special opcode which attempts to read data using CPL=0
- * priviledges. The microcode reads CS:RIP and if it hits a SMAP
+ * privileges. The microcode reads CS:RIP and if it hits a SMAP
* fault, it gives up and returns no instruction bytes.
*
* Detection:
@@ -4486,13 +4463,12 @@ static int svm_vm_init(struct kvm *kvm)
if (!pause_filter_count || !pause_filter_thresh)
kvm->arch.pause_in_guest = true;
- if (avic) {
+ if (enable_apicv) {
int ret = avic_vm_init(kvm);
if (ret)
return ret;
}
- kvm_apicv_init(kvm, avic);
return 0;
}
@@ -4583,7 +4559,10 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.has_wbinvd_exit = svm_has_wbinvd_exit,
- .write_l1_tsc_offset = svm_write_l1_tsc_offset,
+ .get_l2_tsc_offset = svm_get_l2_tsc_offset,
+ .get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
+ .write_tsc_offset = svm_write_tsc_offset,
+ .write_tsc_multiplier = svm_write_tsc_multiplier,
.load_mmu_pgd = svm_load_mmu_pgd,
@@ -4603,14 +4582,16 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.setup_mce = svm_setup_mce,
.smi_allowed = svm_smi_allowed,
- .pre_enter_smm = svm_pre_enter_smm,
- .pre_leave_smm = svm_pre_leave_smm,
+ .enter_smm = svm_enter_smm,
+ .leave_smm = svm_leave_smm,
.enable_smi_window = svm_enable_smi_window,
.mem_enc_op = svm_mem_enc_op,
.mem_enc_reg_region = svm_register_enc_region,
.mem_enc_unreg_region = svm_unregister_enc_region,
+ .vm_copy_enc_context_from = svm_vm_copy_asid_from,
+
.can_emulate_instruction = svm_can_emulate_instruction,
.apic_init_signal_blocked = svm_apic_init_signal_blocked,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 39e071fdab0c..f89b623bb591 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -20,19 +20,23 @@
#include <linux/bits.h>
#include <asm/svm.h>
+#include <asm/sev-common.h>
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
-static const u32 host_save_user_msrs[] = {
- MSR_TSC_AUX,
-};
-#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
+#define IOPM_SIZE PAGE_SIZE * 3
+#define MSRPM_SIZE PAGE_SIZE * 2
-#define MAX_DIRECT_ACCESS_MSRS 18
+#define MAX_DIRECT_ACCESS_MSRS 20
#define MSRPM_OFFSETS 16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;
+/*
+ * Clean bits in VMCB.
+ * VMCB_ALL_CLEAN_MASK might also need to
+ * be updated if this enum is modified.
+ */
enum {
VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
pause filter count */
@@ -50,9 +54,17 @@ enum {
* AVIC PHYSICAL_TABLE pointer,
* AVIC LOGICAL_TABLE pointer
*/
- VMCB_DIRTY_MAX,
+ VMCB_SW = 31, /* Reserved for hypervisor/software use */
};
+#define VMCB_ALL_CLEAN_MASK ( \
+ (1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) | \
+ (1U << VMCB_ASID) | (1U << VMCB_INTR) | \
+ (1U << VMCB_NPT) | (1U << VMCB_CR) | (1U << VMCB_DR) | \
+ (1U << VMCB_DT) | (1U << VMCB_SEG) | (1U << VMCB_CR2) | \
+ (1U << VMCB_LBR) | (1U << VMCB_AVIC) | \
+ (1U << VMCB_SW))
+
/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))
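
For reference, the clean-bits bookkeeping these masks drive can be exercised in isolation. A userspace re-implementation (enum values mirror the header; the effect of vmcb_mark_all_clean() and vmcb_mark_dirty() is reproduced inline):

#include <stdint.h>
#include <stdio.h>

enum { VMCB_INTERCEPTS, VMCB_PERM_MAP, VMCB_ASID, VMCB_INTR, VMCB_NPT,
       VMCB_CR, VMCB_DR, VMCB_DT, VMCB_SEG, VMCB_CR2, VMCB_LBR,
       VMCB_AVIC, VMCB_SW = 31 };

#define ALL_CLEAN_MASK \
        ((1U << VMCB_INTERCEPTS) | (1U << VMCB_PERM_MAP) | (1U << VMCB_ASID) | \
         (1U << VMCB_INTR) | (1U << VMCB_NPT) | (1U << VMCB_CR) | \
         (1U << VMCB_DR) | (1U << VMCB_DT) | (1U << VMCB_SEG) | \
         (1U << VMCB_CR2) | (1U << VMCB_LBR) | (1U << VMCB_AVIC) | \
         (1U << VMCB_SW))
#define ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))

int main(void)
{
        uint32_t clean = ALL_CLEAN_MASK & ~ALWAYS_DIRTY_MASK; /* mark_all_clean */

        clean &= ~(1U << VMCB_NPT);                           /* mark_dirty(NPT) */

        printf("NPT clean? %d\n", !!(clean & (1U << VMCB_NPT))); /* 0 */
        printf("SEG clean? %d\n", !!(clean & (1U << VMCB_SEG))); /* 1 */
        printf("CR2 clean? %d\n", !!(clean & (1U << VMCB_CR2))); /* 0: always dirty */
        return 0;
}
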
@@ -65,6 +77,8 @@ struct kvm_sev_info {
unsigned long pages_locked; /* Number of pages locked */
struct list_head regions_list; /* List of registered regions */
u64 ap_jump_table; /* SEV-ES AP Jump Table address */
+ struct kvm *enc_context_owner; /* Owner of copied encryption context */
+ struct misc_cg *misc_cg; /* For misc cgroup accounting */
};
struct kvm_svm {
@@ -81,11 +95,19 @@ struct kvm_svm {
struct kvm_vcpu;
+struct kvm_vmcb_info {
+ struct vmcb *ptr;
+ unsigned long pa;
+ int cpu;
+ uint64_t asid_generation;
+};
+
struct svm_nested_state {
- struct vmcb *hsave;
+ struct kvm_vmcb_info vmcb02;
u64 hsave_msr;
u64 vm_cr_msr;
u64 vmcb12_gpa;
+ u64 last_vmcb12_gpa;
/* These are the merged vectors */
u32 *msrpm;
@@ -102,21 +124,20 @@ struct svm_nested_state {
struct vcpu_svm {
struct kvm_vcpu vcpu;
+ /* vmcb always points at current_vmcb->ptr; it's purely a shorthand. */
struct vmcb *vmcb;
- unsigned long vmcb_pa;
+ struct kvm_vmcb_info vmcb01;
+ struct kvm_vmcb_info *current_vmcb;
struct svm_cpu_data *svm_data;
u32 asid;
- uint64_t asid_generation;
- uint64_t sysenter_esp;
- uint64_t sysenter_eip;
+ u32 sysenter_esp_hi;
+ u32 sysenter_eip_hi;
uint64_t tsc_aux;
u64 msr_decfg;
u64 next_rip;
- u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
-
u64 spec_ctrl;
/*
* Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
@@ -230,26 +251,28 @@ static inline void vmcb_mark_all_dirty(struct vmcb *vmcb)
static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
{
- vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
+ vmcb->control.clean = VMCB_ALL_CLEAN_MASK
& ~VMCB_ALWAYS_DIRTY_MASK;
}
+static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
+{
+ return (vmcb->control.clean & (1 << bit));
+}
+
static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
vmcb->control.clean &= ~(1 << bit);
}
-static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
+static inline bool vmcb_is_dirty(struct vmcb *vmcb, int bit)
{
- return container_of(vcpu, struct vcpu_svm, vcpu);
+ return !test_bit(bit, (unsigned long *)&vmcb->control.clean);
}
-static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
+static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
- if (is_guest_mode(&svm->vcpu))
- return svm->nested.hsave;
- else
- return svm->vmcb;
+ return container_of(vcpu, struct vcpu_svm, vcpu);
}
static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
@@ -272,7 +295,7 @@ static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
- struct vmcb *vmcb = get_host_vmcb(svm);
+ struct vmcb *vmcb = svm->vmcb01.ptr;
if (!sev_es_guest(svm->vcpu.kvm)) {
vmcb_set_intercept(&vmcb->control, INTERCEPT_DR0_READ);
@@ -299,7 +322,7 @@ static inline void set_dr_intercepts(struct vcpu_svm *svm)
static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
- struct vmcb *vmcb = get_host_vmcb(svm);
+ struct vmcb *vmcb = svm->vmcb01.ptr;
vmcb->control.intercepts[INTERCEPT_DR] = 0;
@@ -314,7 +337,7 @@ static inline void clr_dr_intercepts(struct vcpu_svm *svm)
static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
- struct vmcb *vmcb = get_host_vmcb(svm);
+ struct vmcb *vmcb = svm->vmcb01.ptr;
WARN_ON_ONCE(bit >= 32);
vmcb_set_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);
@@ -324,7 +347,7 @@ static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)
static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
{
- struct vmcb *vmcb = get_host_vmcb(svm);
+ struct vmcb *vmcb = svm->vmcb01.ptr;
WARN_ON_ONCE(bit >= 32);
vmcb_clr_intercept(&vmcb->control, INTERCEPT_EXCEPTION_OFFSET + bit);
@@ -334,7 +357,7 @@ static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)
static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
{
- struct vmcb *vmcb = get_host_vmcb(svm);
+ struct vmcb *vmcb = svm->vmcb01.ptr;
vmcb_set_intercept(&vmcb->control, bit);
@@ -343,7 +366,7 @@ static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)
static inline void svm_clr_intercept(struct vcpu_svm *svm, int bit)
{
- struct vmcb *vmcb = get_host_vmcb(svm);
+ struct vmcb *vmcb = svm->vmcb01.ptr;
vmcb_clr_intercept(&vmcb->control, bit);
@@ -387,8 +410,6 @@ static inline bool gif_set(struct vcpu_svm *svm)
/* svm.c */
#define MSR_INVALID 0xffffffffU
-extern int sev;
-extern int sev_es;
extern bool dump_invalid_vmcb;
u32 svm_msrpm_offset(u32 msr);
@@ -405,7 +426,7 @@ bool svm_smi_blocked(struct kvm_vcpu *vcpu);
bool svm_nmi_blocked(struct kvm_vcpu *vcpu);
bool svm_interrupt_blocked(struct kvm_vcpu *vcpu);
void svm_set_gif(struct vcpu_svm *svm, bool value);
-int svm_invoke_exit_handler(struct vcpu_svm *svm, u64 exit_code);
+int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
int read, int write);
@@ -437,20 +458,30 @@ static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}
-int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
- struct vmcb *nested_vmcb);
+int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb_gpa, struct vmcb *vmcb12);
void svm_leave_nested(struct vcpu_svm *svm);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
-int nested_svm_vmrun(struct vcpu_svm *svm);
+int nested_svm_vmrun(struct kvm_vcpu *vcpu);
void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb);
int nested_svm_vmexit(struct vcpu_svm *svm);
+
+static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
+{
+ svm->vmcb->control.exit_code = exit_code;
+ svm->vmcb->control.exit_info_1 = 0;
+ svm->vmcb->control.exit_info_2 = 0;
+ return nested_svm_vmexit(svm);
+}
+
int nested_svm_exit_handled(struct vcpu_svm *svm);
-int nested_svm_check_permissions(struct vcpu_svm *svm);
+int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
-void sync_nested_vmcb_control(struct vcpu_svm *svm);
+void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
+void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
+void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);
extern struct kvm_x86_nested_ops svm_nested_ops;
@@ -467,8 +498,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
-extern int avic;
-
static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
@@ -491,8 +520,8 @@ void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
void avic_init_vmcb(struct vcpu_svm *svm);
void svm_toggle_avic_for_irq_window(struct kvm_vcpu *vcpu, bool activate);
-int avic_incomplete_ipi_interception(struct vcpu_svm *svm);
-int avic_unaccelerated_access_interception(struct vcpu_svm *svm);
+int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu);
+int avic_unaccelerated_access_interception(struct kvm_vcpu *vcpu);
int avic_init_vcpu(struct vcpu_svm *svm);
void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void avic_vcpu_put(struct kvm_vcpu *vcpu);
@@ -513,47 +542,11 @@ void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);
/* sev.c */
-#define GHCB_VERSION_MAX 1ULL
-#define GHCB_VERSION_MIN 1ULL
-
-#define GHCB_MSR_INFO_POS 0
-#define GHCB_MSR_INFO_MASK (BIT_ULL(12) - 1)
-
-#define GHCB_MSR_SEV_INFO_RESP 0x001
-#define GHCB_MSR_SEV_INFO_REQ 0x002
-#define GHCB_MSR_VER_MAX_POS 48
-#define GHCB_MSR_VER_MAX_MASK 0xffff
-#define GHCB_MSR_VER_MIN_POS 32
-#define GHCB_MSR_VER_MIN_MASK 0xffff
-#define GHCB_MSR_CBIT_POS 24
-#define GHCB_MSR_CBIT_MASK 0xff
-#define GHCB_MSR_SEV_INFO(_max, _min, _cbit) \
- ((((_max) & GHCB_MSR_VER_MAX_MASK) << GHCB_MSR_VER_MAX_POS) | \
- (((_min) & GHCB_MSR_VER_MIN_MASK) << GHCB_MSR_VER_MIN_POS) | \
- (((_cbit) & GHCB_MSR_CBIT_MASK) << GHCB_MSR_CBIT_POS) | \
- GHCB_MSR_SEV_INFO_RESP)
-
-#define GHCB_MSR_CPUID_REQ 0x004
-#define GHCB_MSR_CPUID_RESP 0x005
-#define GHCB_MSR_CPUID_FUNC_POS 32
-#define GHCB_MSR_CPUID_FUNC_MASK 0xffffffff
-#define GHCB_MSR_CPUID_VALUE_POS 32
-#define GHCB_MSR_CPUID_VALUE_MASK 0xffffffff
-#define GHCB_MSR_CPUID_REG_POS 30
-#define GHCB_MSR_CPUID_REG_MASK 0x3
-
-#define GHCB_MSR_TERM_REQ 0x100
-#define GHCB_MSR_TERM_REASON_SET_POS 12
-#define GHCB_MSR_TERM_REASON_SET_MASK 0xf
-#define GHCB_MSR_TERM_REASON_POS 16
-#define GHCB_MSR_TERM_REASON_MASK 0xff
+#define GHCB_VERSION_MAX 1ULL
+#define GHCB_VERSION_MIN 1ULL
-extern unsigned int max_sev_asid;
-static inline bool svm_sev_enabled(void)
-{
- return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
-}
+extern unsigned int max_sev_asid;
void sev_vm_destroy(struct kvm *kvm);
int svm_mem_enc_op(struct kvm *kvm, void __user *argp);
@@ -561,16 +554,20 @@ int svm_register_enc_region(struct kvm *kvm,
struct kvm_enc_region *range);
int svm_unregister_enc_region(struct kvm *kvm,
struct kvm_enc_region *range);
+int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd);
void pre_sev_run(struct vcpu_svm *svm, int cpu);
+void __init sev_set_cpu_caps(void);
void __init sev_hardware_setup(void);
void sev_hardware_teardown(void);
+int sev_cpu_init(struct svm_cpu_data *sd);
void sev_free_vcpu(struct kvm_vcpu *vcpu);
-int sev_handle_vmgexit(struct vcpu_svm *svm);
+int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
void sev_es_init_vmcb(struct vcpu_svm *svm);
void sev_es_create_vcpu(struct vcpu_svm *svm);
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu);
+void sev_es_unmap_ghcb(struct vcpu_svm *svm);
/* vmenter.S */
diff --git a/arch/x86/kvm/svm/svm_onhyperv.c b/arch/x86/kvm/svm/svm_onhyperv.c
new file mode 100644
index 000000000000..98aa981c04ec
--- /dev/null
+++ b/arch/x86/kvm/svm/svm_onhyperv.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KVM L1 hypervisor optimizations on Hyper-V for SVM.
+ */
+
+#include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
+
+#include <asm/mshyperv.h>
+
+#include "svm.h"
+#include "svm_ops.h"
+
+#include "hyperv.h"
+#include "kvm_onhyperv.h"
+#include "svm_onhyperv.h"
+
+int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
+{
+ struct hv_enlightenments *hve;
+ struct hv_partition_assist_pg **p_hv_pa_pg =
+ &to_kvm_hv(vcpu->kvm)->hv_pa_pg;
+
+ if (!*p_hv_pa_pg)
+ *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+
+ if (!*p_hv_pa_pg)
+ return -ENOMEM;
+
+ hve = (struct hv_enlightenments *)to_svm(vcpu)->vmcb->control.reserved_sw;
+
+ hve->partition_assist_page = __pa(*p_hv_pa_pg);
+ hve->hv_vm_id = (unsigned long)vcpu->kvm;
+ if (!hve->hv_enlightenments_control.nested_flush_hypercall) {
+ hve->hv_enlightenments_control.nested_flush_hypercall = 1;
+ vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
+ }
+
+ return 0;
+}
+
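
svm_hv_enable_direct_tlbflush() above allocates the partition assist page on first use and keeps it for the VM's lifetime. A standalone sketch of the same allocate-once idiom (illustrative names; calloc stands in for kzalloc):

#include <stdio.h>
#include <stdlib.h>

static int enable_feature(void **slot)
{
        if (!*slot)
                *slot = calloc(1, 4096);  /* kzalloc(PAGE_SIZE, GFP_KERNEL) analogue */
        if (!*slot)
                return -1;                /* -ENOMEM in the kernel */
        return 0;
}

int main(void)
{
        void *pa_pg = NULL;

        enable_feature(&pa_pg);  /* first call allocates */
        enable_feature(&pa_pg);  /* subsequent calls reuse the same page */
        printf("assist page at %p\n", pa_pg);
        free(pa_pg);
        return 0;
}
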
diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h
new file mode 100644
index 000000000000..9b9a55abc29f
--- /dev/null
+++ b/arch/x86/kvm/svm/svm_onhyperv.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * KVM L1 hypervisor optimizations on Hyper-V for SVM.
+ */
+
+#ifndef __ARCH_X86_KVM_SVM_ONHYPERV_H__
+#define __ARCH_X86_KVM_SVM_ONHYPERV_H__
+
+#if IS_ENABLED(CONFIG_HYPERV)
+#include <asm/mshyperv.h>
+
+#include "hyperv.h"
+#include "kvm_onhyperv.h"
+
+static struct kvm_x86_ops svm_x86_ops;
+
+/*
+ * Hyper-V uses the software reserved 32 bytes in VMCB
+ * control area to expose SVM enlightenments to guests.
+ */
+struct hv_enlightenments {
+ struct __packed hv_enlightenments_control {
+ u32 nested_flush_hypercall:1;
+ u32 msr_bitmap:1;
+ u32 enlightened_npt_tlb: 1;
+ u32 reserved:29;
+ } __packed hv_enlightenments_control;
+ u32 hv_vp_id;
+ u64 hv_vm_id;
+ u64 partition_assist_page;
+ u64 reserved;
+} __packed;
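
This struct must fit exactly in the 32 software-reserved bytes of the VMCB control area. A standalone compile-time check of that layout (userspace mirror of the header's struct, with __attribute__((packed)) in place of the kernel's __packed):

#include <stdint.h>

struct hv_enlightenments {
        struct hv_enlightenments_control {
                uint32_t nested_flush_hypercall:1;
                uint32_t msr_bitmap:1;
                uint32_t enlightened_npt_tlb:1;
                uint32_t reserved:29;
        } __attribute__((packed)) hv_enlightenments_control;
        uint32_t hv_vp_id;
        uint64_t hv_vm_id;
        uint64_t partition_assist_page;
        uint64_t reserved;
} __attribute__((packed));

_Static_assert(sizeof(struct hv_enlightenments) == 32,
               "enlightenments must fit the VMCB's 32 reserved_sw bytes");

int main(void)
{
        return 0;
}
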
+
+/*
+ * Hyper-V uses the software reserved clean bit in VMCB
+ */
+#define VMCB_HV_NESTED_ENLIGHTENMENTS VMCB_SW
+
+int svm_hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu);
+
+static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
+{
+ struct hv_enlightenments *hve =
+ (struct hv_enlightenments *)vmcb->control.reserved_sw;
+
+ if (npt_enabled &&
+ ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB)
+ hve->hv_enlightenments_control.enlightened_npt_tlb = 1;
+}
+
+static inline void svm_hv_hardware_setup(void)
+{
+ if (npt_enabled &&
+ ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB) {
+ pr_info("kvm: Hyper-V enlightened NPT TLB flush enabled\n");
+ svm_x86_ops.tlb_remote_flush = hv_remote_flush_tlb;
+ svm_x86_ops.tlb_remote_flush_with_range =
+ hv_remote_flush_tlb_with_range;
+ }
+
+ if (ms_hyperv.nested_features & HV_X64_NESTED_DIRECT_FLUSH) {
+ int cpu;
+
+ pr_info("kvm: Hyper-V Direct TLB Flush enabled\n");
+ for_each_online_cpu(cpu) {
+ struct hv_vp_assist_page *vp_ap =
+ hv_get_vp_assist_page(cpu);
+
+ if (!vp_ap)
+ continue;
+
+ vp_ap->nested_control.features.directhypercall = 1;
+ }
+ svm_x86_ops.enable_direct_tlbflush =
+ svm_hv_enable_direct_tlbflush;
+ }
+}
+
+static inline void svm_hv_vmcb_dirty_nested_enlightenments(
+ struct kvm_vcpu *vcpu)
+{
+ struct vmcb *vmcb = to_svm(vcpu)->vmcb;
+ struct hv_enlightenments *hve =
+ (struct hv_enlightenments *)vmcb->control.reserved_sw;
+
+ /*
+ * vmcb can be NULL if called during early vcpu init, and it's
+ * okay not to mark the vmcb dirty then: it is marked dirty
+ * unconditionally towards the end of vcpu init.
+ */
+ if (vmcb && vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
+ hve->hv_enlightenments_control.msr_bitmap)
+ vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
+}
+
+static inline void svm_hv_update_vp_id(struct vmcb *vmcb,
+ struct kvm_vcpu *vcpu)
+{
+ struct hv_enlightenments *hve =
+ (struct hv_enlightenments *)vmcb->control.reserved_sw;
+ u32 vp_index = kvm_hv_get_vpindex(vcpu);
+
+ if (hve->hv_vp_id != vp_index) {
+ hve->hv_vp_id = vp_index;
+ vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
+ }
+}
+#else
+
+static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
+{
+}
+
+static inline void svm_hv_hardware_setup(void)
+{
+}
+
+static inline void svm_hv_vmcb_dirty_nested_enlightenments(
+ struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void svm_hv_update_vp_id(struct vmcb *vmcb,
+ struct kvm_vcpu *vcpu)
+{
+}
+#endif /* CONFIG_HYPERV */
+
+#endif /* __ARCH_X86_KVM_SVM_ONHYPERV_H__ */
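
The #else branch above is the standard compiled-out-feature pattern: empty static inline stubs keep call sites free of #ifdefs. A minimal standalone demonstration (FEATURE_ON is an illustrative config macro):

#include <stdio.h>

#ifdef FEATURE_ON
static inline void feature_hook(int x) { printf("hook: %d\n", x); }
#else
static inline void feature_hook(int x) { (void)x; /* compiled out: no-op */ }
#endif

int main(void)
{
        feature_hook(42);   /* always compiles; no #ifdef at the call site */
        return 0;
}
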
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 6feb8c08f45a..4fa17df123cd 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -79,28 +79,10 @@ SYM_FUNC_START(__svm_vcpu_run)
/* Enter guest mode */
sti
-1: vmload %_ASM_AX
- jmp 3f
-2: cmpb $0, kvm_rebooting
- jne 3f
- ud2
- _ASM_EXTABLE(1b, 2b)
-3: vmrun %_ASM_AX
- jmp 5f
-4: cmpb $0, kvm_rebooting
- jne 5f
- ud2
- _ASM_EXTABLE(3b, 4b)
+1: vmrun %_ASM_AX
-5: vmsave %_ASM_AX
- jmp 7f
-6: cmpb $0, kvm_rebooting
- jne 7f
- ud2
- _ASM_EXTABLE(5b, 6b)
-7:
- cli
+2: cli
#ifdef CONFIG_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
@@ -167,6 +149,13 @@ SYM_FUNC_START(__svm_vcpu_run)
#endif
pop %_ASM_BP
ret
+
+3: cmpb $0, kvm_rebooting
+ jne 2b
+ ud2
+
+ _ASM_EXTABLE(1b, 3b)
+
SYM_FUNC_END(__svm_vcpu_run)
/**
@@ -186,18 +175,15 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
#endif
push %_ASM_BX
- /* Enter guest mode */
+ /* Move @vmcb to RAX. */
mov %_ASM_ARG1, %_ASM_AX
+
+ /* Enter guest mode */
sti
1: vmrun %_ASM_AX
- jmp 3f
-2: cmpb $0, kvm_rebooting
- jne 3f
- ud2
- _ASM_EXTABLE(1b, 2b)
-3: cli
+2: cli
#ifdef CONFIG_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
@@ -217,4 +203,11 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
#endif
pop %_ASM_BP
ret
+
+3: cmpb $0, kvm_rebooting
+ jne 2b
+ ud2
+
+ _ASM_EXTABLE(1b, 3b)
+
SYM_FUNC_END(__svm_sev_es_vcpu_run)
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index a61c015870e3..b484141ea15b 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -997,7 +997,7 @@ TRACE_EVENT(kvm_wait_lapic_expire,
__entry->delta < 0 ? "early" : "late")
);
-TRACE_EVENT(kvm_enter_smm,
+TRACE_EVENT(kvm_smm_transition,
TP_PROTO(unsigned int vcpu_id, u64 smbase, bool entering),
TP_ARGS(vcpu_id, smbase, entering),
@@ -1550,16 +1550,16 @@ TRACE_EVENT(kvm_nested_vmenter_failed,
TP_ARGS(msg, err),
TP_STRUCT__entry(
- __field(const char *, msg)
+ __string(msg, msg)
__field(u32, err)
),
TP_fast_assign(
- __entry->msg = msg;
+ __assign_str(msg, msg);
__entry->err = err;
),
- TP_printk("%s%s", __entry->msg, !__entry->err ? "" :
+ TP_printk("%s%s", __get_str(msg), !__entry->err ? "" :
__print_symbolic(__entry->err, VMX_VMENTER_INSTRUCTION_ERRORS))
);
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index d1d77985e889..4705ad55abb5 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -12,7 +12,6 @@ extern bool __read_mostly enable_ept;
extern bool __read_mostly enable_unrestricted_guest;
extern bool __read_mostly enable_ept_ad_bits;
extern bool __read_mostly enable_pml;
-extern bool __read_mostly enable_apicv;
extern int __read_mostly pt_mode;
#define PT_MODE_SYSTEM 0
@@ -90,8 +89,7 @@ static inline bool cpu_has_vmx_preemption_timer(void)
static inline bool cpu_has_vmx_posted_intr(void)
{
- return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
- vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
+ return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
}
static inline bool cpu_has_load_ia32_efer(void)
@@ -398,6 +396,9 @@ static inline u64 vmx_supported_debugctl(void)
{
u64 debugctl = 0;
+ if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
+ debugctl |= DEBUGCTLMSR_BUS_LOCK_DETECT;
+
if (vmx_get_perf_capabilities() & PMU_CAP_LBR_FMT)
debugctl |= DEBUGCTLMSR_LBR_MASK;
diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
index 41f24661af04..896b2a50b4aa 100644
--- a/arch/x86/kvm/vmx/evmcs.c
+++ b/arch/x86/kvm/vmx/evmcs.c
@@ -319,6 +319,9 @@ bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa)
if (unlikely(!assist_page.enlighten_vmentry))
return false;
+ if (unlikely(!evmptr_is_valid(assist_page.current_nested_vmcs)))
+ return false;
+
*evmcs_gpa = assist_page.current_nested_vmcs;
return true;
diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
index bd41d9462355..2ec9b46f0d0c 100644
--- a/arch/x86/kvm/vmx/evmcs.h
+++ b/arch/x86/kvm/vmx/evmcs.h
@@ -197,6 +197,14 @@ static inline void evmcs_load(u64 phys_addr) {}
static inline void evmcs_touch_msr_bitmap(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */
+#define EVMPTR_INVALID (-1ULL)
+#define EVMPTR_MAP_PENDING (-2ULL)
+
+static inline bool evmptr_is_valid(u64 evmptr)
+{
+ return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
+}
+
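
EVMPTR_INVALID and EVMPTR_MAP_PENDING carve two sentinels out of the GPA space: "no eVMCS" and "eVMCS known but not yet mapped" (e.g. right after migration). A standalone sketch of the check (values and helper mirror the header; the test GPA is illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EVMPTR_INVALID      (~0ULL)   /* -1ULL */
#define EVMPTR_MAP_PENDING  (~1ULL)   /* -2ULL */

static bool evmptr_is_valid(uint64_t evmptr)
{
        return evmptr != EVMPTR_INVALID && evmptr != EVMPTR_MAP_PENDING;
}

int main(void)
{
        uint64_t ptrs[] = { EVMPTR_INVALID, EVMPTR_MAP_PENDING, 0x12345000ULL };

        for (size_t i = 0; i < sizeof(ptrs) / sizeof(ptrs[0]); i++)
                printf("%#llx valid=%d\n", (unsigned long long)ptrs[i],
                       evmptr_is_valid(ptrs[i]));
        return 0;
}
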
enum nested_evmptrld_status {
EVMPTRLD_DISABLED,
EVMPTRLD_SUCCEEDED,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index bcca0b80e0d0..1a52134b0c42 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -11,6 +11,7 @@
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
+#include "sgx.h"
#include "trace.h"
#include "vmx.h"
#include "x86.h"
@@ -21,13 +22,7 @@ module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);
-#define CC(consistency_check) \
-({ \
- bool failed = (consistency_check); \
- if (failed) \
- trace_kvm_nested_vmenter_failed(#consistency_check, 0); \
- failed; \
-})
+#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
/*
* Hyper-V requires all of these, so mark them as supported even though
@@ -178,9 +173,13 @@ static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
| X86_EFLAGS_ZF);
get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
/*
- * We don't need to force a shadow sync because
- * VM_INSTRUCTION_ERROR is not shadowed
+ * We don't need to force sync to shadow VMCS because
+ * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
+ * fields and thus must be synced.
*/
+ if (to_vmx(vcpu)->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
+ to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;
+
return kvm_skip_emulated_instruction(vcpu);
}
@@ -192,7 +191,8 @@ static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
* failValid writes the error number to the current VMCS, which
* can't be done if there isn't a current VMCS.
*/
- if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
+ if (vmx->nested.current_vmptr == -1ull &&
+ !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
return nested_vmx_failInvalid(vcpu);
return nested_vmx_failValid(vcpu, vm_instruction_error);
@@ -226,12 +226,12 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (!vmx->nested.hv_evmcs)
- return;
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
+ kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
+ vmx->nested.hv_evmcs = NULL;
+ }
- kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
- vmx->nested.hv_evmcs_vmptr = 0;
- vmx->nested.hv_evmcs = NULL;
+ vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
}
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
@@ -351,16 +351,21 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
vmcs12->guest_physical_address = fault->address;
}
+static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
+{
+ kvm_init_shadow_ept_mmu(vcpu,
+ to_vmx(vcpu)->nested.msrs.ept_caps &
+ VMX_EPT_EXECUTE_ONLY_BIT,
+ nested_ept_ad_enabled(vcpu),
+ nested_ept_get_eptp(vcpu));
+}
+
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
WARN_ON(mmu_is_nested(vcpu));
vcpu->arch.mmu = &vcpu->arch.guest_mmu;
- kvm_init_shadow_ept_mmu(vcpu,
- to_vmx(vcpu)->nested.msrs.ept_caps &
- VMX_EPT_EXECUTE_ONLY_BIT,
- nested_ept_ad_enabled(vcpu),
- nested_ept_get_eptp(vcpu));
+ nested_ept_new_eptp(vcpu);
vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
@@ -619,6 +624,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
}
/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
+#ifdef CONFIG_X86_64
nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
MSR_FS_BASE, MSR_TYPE_RW);
@@ -627,6 +633,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+#endif
/*
* Checking the L0->L1 bitmap is trying to verify two things:
@@ -1061,54 +1068,13 @@ static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
}
/*
- * Returns true if the MMU needs to be sync'd on nested VM-Enter/VM-Exit.
- * tl;dr: the MMU needs a sync if L0 is using shadow paging and L1 didn't
- * enable VPID for L2 (implying it expects a TLB flush on VMX transitions).
- * Here's why.
- *
- * If EPT is enabled by L0 a sync is never needed:
- * - if it is disabled by L1, then L0 is not shadowing L1 or L2 PTEs, there
- * cannot be unsync'd SPTEs for either L1 or L2.
- *
- * - if it is also enabled by L1, then L0 doesn't need to sync on VM-Enter
- * VM-Enter as VM-Enter isn't required to invalidate guest-physical mappings
- * (irrespective of VPID), i.e. L1 can't rely on the (virtual) CPU to flush
- * stale guest-physical mappings for L2 from the TLB. And as above, L0 isn't
- * shadowing L1 PTEs so there are no unsync'd SPTEs to sync on VM-Exit.
- *
- * If EPT is disabled by L0:
- * - if VPID is enabled by L1 (for L2), the situation is similar to when L1
- * enables EPT: L0 doesn't need to sync as VM-Enter and VM-Exit aren't
- * required to invalidate linear mappings (EPT is disabled so there are
- * no combined or guest-physical mappings), i.e. L1 can't rely on the
- * (virtual) CPU to flush stale linear mappings for either L2 or itself (L1).
- *
- * - however if VPID is disabled by L1, then a sync is needed as L1 expects all
- * linear mappings (EPT is disabled so there are no combined or guest-physical
- * mappings) to be invalidated on both VM-Enter and VM-Exit.
- *
- * Note, this logic is subtly different than nested_has_guest_tlb_tag(), which
- * additionally checks that L2 has been assigned a VPID (when EPT is disabled).
- * Whether or not L2 has been assigned a VPID by L0 is irrelevant with respect
- * to L1's expectations, e.g. L0 needs to invalidate hardware TLB entries if L2
- * doesn't have a unique VPID to prevent reusing L1's entries (assuming L1 has
- * been assigned a VPID), but L0 doesn't need to do a MMU sync because L1
- * doesn't expect stale (virtual) TLB entries to be flushed, i.e. L1 doesn't
- * know that L0 will flush the TLB and so L1 will do INVVPID as needed to flush
- * stale TLB entries, at which point L0 will sync L2's MMU.
- */
-static bool nested_vmx_transition_mmu_sync(struct kvm_vcpu *vcpu)
-{
- return !enable_ept && !nested_cpu_has_vpid(get_vmcs12(vcpu));
-}
-
-/*
* Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
* emulating VM-Entry into a guest with EPT enabled. On failure, the expected
* Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
* @entry_failure_code.
*/
-static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
+static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
+ bool nested_ept, bool reload_pdptrs,
enum vm_entry_failure_code *entry_failure_code)
{
if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
@@ -1120,27 +1086,20 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
* If PAE paging and EPT are both on, CR3 is not used by the CPU and
* must not be dereferenced.
*/
- if (!nested_ept && is_pae_paging(vcpu) &&
- (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
- if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
- *entry_failure_code = ENTRY_FAIL_PDPTE;
- return -EINVAL;
- }
+ if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
+ CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
+ *entry_failure_code = ENTRY_FAIL_PDPTE;
+ return -EINVAL;
}
- /*
- * Unconditionally skip the TLB flush on fast CR3 switch, all TLB
- * flushes are handled by nested_vmx_transition_tlb_flush(). See
- * nested_vmx_transition_mmu_sync for details on skipping the MMU sync.
- */
if (!nested_ept)
- kvm_mmu_new_pgd(vcpu, cr3, true,
- !nested_vmx_transition_mmu_sync(vcpu));
+ kvm_mmu_new_pgd(vcpu, cr3);
vcpu->arch.cr3 = cr3;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
- kvm_init_mmu(vcpu, false);
+ /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
+ kvm_init_mmu(vcpu);
return 0;
}
@@ -1173,17 +1132,28 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
struct vcpu_vmx *vmx = to_vmx(vcpu);
/*
- * If VPID is disabled, linear and combined mappings are flushed on
- * VM-Enter/VM-Exit, and guest-physical mappings are valid only for
- * their associated EPTP.
+ * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
+ * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
+ * full TLB flush from the guest's perspective. This is required even
+ * if VPID is disabled in the host as KVM may need to synchronize the
+ * MMU in response to the guest TLB flush.
+ *
+ * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
+ * EPT is a special snowflake, as guest-physical mappings aren't
+ * flushed on VPID invalidations, including VM-Enter or VM-Exit with
+ * VPID disabled. As a result, KVM _never_ needs to sync nEPT
+ * entries on VM-Enter because L1 can't rely on VM-Enter to flush
+ * those mappings.
*/
- if (!enable_vpid)
+ if (!nested_cpu_has_vpid(vmcs12)) {
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
return;
+ }
+
+ /* L2 should never have a VPID if VPID is disabled. */
+ WARN_ON(!enable_vpid);
/*
- * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
- * for *all* contexts to be flushed on VM-Enter/VM-Exit.
- *
* If VPID is enabled and used by vmc12, but L2 does not have a unique
* TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
* a VPID for L2, flush the current context as the effective ASID is
@@ -1195,13 +1165,12 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
*
* If a TLB flush isn't required due to any of the above, and vpid12 is
* changing then the new "virtual" VPID (vpid12) will reuse the same
- * "real" VPID (vpid02), and so needs to be sync'd. There is no direct
+ * "real" VPID (vpid02), and so needs to be flushed. There's no direct
* mapping between vpid02 and vpid12, vpid02 is per-vCPU and reused for
- * all nested vCPUs.
+ * all nested vCPUs. Remember, a flush on VM-Enter does not invalidate
+ * guest-physical mappings, so there is no need to sync the nEPT MMU.
*/
- if (!nested_cpu_has_vpid(vmcs12)) {
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
- } else if (!nested_has_guest_tlb_tag(vcpu)) {
+ if (!nested_has_guest_tlb_tag(vcpu)) {
kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
} else if (is_vmenter &&
vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
@@ -1589,7 +1558,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
vmcs_load(vmx->loaded_vmcs->vmcs);
}
-static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
+static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
{
struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
@@ -1598,7 +1567,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->tpr_threshold = evmcs->tpr_threshold;
vmcs12->guest_rip = evmcs->guest_rip;
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
vmcs12->guest_rsp = evmcs->guest_rsp;
vmcs12->guest_rflags = evmcs->guest_rflags;
@@ -1606,23 +1575,23 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
evmcs->guest_interruptibility_info;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
vmcs12->cpu_based_vm_exec_control =
evmcs->cpu_based_vm_exec_control;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
vmcs12->exception_bitmap = evmcs->exception_bitmap;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
vmcs12->vm_entry_intr_info_field =
evmcs->vm_entry_intr_info_field;
@@ -1632,7 +1601,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
evmcs->vm_entry_instruction_len;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
@@ -1652,7 +1621,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->host_tr_selector = evmcs->host_tr_selector;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
vmcs12->pin_based_vm_exec_control =
evmcs->pin_based_vm_exec_control;
@@ -1661,18 +1630,18 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
evmcs->secondary_vm_exec_control;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
vmcs12->msr_bitmap = evmcs->msr_bitmap;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
vmcs12->guest_es_base = evmcs->guest_es_base;
vmcs12->guest_cs_base = evmcs->guest_cs_base;
@@ -1712,14 +1681,14 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
vmcs12->tsc_offset = evmcs->tsc_offset;
vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
@@ -1731,7 +1700,7 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->guest_dr7 = evmcs->guest_dr7;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
vmcs12->host_fs_base = evmcs->host_fs_base;
vmcs12->host_gs_base = evmcs->host_gs_base;
@@ -1741,13 +1710,13 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
vmcs12->host_rsp = evmcs->host_rsp;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
vmcs12->ept_pointer = evmcs->ept_pointer;
vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
}
- if (unlikely(!(evmcs->hv_clean_fields &
+ if (unlikely(!(hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
@@ -1802,10 +1771,10 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
* vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
*/
- return 0;
+ return;
}
-static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
+static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
{
struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
@@ -1965,7 +1934,7 @@ static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
- return 0;
+ return;
}
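
copy_enlightened_to_vmcs12() above only copies a field group when its Hyper-V clean bit is unset. A standalone sketch of that gating, reduced to one illustrative field and bit:

#include <stdint.h>
#include <stdio.h>

#define CLEAN_FIELD_CONTROL_PROC (1u << 0) /* illustrative bit position */

int main(void)
{
        uint32_t vmcs12_field = 1, evmcs_field = 2;
        uint32_t hv_clean_fields = CLEAN_FIELD_CONTROL_PROC;

        if (!(hv_clean_fields & CLEAN_FIELD_CONTROL_PROC))
                vmcs12_field = evmcs_field;             /* skipped: field is clean */
        printf("after clean pass: %u\n", vmcs12_field); /* 1 */

        hv_clean_fields = 0;                            /* guest dirtied everything */
        if (!(hv_clean_fields & CLEAN_FIELD_CONTROL_PROC))
                vmcs12_field = evmcs_field;             /* copied */
        printf("after dirty pass: %u\n", vmcs12_field); /* 2 */
        return 0;
}
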
/*
@@ -1982,13 +1951,13 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
if (likely(!vmx->nested.enlightened_vmcs_enabled))
return EVMPTRLD_DISABLED;
- if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
+ if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) {
+ nested_release_evmcs(vcpu);
return EVMPTRLD_DISABLED;
+ }
- if (unlikely(!vmx->nested.hv_evmcs ||
- evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
- if (!vmx->nested.hv_evmcs)
- vmx->nested.current_vmptr = -1ull;
+ if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
+ vmx->nested.current_vmptr = -1ull;
nested_release_evmcs(vcpu);
@@ -2026,7 +1995,6 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
return EVMPTRLD_VMFAIL;
}
- vmx->nested.dirty_vmcs12 = true;
vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
evmcs_gpa_changed = true;
@@ -2059,14 +2027,10 @@ void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (vmx->nested.hv_evmcs) {
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
copy_vmcs12_to_enlightened(vmx);
- /* All fields are clean */
- vmx->nested.hv_evmcs->hv_clean_fields |=
- HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
- } else {
+ else
copy_vmcs12_to_shadow(vmx);
- }
vmx->nested.need_vmcs12_to_shadow_sync = false;
}
@@ -2211,7 +2175,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
u32 exec_control;
u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
- if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
+ if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
prepare_vmcs02_early_rare(vmx, vmcs12);
/*
@@ -2280,7 +2244,8 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_ENABLE_VMFUNC);
+ SECONDARY_EXEC_ENABLE_VMFUNC |
+ SECONDARY_EXEC_TSC_SCALING);
if (nested_cpu_has(vmcs12,
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
exec_control |= vmcs12->secondary_vm_exec_control;
@@ -2306,6 +2271,9 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
+ if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
+ vmx_write_encls_bitmap(&vmx->vcpu, vmcs12);
+
secondary_exec_controls_set(vmx, exec_control);
}
@@ -2488,18 +2456,18 @@ static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
* is assigned to entry_failure_code on failure.
*/
static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ bool from_vmentry,
enum vm_entry_failure_code *entry_failure_code)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
bool load_guest_pdptrs_vmcs12 = false;
- if (vmx->nested.dirty_vmcs12 || hv_evmcs) {
+ if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
prepare_vmcs02_rare(vmx, vmcs12);
vmx->nested.dirty_vmcs12 = false;
- load_guest_pdptrs_vmcs12 = !hv_evmcs ||
- !(hv_evmcs->hv_clean_fields &
+ load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) ||
+ !(vmx->nested.hv_evmcs->hv_clean_fields &
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
}
@@ -2532,10 +2500,18 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
}
- vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+ vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
+ vcpu->arch.l1_tsc_offset,
+ vmx_get_l2_tsc_offset(vcpu),
+ vmx_get_l2_tsc_multiplier(vcpu));
+ vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
+ vcpu->arch.l1_tsc_scaling_ratio,
+ vmx_get_l2_tsc_multiplier(vcpu));
+
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (kvm_has_tsc_control)
- decache_tsc_multiplier(vmx);
+ vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
@@ -2572,7 +2548,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
/* Shadow page tables on either EPT or shadow page tables. */
if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
- entry_failure_code))
+ from_vmentry, entry_failure_code))
return -EINVAL;
/*
@@ -2604,6 +2580,17 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
kvm_rsp_write(vcpu, vmcs12->guest_rsp);
kvm_rip_write(vcpu, vmcs12->guest_rip);
+
+ /*
+ * It was observed that genuine Hyper-V running in L1 doesn't reset
+ * 'hv_clean_fields' by itself; it only sets the corresponding dirty
+ * bits when it changes a field in the eVMCS. Mark all fields as
+ * clean here.
+ */
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
+ vmx->nested.hv_evmcs->hv_clean_fields |=
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
+
return 0;
}
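
The new TSC programming above folds L1's and L2's scaling into one offset/multiplier pair: with frac fractional bits, l2_tsc = ((host_tsc * m1 >> frac) + o1) * m2 >> frac + o2, which collapses to multiplier (m1 * m2) >> frac and offset ((o1 * m2) >> frac) + o2. A standalone check of that identity (48 fractional bits as on SVM; kvm_calc_nested_tsc_offset()/_multiplier() are the kernel helpers this mirrors, the values are illustrative, and unsigned __int128 is a GCC/Clang extension):

#include <stdint.h>
#include <stdio.h>

#define FRAC 48

static uint64_t mul_shr(uint64_t a, uint64_t b)
{
        return (uint64_t)(((unsigned __int128)a * b) >> FRAC);
}

int main(void)
{
        uint64_t m1 = 1ULL << FRAC, m2 = 3ULL << (FRAC - 1); /* 1.0 and 1.5 */
        uint64_t o1 = 1000, o2 = 500, host_tsc = 1000000;

        /* two-step: host -> L1 -> L2 */
        uint64_t l1 = mul_shr(host_tsc, m1) + o1;
        uint64_t l2 = mul_shr(l1, m2) + o2;

        /* merged, as prepare_vmcs02() programs into vmcs02 */
        uint64_t merged_mul = mul_shr(m1, m2);
        uint64_t merged_off = mul_shr(o1, m2) + o2;
        uint64_t l2b = mul_shr(host_tsc, merged_mul) + merged_off;

        printf("%llu %llu\n", (unsigned long long)l2,
               (unsigned long long)l2b); /* both 1502000 */
        return 0;
}
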
@@ -3093,20 +3080,20 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
* L2 was running), map it here to make sure vmcs12 changes are
* properly reflected.
*/
- if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) {
+ if (vmx->nested.enlightened_vmcs_enabled &&
+ vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) {
enum nested_evmptrld_status evmptrld_status =
nested_vmx_handle_enlightened_vmptrld(vcpu, false);
if (evmptrld_status == EVMPTRLD_VMFAIL ||
- evmptrld_status == EVMPTRLD_ERROR) {
- pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
- __func__);
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror =
- KVM_INTERNAL_ERROR_EMULATION;
- vcpu->run->internal.ndata = 0;
+ evmptrld_status == EVMPTRLD_ERROR)
return false;
- }
+
+ /*
+ * Post migration, VMCS12 always provides the most up-to-date
+ * information; copy it to the eVMCS upon entry.
+ */
+ vmx->nested.need_vmcs12_to_shadow_sync = true;
}
return true;
@@ -3120,6 +3107,18 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
struct page *page;
u64 hpa;
+ if (!vcpu->arch.pdptrs_from_userspace &&
+ !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
+ /*
+ * Reload the guest's PDPTRs since after a migration
+ * the guest CR3 might be restored prior to setting the nested
+ * state, which can lead to loading the wrong PDPTRs.
+ */
+ if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
+ return false;
+ }
+
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
/*
* Translate L1 physical address to host physical
@@ -3182,6 +3181,15 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
offset_in_page(vmcs12->posted_intr_desc_addr));
vmcs_write64(POSTED_INTR_DESC_ADDR,
pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
+ } else {
+ /*
+ * Defer the KVM_INTERNAL_EXIT until KVM tries to
+ * access the contents of the VMCS12 posted interrupt
+ * descriptor. (Note that KVM may do this when it
+ * should not, per the architectural specification.)
+ */
+ vmx->nested.pi_desc = NULL;
+ pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR);
}
}
if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
@@ -3194,8 +3202,16 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
- if (!nested_get_evmcs_page(vcpu))
+ if (!nested_get_evmcs_page(vcpu)) {
+ pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
+ __func__);
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror =
+ KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+
return false;
+ }
if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
return false;
@@ -3353,10 +3369,8 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
}
enter_guest_mode(vcpu);
- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
- vcpu->arch.tsc_offset += vmcs12->tsc_offset;
- if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) {
+ if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) {
exit_reason.basic = EXIT_REASON_INVALID_STATE;
vmcs12->exit_qualification = entry_failure_code;
goto vmentry_fail_vmexit_guest_mode;
@@ -3436,7 +3450,7 @@ vmentry_fail_vmexit:
load_vmcs12_host_state(vcpu, vmcs12);
vmcs12->vm_exit_reason = exit_reason.full;
- if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
+ if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
vmx->nested.need_vmcs12_to_shadow_sync = true;
return NVMX_VMENTRY_VMEXIT;
}
@@ -3464,7 +3478,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
return nested_vmx_failInvalid(vcpu);
}
- if (CC(!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull))
+ if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
+ vmx->nested.current_vmptr == -1ull))
return nested_vmx_failInvalid(vcpu);
vmcs12 = get_vmcs12(vcpu);
@@ -3478,8 +3493,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
if (CC(vmcs12->hdr.shadow_vmcs))
return nested_vmx_failInvalid(vcpu);
- if (vmx->nested.hv_evmcs) {
- copy_enlightened_to_vmcs12(vmx);
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
+ copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields);
/* Enlightened VMCS doesn't have launch state */
vmcs12->launch_state = !launch;
} else if (enable_shadow_vmcs) {
@@ -3537,7 +3552,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
* snapshot restore (migration).
*
* In this flow, it is assumed that vmcs12 cache was
- * trasferred as part of captured nVMX state and should
+ * transferred as part of captured nVMX state and should
* therefore not be read from guest memory (which may not
* exist on destination host yet).
*/
@@ -3679,25 +3694,29 @@ void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
}
}
-static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
+static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int max_irr;
void *vapic_page;
u16 status;
- if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
- return;
+ if (!vmx->nested.pi_pending)
+ return 0;
+
+ if (!vmx->nested.pi_desc)
+ goto mmio_needed;
vmx->nested.pi_pending = false;
+
if (!pi_test_and_clear_on(vmx->nested.pi_desc))
- return;
+ return 0;
max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
if (max_irr != 256) {
vapic_page = vmx->nested.virtual_apic_map.hva;
if (!vapic_page)
- return;
+ goto mmio_needed;
__kvm_apic_update_irr(vmx->nested.pi_desc->pir,
vapic_page, &max_irr);
@@ -3710,6 +3729,11 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
}
nested_mark_vmcs12_pages_dirty(vcpu);
+ return 0;
+
+mmio_needed:
+ kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL);
+ return -ENXIO;
}
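
max_irr above is the highest vector set in the 256-bit posted-interrupt request (PIR) bitmap, with 256 meaning empty. A standalone re-implementation of that scan (find_last_bit() semantics reproduced with __builtin_clzll; the posted vectors are illustrative):

#include <stdint.h>
#include <stdio.h>

static int find_last_bit256(const uint64_t pir[4])
{
        for (int w = 3; w >= 0; w--)
                if (pir[w])
                        return w * 64 + 63 - __builtin_clzll(pir[w]);
        return 256; /* bitmap empty */
}

int main(void)
{
        uint64_t pir[4] = { 0 };

        pir[0] |= 1ULL << 32;   /* vector 32 posted */
        pir[2] |= 1ULL << 5;    /* vector 133 posted */
        printf("max_irr = %d\n", find_last_bit256(pir)); /* 133 */
        return 0;
}
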
static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
@@ -3810,9 +3834,15 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
/*
* Process any exceptions that are not debug traps before MTF.
+ *
+ * Note that only a pending nested run can block a pending exception.
+ * Otherwise an injected NMI/interrupt should either be
+ * lost or delivered to the nested hypervisor via IDT_VECTORING_INFO
+ * while the pending exception is delivered.
*/
if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) {
- if (block_nested_events)
+ if (vmx->nested.nested_run_pending)
return -EBUSY;
if (!nested_vmx_check_exception(vcpu, &exit_qual))
goto no_vmexit;
@@ -3829,7 +3859,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
}
if (vcpu->arch.exception.pending) {
- if (block_nested_events)
+ if (vmx->nested.nested_run_pending)
return -EBUSY;
if (!nested_vmx_check_exception(vcpu, &exit_qual))
goto no_vmexit;
@@ -3878,8 +3908,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
}
no_vmexit:
- vmx_complete_nested_posted_interrupt(vcpu);
- return 0;
+ return vmx_complete_nested_posted_interrupt(vcpu);
}
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
@@ -4023,10 +4052,11 @@ static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (vmx->nested.hv_evmcs)
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
- vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs;
+ vmx->nested.need_sync_vmcs02_to_vmcs12_rare =
+ !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);
vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
@@ -4105,6 +4135,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
{
/* update exit information fields: */
vmcs12->vm_exit_reason = vm_exit_reason;
+ if (to_vmx(vcpu)->exit_reason.enclave_mode)
+ vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE;
vmcs12->exit_qualification = exit_qualification;
vmcs12->vm_exit_intr_info = exit_intr_info;
@@ -4195,7 +4227,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
* Only PDPTE load can fail as the value of cr3 was checked on entry and
* couldn't have changed.
*/
- if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &ignored))
+ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored))
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
@@ -4422,7 +4454,18 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
/* trying to cancel vmlaunch/vmresume is a bug */
WARN_ON_ONCE(vmx->nested.nested_run_pending);
- kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
+ /* Similarly, triple faults in L2 should never escape. */
+ WARN_ON_ONCE(kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu));
+
+ if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
+ /*
+ * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
+ * Enlightened VMCS after migration and we still need to
+ * do that when something is forcing L2->L1 exit prior to
+ * the first L2 run.
+ */
+ (void)nested_get_evmcs_page(vcpu);
+ }
/* Service the TLB flush request for L2 before switching to L1. */
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
@@ -4441,8 +4484,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
if (nested_cpu_has_preemption_timer(vmcs12))
hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
- if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
- vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
+ if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) {
+ vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;
+ if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
+ vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
+ }
if (likely(!vmx->fail)) {
sync_vmcs02_to_vmcs12(vcpu, vmcs12);
@@ -4479,12 +4525,12 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+ if (kvm_has_tsc_control)
+ vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
+
if (vmx->nested.l1_tpr_threshold != -1)
vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
- if (kvm_has_tsc_control)
- decache_tsc_multiplier(vmx);
-
if (vmx->nested.change_vmcs01_virtual_apic_mode) {
vmx->nested.change_vmcs01_virtual_apic_mode = false;
vmx_set_virtual_apic_mode(vcpu);
@@ -4510,7 +4556,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
}
if ((vm_exit_reason != -1) &&
- (enable_shadow_vmcs || vmx->nested.hv_evmcs))
+ (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
vmx->nested.need_vmcs12_to_shadow_sync = true;
/* in case we halted in L2 */
@@ -4558,6 +4604,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
vmx->fail = 0;
}
+static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu)
+{
+ nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
+}
+
/*
* Decode the memory-address operand of a vmx instruction, as recorded on an
* exit caused by such an instruction (run by a guest hypervisor).
@@ -4960,6 +5011,8 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
vmptr + offsetof(struct vmcs12,
launch_state),
&zero, sizeof(zero));
+ } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) {
+ nested_release_evmcs(vcpu);
}
return nested_vmx_succeed(vcpu);
@@ -5005,7 +5058,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
return nested_vmx_failInvalid(vcpu);
/* Decode instruction info and find the field to read */
- field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
+ field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
offset = vmcs_field_to_offset(field);
if (offset < 0)
@@ -5023,7 +5076,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
* on the guest's mode (32 or 64 bit), not on the given field's length.
*/
if (instr_info & BIT(10)) {
- kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value);
+ kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value);
} else {
len = is_64_bit_mode(vcpu) ? 8 : 4;
if (get_vmx_mem_address(vcpu, exit_qualification,
@@ -5097,7 +5150,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
return nested_vmx_failInvalid(vcpu);
if (instr_info & BIT(10))
- value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf));
+ value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf));
else {
len = is_64_bit_mode(vcpu) ? 8 : 4;
if (get_vmx_mem_address(vcpu, exit_qualification,
@@ -5108,7 +5161,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
return kvm_handle_memory_failure(vcpu, r, &e);
}
- field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
+ field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
offset = vmcs_field_to_offset(field);
if (offset < 0)
@@ -5201,7 +5254,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
/* Forbid normal VMPTRLD if Enlightened version was used */
- if (vmx->nested.hv_evmcs)
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
return 1;
if (vmx->nested.current_vmptr != vmptr) {
@@ -5257,7 +5310,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
+ if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr)))
return 1;
if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
@@ -5305,7 +5358,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
return 1;
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+ type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
@@ -5385,7 +5438,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
return 1;
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+ type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
types = (vmx->nested.msrs.vpid_caps &
VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
@@ -5434,8 +5487,8 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
/*
* Sync the shadow page tables if EPT is disabled, L1 is invalidating
- * linear mappings for L2 (tagged with L2's VPID). Free all roots as
- * VPIDs are not tracked in the MMU role.
+ * linear mappings for L2 (tagged with L2's VPID). Free all guest
+ * roots as VPIDs are not tracked in the MMU role.
*
* Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share
* an MMU when EPT is disabled.
@@ -5443,8 +5496,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 * TODO: sync only the affected SPTEs for INVVPID_INDIVIDUAL_ADDR.
*/
if (!enable_ept)
- kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu,
- KVM_MMU_ROOTS_ALL);
+ kvm_mmu_free_guest_mode_roots(vcpu, &vcpu->arch.root_mmu);
return nested_vmx_succeed(vcpu);
}
@@ -5454,23 +5506,16 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
{
u32 index = kvm_rcx_read(vcpu);
u64 new_eptp;
- bool accessed_dirty;
- struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
- if (!nested_cpu_has_eptp_switching(vmcs12) ||
- !nested_cpu_has_ept(vmcs12))
+ if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12)))
return 1;
-
if (index >= VMFUNC_EPTP_ENTRIES)
return 1;
-
if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
&new_eptp, index * 8, 8))
return 1;
- accessed_dirty = !!(new_eptp & VMX_EPTP_AD_ENABLE_BIT);
-
/*
* If the (L2) guest does a vmfunc to the currently
* active ept pointer, we don't have to do anything else
@@ -5479,16 +5524,11 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
if (!nested_vmx_check_eptp(vcpu, new_eptp))
return 1;
- kvm_mmu_unload(vcpu);
- mmu->ept_ad = accessed_dirty;
- mmu->mmu_role.base.ad_disabled = !accessed_dirty;
vmcs12->ept_pointer = new_eptp;
- /*
- * TODO: Check what's the correct approach in case
- * mmu reload fails. Currently, we just let the next
- * reload potentially fail
- */
- kvm_mmu_reload(vcpu);
+ nested_ept_new_eptp(vcpu);
+
+ if (!nested_cpu_has_vpid(vmcs12))
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
}
return 0;
@@ -5511,7 +5551,17 @@ static int handle_vmfunc(struct kvm_vcpu *vcpu)
}
vmcs12 = get_vmcs12(vcpu);
- if ((vmcs12->vm_function_control & (1 << function)) == 0)
+
+ /*
+ * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC
+ * is enabled in vmcs02 if and only if it's enabled in vmcs12.
+ */
+ if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ if (!(vmcs12->vm_function_control & BIT_ULL(function)))
goto fail;
switch (function) {
@@ -5646,7 +5696,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
reg = (exit_qualification >> 8) & 15;
- val = kvm_register_readl(vcpu, reg);
+ val = kvm_register_read(vcpu, reg);
switch (cr) {
case 0:
if (vmcs12->cr0_guest_host_mask &
@@ -5705,6 +5755,21 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
return false;
}
+static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ u32 encls_leaf;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) ||
+ !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING))
+ return false;
+
+ encls_leaf = kvm_rax_read(vcpu);
+ if (encls_leaf > 62)
+ encls_leaf = 63;
+ return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf);
+}
+
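
The clamp above reflects the architectural layout of the ENCLS-exiting bitmap: leaves 63 and higher are all governed by bit 63, so an out-of-range leaf is folded onto that bit before testing. A standalone sketch of the check (the function name is illustrative):

#include <stdbool.h>
#include <stdint.h>

/* Leaves > 62 all alias bit 63 of the ENCLS-exiting bitmap. */
static bool encls_leaf_wants_exit(uint64_t exiting_bitmap, uint32_t leaf)
{
	if (leaf > 62)
		leaf = 63;
	return exiting_bitmap & (1ull << leaf);
}
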
static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12, gpa_t bitmap)
{
@@ -5769,6 +5834,9 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
else if (is_breakpoint(intr_info) &&
vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
return true;
+ else if (is_alignment_check(intr_info) &&
+ !vmx_guest_inject_ac(vcpu))
+ return true;
return false;
case EXIT_REASON_EXTERNAL_INTERRUPT:
return true;
@@ -5801,9 +5869,6 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
case EXIT_REASON_VMFUNC:
/* VM functions are emulated through L2->L0 vmexits. */
return true;
- case EXIT_REASON_ENCLS:
- /* SGX is never exposed to L1 */
- return true;
default:
break;
}
@@ -5927,6 +5992,8 @@ static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
case EXIT_REASON_TPAUSE:
return nested_cpu_has2(vmcs12,
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
+ case EXIT_REASON_ENCLS:
+ return nested_vmx_exit_handled_encls(vcpu, vmcs12);
default:
return true;
}
@@ -6020,7 +6087,8 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
if (vmx_has_valid_vmcs12(vcpu)) {
kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
- if (vmx->nested.hv_evmcs)
+ /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
+ if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
if (is_guest_mode(vcpu) &&
@@ -6076,8 +6144,15 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
} else {
copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
if (!vmx->nested.need_vmcs12_to_shadow_sync) {
- if (vmx->nested.hv_evmcs)
- copy_enlightened_to_vmcs12(vmx);
+ if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
+ /*
+ * L1 hypervisor is not obliged to keep eVMCS
+ * clean fields data always up-to-date while
+ * not in guest mode, 'hv_clean_fields' is only
+ * supposed to be actual upon vmentry so we need
+ * to ignore it here and do full copy.
+ */
+ copy_enlightened_to_vmcs12(vmx, 0);
else if (enable_shadow_vmcs)
copy_shadow_to_vmcs12(vmx);
}
@@ -6219,6 +6294,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
* restored yet. EVMCS will be mapped from
* nested_get_vmcs12_pages().
*/
+ vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
} else {
return -EINVAL;
@@ -6303,6 +6379,40 @@ void nested_vmx_set_vmcs_shadowing_bitmap(void)
}
/*
+ * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6. Undo
+ * that madness to get the encoding for comparison.
+ */
+#define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10)))
+
+static u64 nested_vmx_calc_vmcs_enum_msr(void)
+{
+ /*
+ * Note these are the so called "index" of the VMCS field encoding, not
+ * the index into vmcs12.
+ */
+ unsigned int max_idx, idx;
+ int i;
+
+ /*
+ * For better or worse, KVM allows VMREAD/VMWRITE to all fields in
+ * vmcs12, regardless of whether or not the associated feature is
+ * exposed to L1. Simply find the field with the highest index.
+ */
+ max_idx = 0;
+ for (i = 0; i < nr_vmcs12_fields; i++) {
+ /* The vmcs12 table is very, very sparsely populated. */
+ if (!vmcs_field_to_offset_table[i])
+ continue;
+
+ idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
+ if (idx > max_idx)
+ max_idx = idx;
+ }
+
+ return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT;
+}
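
To make the rotation concrete: the offset-table index is the 16-bit encoding rotated left by 6, and VMCS12_IDX_TO_ENC() is the inverse rotation. A quick sketch that verifies the round trip over all 16-bit values (helper names are illustrative):

#include <assert.h>
#include <stdint.h>

static uint16_t enc_to_idx(uint16_t enc)	/* rotate left by 6 */
{
	return (uint16_t)((enc << 6) | (enc >> 10));
}

static uint16_t idx_to_enc(uint16_t idx)	/* rotate right by 6 */
{
	return (uint16_t)((idx >> 6) | (idx << 10));
}

int main(void)
{
	for (uint32_t enc = 0; enc <= 0xffff; enc++)
		assert(idx_to_enc(enc_to_idx((uint16_t)enc)) == (uint16_t)enc);
	return 0;
}
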
+
+/*
* nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
* returned for the various VMX controls MSRs when nested VMX is enabled.
* The same values should also be used to verify that vmcs12 control fields are
@@ -6438,7 +6548,8 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
SECONDARY_EXEC_RDRAND_EXITING |
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_RDSEED_EXITING |
- SECONDARY_EXEC_XSAVES;
+ SECONDARY_EXEC_XSAVES |
+ SECONDARY_EXEC_TSC_SCALING;
/*
* We can emulate "VMCS shadowing," even if the hardware
@@ -6502,6 +6613,9 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
msrs->secondary_ctls_high |=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ if (enable_sgx)
+ msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING;
+
/* miscellaneous data */
rdmsr(MSR_IA32_VMX_MISC,
msrs->misc_low,
@@ -6543,8 +6657,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
- /* highest index: VMX_PREEMPTION_TIMER_VALUE */
- msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
+ msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr();
}
void nested_vmx_hardware_unsetup(void)
@@ -6599,6 +6712,7 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
struct kvm_x86_nested_ops vmx_nested_ops = {
.check_events = vmx_check_nested_events,
.hv_timer_pending = nested_vmx_preemption_timer_pending,
+ .triple_fault = nested_vmx_triple_fault,
.get_state = vmx_get_nested_state,
.set_state = vmx_set_nested_state,
.get_nested_state_pages = vmx_get_nested_state_pages,
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 197148d76b8f..b69a80f43b37 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -56,14 +56,9 @@ static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- /*
- * In case we do two consecutive get/set_nested_state()s while L2 was
- * running hv_evmcs may end up not being mapped (we map it from
- * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always
- * have vmcs12 if it is true.
- */
- return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
- vmx->nested.hv_evmcs;
+ /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
+ return vmx->nested.current_vmptr != -1ull ||
+ vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID;
}
static inline u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
@@ -244,6 +239,11 @@ static inline bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
PIN_BASED_EXT_INTR_MASK;
}
+static inline bool nested_cpu_has_encls_exit(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING);
+}
+
/*
* if fixed0[i] == 1: val[i] must be 1
* if fixed1[i] == 0: val[i] must be 0
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index 4831bc44ce66..5f81ef092bd4 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -10,7 +10,7 @@
#include "vmx.h"
/*
- * We maintian a per-CPU linked-list of vCPU, so in wakeup_handler() we
+ * We maintain a per-CPU linked-list of vCPU, so in wakeup_handler() we
 * can find which vCPU should be woken up.
*/
static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
@@ -238,6 +238,20 @@ bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
/*
+ * Bail out of the block loop if the VM has an assigned
+ * device, but the blocking vCPU didn't reconfigure the
+ * PI.NV to the wakeup vector, i.e. the assigned device
+ * came along after the initial check in pi_pre_block().
+ */
+void vmx_pi_start_assignment(struct kvm *kvm)
+{
+ if (!irq_remapping_cap(IRQ_POSTING_CAP))
+ return;
+
+ kvm_make_all_cpus_request(kvm, KVM_REQ_UNBLOCK);
+}
+
+/*
* pi_update_irte - set IRTE for Posted-Interrupts
*
* @kvm: kvm
diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h
index 0bdc41391c5b..7f7b2326caf5 100644
--- a/arch/x86/kvm/vmx/posted_intr.h
+++ b/arch/x86/kvm/vmx/posted_intr.h
@@ -95,5 +95,6 @@ void __init pi_init_cpu(int cpu);
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu);
int pi_update_irte(struct kvm *kvm, unsigned int host_irq, uint32_t guest_irq,
bool set);
+void vmx_pi_start_assignment(struct kvm *kvm);
#endif /* __KVM_X86_VMX_POSTED_INTR_H */
diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
new file mode 100644
index 000000000000..6693ebdc0770
--- /dev/null
+++ b/arch/x86/kvm/vmx/sgx.c
@@ -0,0 +1,502 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2021 Intel Corporation. */
+
+#include <asm/sgx.h>
+
+#include "cpuid.h"
+#include "kvm_cache_regs.h"
+#include "nested.h"
+#include "sgx.h"
+#include "vmx.h"
+#include "x86.h"
+
+bool __read_mostly enable_sgx = 1;
+module_param_named(sgx, enable_sgx, bool, 0444);
+
+/* Initial value of guest's virtual SGX_LEPUBKEYHASHn MSRs */
+static u64 sgx_pubkey_hash[4] __ro_after_init;
+
+/*
+ * ENCLS's memory operands use a fixed segment (DS) and a fixed
+ * address size based on the mode. Related prefixes are ignored.
+ */
+static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset,
+ int size, int alignment, gva_t *gva)
+{
+ struct kvm_segment s;
+ bool fault;
+
+ /* Skip vmcs.GUEST_DS retrieval for 64-bit mode to avoid VMREADs. */
+ *gva = offset;
+ if (!is_long_mode(vcpu)) {
+ vmx_get_segment(vcpu, &s, VCPU_SREG_DS);
+ *gva += s.base;
+ }
+
+ if (!IS_ALIGNED(*gva, alignment)) {
+ fault = true;
+ } else if (likely(is_long_mode(vcpu))) {
+ fault = is_noncanonical_address(*gva, vcpu);
+ } else {
+ *gva &= 0xffffffff;
+ fault = (s.unusable) ||
+ (s.type != 2 && s.type != 3) ||
+ (*gva > s.limit) ||
+ ((s.base != 0 || s.limit != 0xffffffff) &&
+ (((u64)*gva + size - 1) > s.limit + 1));
+ }
+ if (fault)
+ kvm_inject_gp(vcpu, 0);
+ return fault ? -EINVAL : 0;
+}
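
In long mode the only address check that applies above is canonicality. A hedged sketch of that test, assuming 48 implemented virtual-address bits (no LA57) and GCC/Clang's arithmetic right shift on signed types:

#include <stdbool.h>
#include <stdint.h>

/* A VA is canonical iff bits 63:va_bits are a sign extension of the top
 * implemented bit; sign-extend and compare against the original. */
static bool is_noncanonical(uint64_t va, unsigned int va_bits)
{
	int64_t sext = (int64_t)(va << (64 - va_bits)) >> (64 - va_bits);

	return (uint64_t)sext != va;
}
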
+
+static void sgx_handle_emulation_failure(struct kvm_vcpu *vcpu, u64 addr,
+ unsigned int size)
+{
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 2;
+ vcpu->run->internal.data[0] = addr;
+ vcpu->run->internal.data[1] = size;
+}
+
+static int sgx_read_hva(struct kvm_vcpu *vcpu, unsigned long hva, void *data,
+ unsigned int size)
+{
+ if (__copy_from_user(data, (void __user *)hva, size)) {
+ sgx_handle_emulation_failure(vcpu, hva, size);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int sgx_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, bool write,
+ gpa_t *gpa)
+{
+ struct x86_exception ex;
+
+ if (write)
+ *gpa = kvm_mmu_gva_to_gpa_write(vcpu, gva, &ex);
+ else
+ *gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &ex);
+
+ if (*gpa == UNMAPPED_GVA) {
+ kvm_inject_emulated_page_fault(vcpu, &ex);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int sgx_gpa_to_hva(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned long *hva)
+{
+ *hva = kvm_vcpu_gfn_to_hva(vcpu, PFN_DOWN(gpa));
+ if (kvm_is_error_hva(*hva)) {
+ sgx_handle_emulation_failure(vcpu, gpa, 1);
+ return -EFAULT;
+ }
+
+ *hva |= gpa & ~PAGE_MASK;
+
+ return 0;
+}
+
+static int sgx_inject_fault(struct kvm_vcpu *vcpu, gva_t gva, int trapnr)
+{
+ struct x86_exception ex;
+
+ /*
+ * A non-EPCM #PF indicates a bad userspace HVA. This *should* check
+ * for PFEC.SGX and not assume any #PF on SGX2 originated in the EPC,
+ * but the error code isn't (yet) plumbed through the ENCLS helpers.
+ */
+ if (trapnr == PF_VECTOR && !boot_cpu_has(X86_FEATURE_SGX2)) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+ return 0;
+ }
+
+ /*
+ * If the guest thinks it's running on SGX2 hardware, inject an SGX
+ * #PF if the fault matches an EPCM fault signature (#GP on SGX1,
+ * #PF on SGX2). The assumption is that EPCM faults are much more
+ * likely than a bad userspace address.
+ */
+ if ((trapnr == PF_VECTOR || !boot_cpu_has(X86_FEATURE_SGX2)) &&
+ guest_cpuid_has(vcpu, X86_FEATURE_SGX2)) {
+ memset(&ex, 0, sizeof(ex));
+ ex.vector = PF_VECTOR;
+ ex.error_code = PFERR_PRESENT_MASK | PFERR_WRITE_MASK |
+ PFERR_SGX_MASK;
+ ex.address = gva;
+ ex.error_code_valid = true;
+ ex.nested_page_fault = false;
+ kvm_inject_page_fault(vcpu, &ex);
+ } else {
+ kvm_inject_gp(vcpu, 0);
+ }
+ return 1;
+}
+
+static int __handle_encls_ecreate(struct kvm_vcpu *vcpu,
+ struct sgx_pageinfo *pageinfo,
+ unsigned long secs_hva,
+ gva_t secs_gva)
+{
+ struct sgx_secs *contents = (struct sgx_secs *)pageinfo->contents;
+ struct kvm_cpuid_entry2 *sgx_12_0, *sgx_12_1;
+ u64 attributes, xfrm, size;
+ u32 miscselect;
+ u8 max_size_log2;
+ int trapnr, ret;
+
+ sgx_12_0 = kvm_find_cpuid_entry(vcpu, 0x12, 0);
+ sgx_12_1 = kvm_find_cpuid_entry(vcpu, 0x12, 1);
+ if (!sgx_12_0 || !sgx_12_1) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+ return 0;
+ }
+
+ miscselect = contents->miscselect;
+ attributes = contents->attributes;
+ xfrm = contents->xfrm;
+ size = contents->size;
+
+ /* Enforce restriction of access to the PROVISIONKEY. */
+ if (!vcpu->kvm->arch.sgx_provisioning_allowed &&
+ (attributes & SGX_ATTR_PROVISIONKEY)) {
+ if (sgx_12_1->eax & SGX_ATTR_PROVISIONKEY)
+ pr_warn_once("KVM: SGX PROVISIONKEY advertised but not allowed\n");
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ /* Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM. */
+ if ((u32)miscselect & ~sgx_12_0->ebx ||
+ (u32)attributes & ~sgx_12_1->eax ||
+ (u32)(attributes >> 32) & ~sgx_12_1->ebx ||
+ (u32)xfrm & ~sgx_12_1->ecx ||
+ (u32)(xfrm >> 32) & ~sgx_12_1->edx) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ /* Enforce CPUID restriction on max enclave size. */
+ max_size_log2 = (attributes & SGX_ATTR_MODE64BIT) ? sgx_12_0->edx >> 8 :
+ sgx_12_0->edx;
+ if (size >= BIT_ULL(max_size_log2)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ /*
+ * sgx_virt_ecreate() returns:
+ * 1) 0: ECREATE was successful
+ * 2) -EFAULT: ECREATE was run but faulted, and trapnr was set to the
+ * exception number.
+ * 3) -EINVAL: access_ok() on @secs_hva failed. This should never
+ * happen as KVM checks host addresses at memslot creation.
+ * sgx_virt_ecreate() has already warned in this case.
+ */
+ ret = sgx_virt_ecreate(pageinfo, (void __user *)secs_hva, &trapnr);
+ if (!ret)
+ return kvm_skip_emulated_instruction(vcpu);
+ if (ret == -EFAULT)
+ return sgx_inject_fault(vcpu, secs_gva, trapnr);
+
+ return ret;
+}
+
+static int handle_encls_ecreate(struct kvm_vcpu *vcpu)
+{
+ gva_t pageinfo_gva, secs_gva;
+ gva_t metadata_gva, contents_gva;
+ gpa_t metadata_gpa, contents_gpa, secs_gpa;
+ unsigned long metadata_hva, contents_hva, secs_hva;
+ struct sgx_pageinfo pageinfo;
+ struct sgx_secs *contents;
+ struct x86_exception ex;
+ int r;
+
+ if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 32, 32, &pageinfo_gva) ||
+ sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva))
+ return 1;
+
+ /*
+ * Copy the PAGEINFO to local memory, its pointers need to be
+ * translated, i.e. we need to do a deep copy/translate.
+ */
+ r = kvm_read_guest_virt(vcpu, pageinfo_gva, &pageinfo,
+ sizeof(pageinfo), &ex);
+ if (r == X86EMUL_PROPAGATE_FAULT) {
+ kvm_inject_emulated_page_fault(vcpu, &ex);
+ return 1;
+ } else if (r != X86EMUL_CONTINUE) {
+ sgx_handle_emulation_failure(vcpu, pageinfo_gva,
+ sizeof(pageinfo));
+ return 0;
+ }
+
+ if (sgx_get_encls_gva(vcpu, pageinfo.metadata, 64, 64, &metadata_gva) ||
+ sgx_get_encls_gva(vcpu, pageinfo.contents, 4096, 4096,
+ &contents_gva))
+ return 1;
+
+ /*
+ * Translate the SECINFO, SOURCE and SECS pointers from GVA to GPA.
+ * Resume the guest on failure to inject a #PF.
+ */
+ if (sgx_gva_to_gpa(vcpu, metadata_gva, false, &metadata_gpa) ||
+ sgx_gva_to_gpa(vcpu, contents_gva, false, &contents_gpa) ||
+ sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa))
+ return 1;
+
+ /*
+ * ...and then to HVA. The order of accesses isn't architectural, i.e.
+ * KVM doesn't have to fully process one address at a time. Exit to
+ * userspace if a GPA is invalid.
+ */
+ if (sgx_gpa_to_hva(vcpu, metadata_gpa, &metadata_hva) ||
+ sgx_gpa_to_hva(vcpu, contents_gpa, &contents_hva) ||
+ sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva))
+ return 0;
+
+ /*
+ * Copy contents into kernel memory to prevent TOCTOU attack. E.g. the
+ * guest could do ECREATE w/ SECS.SGX_ATTR_PROVISIONKEY=0, and
+ * simultaneously set SGX_ATTR_PROVISIONKEY to bypass the check to
+ * enforce restriction of access to the PROVISIONKEY.
+ */
+ contents = (struct sgx_secs *)__get_free_page(GFP_KERNEL_ACCOUNT);
+ if (!contents)
+ return -ENOMEM;
+
+ /* Exit to userspace if copying from a host userspace address fails. */
+ if (sgx_read_hva(vcpu, contents_hva, (void *)contents, PAGE_SIZE)) {
+ free_page((unsigned long)contents);
+ return 0;
+ }
+
+ pageinfo.metadata = metadata_hva;
+ pageinfo.contents = (u64)contents;
+
+ r = __handle_encls_ecreate(vcpu, &pageinfo, secs_hva, secs_gva);
+
+ free_page((unsigned long)contents);
+
+ return r;
+}
+
+static int handle_encls_einit(struct kvm_vcpu *vcpu)
+{
+ unsigned long sig_hva, secs_hva, token_hva, rflags;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ gva_t sig_gva, secs_gva, token_gva;
+ gpa_t sig_gpa, secs_gpa, token_gpa;
+ int ret, trapnr;
+
+ if (sgx_get_encls_gva(vcpu, kvm_rbx_read(vcpu), 1808, 4096, &sig_gva) ||
+ sgx_get_encls_gva(vcpu, kvm_rcx_read(vcpu), 4096, 4096, &secs_gva) ||
+ sgx_get_encls_gva(vcpu, kvm_rdx_read(vcpu), 304, 512, &token_gva))
+ return 1;
+
+ /*
+ * Translate the SIGSTRUCT, SECS and TOKEN pointers from GVA to GPA.
+ * Resume the guest on failure to inject a #PF.
+ */
+ if (sgx_gva_to_gpa(vcpu, sig_gva, false, &sig_gpa) ||
+ sgx_gva_to_gpa(vcpu, secs_gva, true, &secs_gpa) ||
+ sgx_gva_to_gpa(vcpu, token_gva, false, &token_gpa))
+ return 1;
+
+ /*
+ * ...and then to HVA. The order of accesses isn't architectural, i.e.
+ * KVM doesn't have to fully process one address at a time. Exit to
+ * userspace if a GPA is invalid. Note, all structures are aligned and
+ * cannot split pages.
+ */
+ if (sgx_gpa_to_hva(vcpu, sig_gpa, &sig_hva) ||
+ sgx_gpa_to_hva(vcpu, secs_gpa, &secs_hva) ||
+ sgx_gpa_to_hva(vcpu, token_gpa, &token_hva))
+ return 0;
+
+ ret = sgx_virt_einit((void __user *)sig_hva, (void __user *)token_hva,
+ (void __user *)secs_hva,
+ vmx->msr_ia32_sgxlepubkeyhash, &trapnr);
+
+ if (ret == -EFAULT)
+ return sgx_inject_fault(vcpu, secs_gva, trapnr);
+
+ /*
+ * sgx_virt_einit() returns -EINVAL when access_ok() fails on @sig_hva,
+ * @token_hva or @secs_hva. This should never happen as KVM checks host
+ * addresses at memslot creation. sgx_virt_einit() has already warned
+ * in this case, so just return.
+ */
+ if (ret < 0)
+ return ret;
+
+ rflags = vmx_get_rflags(vcpu) & ~(X86_EFLAGS_CF | X86_EFLAGS_PF |
+ X86_EFLAGS_AF | X86_EFLAGS_SF |
+ X86_EFLAGS_OF);
+ if (ret)
+ rflags |= X86_EFLAGS_ZF;
+ else
+ rflags &= ~X86_EFLAGS_ZF;
+ vmx_set_rflags(vcpu, rflags);
+
+ kvm_rax_write(vcpu, ret);
+ return kvm_skip_emulated_instruction(vcpu);
+}
+
+static inline bool encls_leaf_enabled_in_guest(struct kvm_vcpu *vcpu, u32 leaf)
+{
+ if (!enable_sgx || !guest_cpuid_has(vcpu, X86_FEATURE_SGX))
+ return false;
+
+ if (leaf >= ECREATE && leaf <= ETRACK)
+ return guest_cpuid_has(vcpu, X86_FEATURE_SGX1);
+
+ if (leaf >= EAUG && leaf <= EMODT)
+ return guest_cpuid_has(vcpu, X86_FEATURE_SGX2);
+
+ return false;
+}
+
+static inline bool sgx_enabled_in_guest_bios(struct kvm_vcpu *vcpu)
+{
+ const u64 bits = FEAT_CTL_SGX_ENABLED | FEAT_CTL_LOCKED;
+
+ return (to_vmx(vcpu)->msr_ia32_feature_control & bits) == bits;
+}
+
+int handle_encls(struct kvm_vcpu *vcpu)
+{
+ u32 leaf = (u32)kvm_rax_read(vcpu);
+
+ if (!encls_leaf_enabled_in_guest(vcpu, leaf)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ } else if (!sgx_enabled_in_guest_bios(vcpu)) {
+ kvm_inject_gp(vcpu, 0);
+ } else {
+ if (leaf == ECREATE)
+ return handle_encls_ecreate(vcpu);
+ if (leaf == EINIT)
+ return handle_encls_einit(vcpu);
+ WARN(1, "KVM: unexpected exit on ENCLS[%u]", leaf);
+ vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+ vcpu->run->hw.hardware_exit_reason = EXIT_REASON_ENCLS;
+ return 0;
+ }
+ return 1;
+}
+
+void setup_default_sgx_lepubkeyhash(void)
+{
+ /*
+ * Use Intel's default value for Skylake hardware if Launch Control is
+ * not supported, i.e. Intel's hash is hardcoded into silicon, or if
+ * Launch Control is supported and enabled, i.e. mimic the reset value
+ * and let the guest write the MSRs at will. If Launch Control is
+ * supported but disabled, then use the current MSR values as the hash
+ * MSRs exist but are read-only (locked and not writable).
+ */
+ if (!enable_sgx || boot_cpu_has(X86_FEATURE_SGX_LC) ||
+ rdmsrl_safe(MSR_IA32_SGXLEPUBKEYHASH0, &sgx_pubkey_hash[0])) {
+ sgx_pubkey_hash[0] = 0xa6053e051270b7acULL;
+ sgx_pubkey_hash[1] = 0x6cfbe8ba8b3b413dULL;
+ sgx_pubkey_hash[2] = 0xc4916d99f2b3735dULL;
+ sgx_pubkey_hash[3] = 0xd4f8c05909f9bb3bULL;
+ } else {
+ /* MSR_IA32_SGXLEPUBKEYHASH0 is read above */
+ rdmsrl(MSR_IA32_SGXLEPUBKEYHASH1, sgx_pubkey_hash[1]);
+ rdmsrl(MSR_IA32_SGXLEPUBKEYHASH2, sgx_pubkey_hash[2]);
+ rdmsrl(MSR_IA32_SGXLEPUBKEYHASH3, sgx_pubkey_hash[3]);
+ }
+}
+
+void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ memcpy(vmx->msr_ia32_sgxlepubkeyhash, sgx_pubkey_hash,
+ sizeof(sgx_pubkey_hash));
+}
+
+/*
+ * ECREATE must be intercepted to enforce MISCSELECT, ATTRIBUTES and XFRM
+ * restrictions if the guest's allowed-1 settings diverge from hardware.
+ */
+static bool sgx_intercept_encls_ecreate(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *guest_cpuid;
+ u32 eax, ebx, ecx, edx;
+
+ if (!vcpu->kvm->arch.sgx_provisioning_allowed)
+ return true;
+
+ guest_cpuid = kvm_find_cpuid_entry(vcpu, 0x12, 0);
+ if (!guest_cpuid)
+ return true;
+
+ cpuid_count(0x12, 0, &eax, &ebx, &ecx, &edx);
+ if (guest_cpuid->ebx != ebx || guest_cpuid->edx != edx)
+ return true;
+
+ guest_cpuid = kvm_find_cpuid_entry(vcpu, 0x12, 1);
+ if (!guest_cpuid)
+ return true;
+
+ cpuid_count(0x12, 1, &eax, &ebx, &ecx, &edx);
+ if (guest_cpuid->eax != eax || guest_cpuid->ebx != ebx ||
+ guest_cpuid->ecx != ecx || guest_cpuid->edx != edx)
+ return true;
+
+ return false;
+}
+
+void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+{
+ /*
+ * There is no software enable bit for SGX that is virtualized by
+ * hardware, e.g. there's no CR4.SGXE, so when SGX is disabled in the
+ * guest (either by the host or by the guest's BIOS) but enabled in the
+ * host, trap all ENCLS leafs and inject #UD/#GP as needed to emulate
+ * the expected system behavior for ENCLS.
+ */
+ u64 bitmap = -1ull;
+
+ /* Nothing to do if hardware doesn't support SGX */
+ if (!cpu_has_vmx_encls_vmexit())
+ return;
+
+ if (guest_cpuid_has(vcpu, X86_FEATURE_SGX) &&
+ sgx_enabled_in_guest_bios(vcpu)) {
+ if (guest_cpuid_has(vcpu, X86_FEATURE_SGX1)) {
+ bitmap &= ~GENMASK_ULL(ETRACK, ECREATE);
+ if (sgx_intercept_encls_ecreate(vcpu))
+ bitmap |= (1 << ECREATE);
+ }
+
+ if (guest_cpuid_has(vcpu, X86_FEATURE_SGX2))
+ bitmap &= ~GENMASK_ULL(EMODT, EAUG);
+
+ /*
+ * Trap and execute EINIT if launch control is enabled in the
+ * host using the guest's values for launch control MSRs, even
+ * if the guest's values are fixed to hardware default values.
+ * The MSRs are not loaded/saved on VM-Enter/VM-Exit as writing
+ * the MSRs is extraordinarily expensive.
+ */
+ if (boot_cpu_has(X86_FEATURE_SGX_LC))
+ bitmap |= (1 << EINIT);
+
+ if (!vmcs12 && is_guest_mode(vcpu))
+ vmcs12 = get_vmcs12(vcpu);
+ if (vmcs12 && nested_cpu_has_encls_exit(vmcs12))
+ bitmap |= vmcs12->encls_exiting_bitmap;
+ }
+ vmcs_write64(ENCLS_EXITING_BITMAP, bitmap);
+}
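
A condensed model of the bitmap composition above may help: start from "intercept everything", carve out the leaf ranges the guest may run natively, then re-add EINIT and L1's requested intercepts. This is a sketch, not KVM's code; leaf numbers follow the SDM (ECREATE=0x0, EINIT=0x2, ETRACK=0xC, EAUG=0xD, EMODT=0xF) and the conditional ECREATE re-intercept is omitted for brevity:

#include <stdint.h>

#define GENMASK_ULL(h, l) \
	(((~0ull) << (l)) & ((~0ull) >> (63 - (h))))

enum { ECREATE = 0x0, EINIT = 0x2, ETRACK = 0xC, EAUG = 0xD, EMODT = 0xF };

static uint64_t encls_bitmap(int sgx1, int sgx2, int host_lc,
			     uint64_t vmcs12_bitmap)
{
	uint64_t bitmap = ~0ull;	/* default: trap every leaf */

	if (sgx1)
		bitmap &= ~GENMASK_ULL(ETRACK, ECREATE);
	if (sgx2)
		bitmap &= ~GENMASK_ULL(EMODT, EAUG);
	if (host_lc)
		bitmap |= 1ull << EINIT;	/* trap-and-execute EINIT */

	return bitmap | vmcs12_bitmap;	/* honor L1's intercepts */
}
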
diff --git a/arch/x86/kvm/vmx/sgx.h b/arch/x86/kvm/vmx/sgx.h
new file mode 100644
index 000000000000..a400888b376d
--- /dev/null
+++ b/arch/x86/kvm/vmx/sgx.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __KVM_X86_SGX_H
+#define __KVM_X86_SGX_H
+
+#include <linux/kvm_host.h>
+
+#include "capabilities.h"
+#include "vmx_ops.h"
+
+#ifdef CONFIG_X86_SGX_KVM
+extern bool __read_mostly enable_sgx;
+
+int handle_encls(struct kvm_vcpu *vcpu);
+
+void setup_default_sgx_lepubkeyhash(void);
+void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu);
+
+void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12);
+#else
+#define enable_sgx 0
+
+static inline void setup_default_sgx_lepubkeyhash(void) { }
+static inline void vcpu_setup_sgx_lepubkeyhash(struct kvm_vcpu *vcpu) { }
+
+static inline void vmx_write_encls_bitmap(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ /* Nothing to do if hardware doesn't support SGX */
+ if (cpu_has_vmx_encls_vmexit())
+ vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
+}
+#endif
+
+#endif /* __KVM_X86_SGX_H */
diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h
index 1472c6c376f7..4b9957e2bf5b 100644
--- a/arch/x86/kvm/vmx/vmcs.h
+++ b/arch/x86/kvm/vmx/vmcs.h
@@ -117,6 +117,11 @@ static inline bool is_gp_fault(u32 intr_info)
return is_exception_n(intr_info, GP_VECTOR);
}
+static inline bool is_alignment_check(u32 intr_info)
+{
+ return is_exception_n(intr_info, AC_VECTOR);
+}
+
static inline bool is_machine_check(u32 intr_info)
{
return is_exception_n(intr_info, MC_VECTOR);
@@ -164,4 +169,12 @@ static inline int vmcs_field_readonly(unsigned long field)
return (((field >> 10) & 0x3) == 1);
}
+#define VMCS_FIELD_INDEX_SHIFT (1)
+#define VMCS_FIELD_INDEX_MASK GENMASK(9, 1)
+
+static inline unsigned int vmcs_field_index(unsigned long field)
+{
+ return (field & VMCS_FIELD_INDEX_MASK) >> VMCS_FIELD_INDEX_SHIFT;
+}
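
Per the SDM, a VMCS field encoding packs the access type in bit 0, the index in bits 9:1, the field type in bits 11:10, and the width in bits 14:13; the helper above extracts bits 9:1. A worked example, using GUEST_RIP's well-known encoding of 0x681e:

#include <stdio.h>

#define FIELD_INDEX_SHIFT 1
#define FIELD_INDEX_MASK  (0x1ffu << FIELD_INDEX_SHIFT)	/* bits 9:1 */

static unsigned int field_index(unsigned long field)
{
	return (field & FIELD_INDEX_MASK) >> FIELD_INDEX_SHIFT;
}

int main(void)
{
	printf("GUEST_RIP index = %#x\n", field_index(0x681e));	/* 0xf */
	return 0;
}
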
+
#endif /* __KVM_X86_VMX_VMCS_H */
diff --git a/arch/x86/kvm/vmx/vmcs12.c b/arch/x86/kvm/vmx/vmcs12.c
index c8e51c004f78..d9f5d7c56ae3 100644
--- a/arch/x86/kvm/vmx/vmcs12.c
+++ b/arch/x86/kvm/vmx/vmcs12.c
@@ -37,6 +37,7 @@ const unsigned short vmcs_field_to_offset_table[] = {
FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
FIELD64(PML_ADDRESS, pml_address),
FIELD64(TSC_OFFSET, tsc_offset),
+ FIELD64(TSC_MULTIPLIER, tsc_multiplier),
FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
@@ -50,6 +51,7 @@ const unsigned short vmcs_field_to_offset_table[] = {
FIELD64(VMREAD_BITMAP, vmread_bitmap),
FIELD64(VMWRITE_BITMAP, vmwrite_bitmap),
FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
+ FIELD64(ENCLS_EXITING_BITMAP, encls_exiting_bitmap),
FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h
index 80232daf00ff..5e0e1b39f495 100644
--- a/arch/x86/kvm/vmx/vmcs12.h
+++ b/arch/x86/kvm/vmx/vmcs12.h
@@ -69,7 +69,9 @@ struct __packed vmcs12 {
u64 vm_function_control;
u64 eptp_list_address;
u64 pml_address;
- u64 padding64[3]; /* room for future expansion */
+ u64 encls_exiting_bitmap;
+ u64 tsc_multiplier;
+ u64 padding64[1]; /* room for future expansion */
/*
* To allow migration of L1 (complete with its L2 guests) between
* machines of different natural widths (32 or 64 bit), we cannot have
@@ -204,12 +206,6 @@ struct __packed vmcs12 {
#define VMCS12_SIZE KVM_STATE_NESTED_VMX_VMCS_SIZE
/*
- * VMCS12_MAX_FIELD_INDEX is the highest index value used in any
- * supported VMCS12 field encoding.
- */
-#define VMCS12_MAX_FIELD_INDEX 0x17
-
-/*
* For save/restore compatibility, the vmcs12 field offsets must not change.
*/
#define CHECK_OFFSET(field, loc) \
@@ -256,6 +252,8 @@ static inline void vmx_check_vmcs12_offsets(void)
CHECK_OFFSET(vm_function_control, 296);
CHECK_OFFSET(eptp_list_address, 304);
CHECK_OFFSET(pml_address, 312);
+ CHECK_OFFSET(encls_exiting_bitmap, 320);
+ CHECK_OFFSET(tsc_multiplier, 328);
CHECK_OFFSET(cr0_guest_host_mask, 344);
CHECK_OFFSET(cr4_guest_host_mask, 352);
CHECK_OFFSET(cr0_read_shadow, 360);
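
The pattern being enforced here: new 64-bit fields consume padding64[] so that every pre-existing offset stays put, keeping the migration ABI stable. A compile-time sketch of the same idea using C11 static assertions (the struct is a toy, not vmcs12):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct layout {
	uint64_t pml_address;		/* offset 312 in the real vmcs12 */
	uint64_t encls_exiting_bitmap;	/* 320: carved out of padding */
	uint64_t tsc_multiplier;	/* 328: ditto */
	uint64_t padding64[1];		/* room for future expansion */
};

static_assert(offsetof(struct layout, tsc_multiplier) ==
	      offsetof(struct layout, pml_address) + 16,
	      "vmcs12-style offsets must never move");
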
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 50810d471462..927a552393b9 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -36,6 +36,7 @@
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
+#include <asm/idtentry.h>
#include <asm/io.h>
#include <asm/irq_remapping.h>
#include <asm/kexec.h>
@@ -51,12 +52,14 @@
#include "cpuid.h"
#include "evmcs.h"
#include "hyperv.h"
+#include "kvm_onhyperv.h"
#include "irq.h"
#include "kvm_cache_regs.h"
#include "lapic.h"
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
+#include "sgx.h"
#include "trace.h"
#include "vmcs.h"
#include "vmcs12.h"
@@ -99,7 +102,6 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);
-bool __read_mostly enable_apicv = 1;
module_param(enable_apicv, bool, S_IRUGO);
/*
@@ -156,9 +158,11 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
MSR_IA32_SPEC_CTRL,
MSR_IA32_PRED_CMD,
MSR_IA32_TSC,
+#ifdef CONFIG_X86_64
MSR_FS_BASE,
MSR_GS_BASE,
MSR_KERNEL_GS_BASE,
+#endif
MSR_IA32_SYSENTER_CS,
MSR_IA32_SYSENTER_ESP,
MSR_IA32_SYSENTER_EIP,
@@ -361,8 +365,6 @@ static const struct kernel_param_ops vmentry_l1d_flush_ops = {
module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
static u32 vmx_segment_access_rights(struct kvm_segment *var);
-static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
- u32 msr, int type);
void vmx_vmexit(void);
@@ -453,102 +455,10 @@ static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
static unsigned long host_idt_base;
-/*
- * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
- * will emulate SYSCALL in legacy mode if the vendor string in guest
- * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
- * support this emulation, IA32_STAR must always be included in
- * vmx_uret_msrs_list[], even in i386 builds.
- */
-static const u32 vmx_uret_msrs_list[] = {
-#ifdef CONFIG_X86_64
- MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
-#endif
- MSR_EFER, MSR_TSC_AUX, MSR_STAR,
- MSR_IA32_TSX_CTRL,
-};
-
#if IS_ENABLED(CONFIG_HYPERV)
static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444);
-/* check_ept_pointer() should be under protection of ept_pointer_lock. */
-static void check_ept_pointer_match(struct kvm *kvm)
-{
- struct kvm_vcpu *vcpu;
- u64 tmp_eptp = INVALID_PAGE;
- int i;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!VALID_PAGE(tmp_eptp)) {
- tmp_eptp = to_vmx(vcpu)->ept_pointer;
- } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
- to_kvm_vmx(kvm)->ept_pointers_match
- = EPT_POINTERS_MISMATCH;
- return;
- }
- }
-
- to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
-}
-
-static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
- void *data)
-{
- struct kvm_tlb_range *range = data;
-
- return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
- range->pages);
-}
-
-static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
- struct kvm_vcpu *vcpu, struct kvm_tlb_range *range)
-{
- u64 ept_pointer = to_vmx(vcpu)->ept_pointer;
-
- /*
- * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs address
- * of the base of EPT PML4 table, strip off EPT configuration
- * information.
- */
- if (range)
- return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK,
- kvm_fill_hv_flush_list_func, (void *)range);
- else
- return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK);
-}
-
-static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
- struct kvm_tlb_range *range)
-{
- struct kvm_vcpu *vcpu;
- int ret = 0, i;
-
- spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
-
- if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
- check_ept_pointer_match(kvm);
-
- if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
- kvm_for_each_vcpu(i, vcpu, kvm) {
- /* If ept_pointer is invalid pointer, bypass flush request. */
- if (VALID_PAGE(to_vmx(vcpu)->ept_pointer))
- ret |= __hv_remote_flush_tlb_with_range(
- kvm, vcpu, range);
- }
- } else {
- ret = __hv_remote_flush_tlb_with_range(kvm,
- kvm_get_vcpu(kvm, 0), range);
- }
-
- spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
- return ret;
-}
-static int hv_remote_flush_tlb(struct kvm *kvm)
-{
- return hv_remote_flush_tlb_with_range(kvm, NULL);
-}
-
static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
{
struct hv_enlightened_vmcs *evmcs;
@@ -559,7 +469,7 @@ static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
 * evmcs in a single VM share the same assist page.
*/
if (!*p_hv_pa_pg)
- *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ *p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
if (!*p_hv_pa_pg)
return -ENOMEM;
@@ -677,21 +587,11 @@ static bool is_valid_passthrough_msr(u32 msr)
return r;
}
-static inline int __vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
-{
- int i;
-
- for (i = 0; i < vmx->nr_uret_msrs; ++i)
- if (vmx_uret_msrs_list[vmx->guest_uret_msrs[i].slot] == msr)
- return i;
- return -1;
-}
-
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
{
int i;
- i = __vmx_find_uret_msr(vmx, msr);
+ i = kvm_find_user_return_msr(msr);
if (i >= 0)
return &vmx->guest_uret_msrs[i];
return NULL;
@@ -700,13 +600,14 @@ struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr)
static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
struct vmx_uret_msr *msr, u64 data)
{
+ unsigned int slot = msr - vmx->guest_uret_msrs;
int ret = 0;
u64 old_msr_data = msr->data;
msr->data = data;
- if (msr - vmx->guest_uret_msrs < vmx->nr_active_uret_msrs) {
+ if (msr->load_into_hardware) {
preempt_disable();
- ret = kvm_set_user_return_msr(msr->slot, msr->data, msr->mask);
+ ret = kvm_set_user_return_msr(slot, msr->data, msr->mask);
preempt_enable();
if (ret)
msr->data = old_msr_data;
@@ -846,16 +747,21 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
if (is_guest_mode(vcpu))
eb |= get_vmcs12(vcpu)->exception_bitmap;
else {
- /*
- * If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched
- * between guest and host. In that case we only care about present
- * faults. For vmcs02, however, PFEC_MASK and PFEC_MATCH are set in
- * prepare_vmcs02_rare.
- */
- bool selective_pf_trap = enable_ept && (eb & (1u << PF_VECTOR));
- int mask = selective_pf_trap ? PFERR_PRESENT_MASK : 0;
+ int mask = 0, match = 0;
+
+ if (enable_ept && (eb & (1u << PF_VECTOR))) {
+ /*
+ * If EPT is enabled, #PF is currently only intercepted
+ * if MAXPHYADDR is smaller on the guest than on the
+ * host. In that case we only care about present,
+ * non-reserved faults. For vmcs02, however, PFEC_MASK
+ * and PFEC_MATCH are set in prepare_vmcs02_rare.
+ */
+ mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK;
+ match = PFERR_PRESENT_MASK;
+ }
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, mask);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match);
}
vmcs_write32(EXCEPTION_BITMAP, eb);
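
The PFEC mask/match machinery works as follows per the SDM: when bit 14 (#PF) is set in the exception bitmap, a page fault exits iff (error_code & PFEC_MASK) == PFEC_MATCH; when bit 14 is clear, the condition is inverted. A sketch of that predicate, where mask = P|RSVD and match = P selects only present, non-reserved faults as in the hunk above:

#include <stdbool.h>
#include <stdint.h>

#define PFERR_PRESENT_MASK	(1u << 0)
#define PFERR_RSVD_MASK		(1u << 3)

static bool pf_causes_vmexit(uint32_t error_code, uint32_t pfec_mask,
			     uint32_t pfec_match, bool pf_bit_set)
{
	bool match = (error_code & pfec_mask) == pfec_match;

	return pf_bit_set ? match : !match;
}

With pfec_mask = PFERR_PRESENT_MASK | PFERR_RSVD_MASK and pfec_match = PFERR_PRESENT_MASK, a plain present fault exits while a reserved-bit fault does not.
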
@@ -1058,7 +964,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx)
return false;
}
- i = __vmx_find_uret_msr(vmx, MSR_EFER);
+ i = kvm_find_user_return_msr(MSR_EFER);
if (i < 0)
return false;
@@ -1220,11 +1126,14 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
*/
if (!vmx->guest_uret_msrs_loaded) {
vmx->guest_uret_msrs_loaded = true;
- for (i = 0; i < vmx->nr_active_uret_msrs; ++i)
- kvm_set_user_return_msr(vmx->guest_uret_msrs[i].slot,
+ for (i = 0; i < kvm_nr_uret_msrs; ++i) {
+ if (!vmx->guest_uret_msrs[i].load_into_hardware)
+ continue;
+
+ kvm_set_user_return_msr(i,
vmx->guest_uret_msrs[i].data,
vmx->guest_uret_msrs[i].mask);
-
+ }
}
if (vmx->nested.need_vmcs12_to_shadow_sync)
@@ -1391,11 +1300,6 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
vmx->loaded_vmcs->cpu = cpu;
}
-
- /* Setup TSC multiplier */
- if (kvm_has_tsc_control &&
- vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
- decache_tsc_multiplier(vmx);
}
/*
@@ -1529,7 +1433,7 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
/*
* MTCFreq, CycThresh and PSBFreq encodings check, any MSR write that
- * utilize encodings marked reserved will casue a #GP fault.
+ * utilize encodings marked reserved will cause a #GP fault.
*/
value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods);
if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) &&
@@ -1570,12 +1474,25 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len)
{
+ /*
+ * Emulation of instructions in SGX enclaves is impossible as RIP does
+ * not point tthe failing instruction, and even if it did, the code
+ * stream is inaccessible. Inject #UD instead of exiting to userspace
+ * so that guest userspace can't DoS the guest simply by triggering
+ * emulation (enclaves are CPL3 only).
+ */
+ if (to_vmx(vcpu)->exit_reason.enclave_mode) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return false;
+ }
return true;
}
static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
+ union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason;
unsigned long rip, orig_rip;
+ u32 instr_len;
/*
* Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
@@ -1586,9 +1503,33 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
* i.e. we end up advancing IP with some random value.
*/
if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
- to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
+ exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) {
+ instr_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+
+ /*
+ * Emulating an enclave's instructions isn't supported as KVM
+ * cannot access the enclave's memory or its true RIP, e.g. the
+ * vmcs.GUEST_RIP points at the exit point of the enclave, not
+ * the RIP that actually triggered the VM-Exit. But, because
+ * most instructions that cause VM-Exit will #UD in an enclave,
+ * most instruction-based VM-Exits simply do not occur.
+ *
+ * There are a few exceptions, notably the debug instructions
+ * INT1ICEBRK and INT3, as they are allowed in debug enclaves
+ * and generate #DB/#BP as expected, which KVM might intercept.
+ * But again, the CPU does the dirty work and saves an instr
+ * length of zero so VMMs don't shoot themselves in the foot.
+ * WARN if KVM tries to skip a non-zero length instruction on
+ * a VM-Exit from an enclave.
+ */
+ if (!instr_len)
+ goto rip_updated;
+
+ WARN(exit_reason.enclave_mode,
+ "KVM: skipping instruction after SGX enclave VM-Exit");
+
orig_rip = kvm_rip_read(vcpu);
- rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ rip = orig_rip + instr_len;
#ifdef CONFIG_X86_64
/*
* We need to mask out the high 32 bits of RIP if not in 64-bit
@@ -1604,6 +1545,7 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
return 0;
}
+rip_updated:
/* skipping an emulated instruction also counts */
vmx_set_interrupt_shadow(vcpu, 0);
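
Outside 64-bit mode the architecture truncates RIP to 32 bits when it is advanced, which is what the CONFIG_X86_64 masking above emulates. A sketch of the advance (assuming the long-mode state is already known):

#include <stdint.h>

static uint64_t advance_rip(uint64_t rip, uint32_t instr_len, int long_mode)
{
	uint64_t next = rip + instr_len;

	/* Outside 64-bit mode, RIP is architecturally 32 bits wide. */
	return long_mode ? next : (uint32_t)next;
}
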
@@ -1693,19 +1635,16 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
vmx_clear_hlt(vcpu);
}
-static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr)
+static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr,
+ bool load_into_hardware)
{
- struct vmx_uret_msr tmp;
- int from, to;
+ struct vmx_uret_msr *uret_msr;
- from = __vmx_find_uret_msr(vmx, msr);
- if (from < 0)
+ uret_msr = vmx_find_uret_msr(vmx, msr);
+ if (!uret_msr)
return;
- to = vmx->nr_active_uret_msrs++;
- tmp = vmx->guest_uret_msrs[to];
- vmx->guest_uret_msrs[to] = vmx->guest_uret_msrs[from];
- vmx->guest_uret_msrs[from] = tmp;
+ uret_msr->load_into_hardware = load_into_hardware;
}
/*
@@ -1715,51 +1654,73 @@ static void vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr)
*/
static void setup_msrs(struct vcpu_vmx *vmx)
{
- vmx->guest_uret_msrs_loaded = false;
- vmx->nr_active_uret_msrs = 0;
#ifdef CONFIG_X86_64
+ bool load_syscall_msrs;
+
/*
* The SYSCALL MSRs are only needed on long mode guests, and only
* when EFER.SCE is set.
*/
- if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) {
- vmx_setup_uret_msr(vmx, MSR_STAR);
- vmx_setup_uret_msr(vmx, MSR_LSTAR);
- vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK);
- }
+ load_syscall_msrs = is_long_mode(&vmx->vcpu) &&
+ (vmx->vcpu.arch.efer & EFER_SCE);
+
+ vmx_setup_uret_msr(vmx, MSR_STAR, load_syscall_msrs);
+ vmx_setup_uret_msr(vmx, MSR_LSTAR, load_syscall_msrs);
+ vmx_setup_uret_msr(vmx, MSR_SYSCALL_MASK, load_syscall_msrs);
#endif
- if (update_transition_efer(vmx))
- vmx_setup_uret_msr(vmx, MSR_EFER);
+ vmx_setup_uret_msr(vmx, MSR_EFER, update_transition_efer(vmx));
- if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP))
- vmx_setup_uret_msr(vmx, MSR_TSC_AUX);
+ vmx_setup_uret_msr(vmx, MSR_TSC_AUX,
+ guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP) ||
+ guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDPID));
- vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL);
+ /*
+ * hle=0, rtm=0, tsx_ctrl=1 can be found with some combinations of new
+ * kernel and old userspace. If those guests run on a tsx=off host, do
+ * allow guests to use TSX_CTRL, but don't change the value in hardware
+ * so that TSX remains always disabled.
+ */
+ vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL, boot_cpu_has(X86_FEATURE_RTM));
if (cpu_has_vmx_msr_bitmap())
vmx_update_msr_bitmap(&vmx->vcpu);
+
+ /*
+ * The set of MSRs to load may have changed, reload MSRs before the
+ * next VM-Enter.
+ */
+ vmx->guest_uret_msrs_loaded = false;
}
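
The refactor above replaces the old "compact active MSRs to the front of the array" scheme with a per-slot load_into_hardware flag, so the slot index stays stable and shared with the common kvm_user_return_msrs list. A sketch of the resulting load loop; wrmsr_slot is a stand-in for kvm_set_user_return_msr() and the array size is illustrative:

#include <stdbool.h>
#include <stdint.h>

struct uret_msr {
	bool load_into_hardware;
	uint64_t data;
	uint64_t mask;
};

#define NR_URET_MSRS 8

static void load_guest_uret_msrs(const struct uret_msr msrs[NR_URET_MSRS],
				 void (*wrmsr_slot)(unsigned int slot,
						    uint64_t data,
						    uint64_t mask))
{
	for (unsigned int i = 0; i < NR_URET_MSRS; i++) {
		if (!msrs[i].load_into_hardware)
			continue;	/* slot exists but isn't active */
		wrmsr_slot(i, msrs[i].data, msrs[i].mask);
	}
}
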
-static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- u64 g_tsc_offset = 0;
- /*
- * We're here if L1 chose not to trap WRMSR to TSC. According
- * to the spec, this should set L1's TSC; The offset that L1
- * set for L2 remains unchanged, and still needs to be added
- * to the newly set TSC to get L2's TSC.
- */
- if (is_guest_mode(vcpu) &&
- (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
- g_tsc_offset = vmcs12->tsc_offset;
+ if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING))
+ return vmcs12->tsc_offset;
+
+ return 0;
+}
- trace_kvm_write_tsc_offset(vcpu->vcpu_id,
- vcpu->arch.tsc_offset - g_tsc_offset,
- offset);
- vmcs_write64(TSC_OFFSET, offset + g_tsc_offset);
- return offset + g_tsc_offset;
+u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING) &&
+ nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
+ return vmcs12->tsc_multiplier;
+
+ return kvm_default_tsc_scaling_ratio;
+}
+
+static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+ vmcs_write64(TSC_OFFSET, offset);
+}
+
+static void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
+{
+ vmcs_write64(TSC_MULTIPLIER, multiplier);
}
/*
@@ -1865,6 +1826,13 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_FEAT_CTL:
msr_info->data = vmx->msr_ia32_feature_control;
break;
+ case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
+ return 1;
+ msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash
+ [msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0];
+ break;
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
if (!nested_vmx_allowed(vcpu))
return 1;
@@ -1928,11 +1896,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
else
msr_info->data = vmx->pt_desc.guest.addr_a[index / 2];
break;
- case MSR_TSC_AUX:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
- return 1;
- goto find_uret_msr;
case MSR_IA32_DEBUGCTLMSR:
msr_info->data = vmcs_read64(GUEST_IA32_DEBUGCTL);
break;
@@ -1966,6 +1929,9 @@ static u64 vcpu_supported_debugctl(struct kvm_vcpu *vcpu)
if (!intel_pmu_lbr_is_enabled(vcpu))
debugctl &= ~DEBUGCTLMSR_LBR_MASK;
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
+ debugctl &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
+
return debugctl;
}
@@ -2158,6 +2124,29 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmx->msr_ia32_feature_control = data;
if (msr_info->host_initiated && data == 0)
vmx_leave_nested(vcpu);
+
+ /* SGX may be enabled/disabled by guest's firmware */
+ vmx_write_encls_bitmap(vcpu, NULL);
+ break;
+ case MSR_IA32_SGXLEPUBKEYHASH0 ... MSR_IA32_SGXLEPUBKEYHASH3:
+ /*
+ * On real hardware, the LE hash MSRs are writable before
+ * the firmware sets bit 0 in MSR 0x7a ("activating" SGX),
+ * at which point SGX related bits in IA32_FEATURE_CONTROL
+ * become writable.
+ *
+ * KVM does not emulate SGX activation for simplicity, so
+ * allow writes to the LE hash MSRs if IA32_FEATURE_CONTROL
+ * is unlocked. This is technically not architectural
+ * behavior, but it's close enough.
+ */
+ if (!msr_info->host_initiated &&
+ (!guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC) ||
+ ((vmx->msr_ia32_feature_control & FEAT_CTL_LOCKED) &&
+ !(vmx->msr_ia32_feature_control & FEAT_CTL_SGX_LC_ENABLED))))
+ return 1;
+ vmx->msr_ia32_sgxlepubkeyhash
+ [msr_index - MSR_IA32_SGXLEPUBKEYHASH0] = data;
break;
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
if (!msr_info->host_initiated)
@@ -2225,14 +2214,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
else
vmx->pt_desc.guest.addr_a[index / 2] = data;
break;
- case MSR_TSC_AUX:
- if (!msr_info->host_initiated &&
- !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
- return 1;
- /* Check reserved bit, higher 32 bits should be zero */
- if ((data >> 32) != 0)
- return 1;
- goto find_uret_msr;
case MSR_IA32_PERF_CAPABILITIES:
if (data && !vcpu_to_pmu(vcpu)->version)
return 1;
@@ -2761,7 +2742,7 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
/*
- * Update real mode segment cache. It may be not up-to-date if sement
+ * Update real mode segment cache. It may be not up-to-date if segment
* register was written while vcpu was in a guest mode.
*/
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
@@ -3088,8 +3069,7 @@ static int vmx_get_max_tdp_level(void)
return 4;
}
-u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
- int root_level)
+u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level)
{
u64 eptp = VMX_EPTP_MT_WB;
@@ -3098,13 +3078,13 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
if (enable_ept_ad_bits &&
(!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
eptp |= VMX_EPTP_AD_ENABLE_BIT;
- eptp |= (root_hpa & PAGE_MASK);
+ eptp |= root_hpa;
return eptp;
}
-static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
- int pgd_level)
+static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
+ int root_level)
{
struct kvm *kvm = vcpu->kvm;
bool update_guest_cr3 = true;
@@ -3112,16 +3092,10 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
u64 eptp;
if (enable_ept) {
- eptp = construct_eptp(vcpu, pgd, pgd_level);
+ eptp = construct_eptp(vcpu, root_hpa, root_level);
vmcs_write64(EPT_POINTER, eptp);
- if (kvm_x86_ops.tlb_remote_flush) {
- spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
- to_vmx(vcpu)->ept_pointer = eptp;
- to_kvm_vmx(kvm)->ept_pointers_match
- = EPT_POINTERS_CHECK;
- spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
- }
+ hv_track_root_tdp(vcpu, root_hpa);
if (!enable_unrestricted_guest && !is_paging(vcpu))
guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
@@ -3131,7 +3105,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd,
update_guest_cr3 = false;
vmx_ept_load_pdptrs(vcpu);
} else {
- guest_cr3 = pgd;
+ guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu);
}
if (update_guest_cr3)
@@ -3647,7 +3621,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
int ret = 0;
mutex_lock(&kvm->slots_lock);
- if (kvm->arch.apic_access_page_done)
+ if (kvm->arch.apic_access_memslot_enabled)
goto out;
hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
@@ -3667,7 +3641,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
* is able to migrate it.
*/
put_page(page);
- kvm->arch.apic_access_page_done = true;
+ kvm->arch.apic_access_memslot_enabled = true;
out:
mutex_unlock(&kvm->slots_lock);
return ret;
@@ -3738,8 +3712,7 @@ static void vmx_set_msr_bitmap_write(ulong *msr_bitmap, u32 msr)
__set_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);
}
-static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
- u32 msr, int type)
+void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
@@ -3784,8 +3757,7 @@ static __always_inline void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu,
vmx_clear_msr_bitmap_write(msr_bitmap, msr);
}
-static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
- u32 msr, int type)
+void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
@@ -3818,15 +3790,6 @@ static __always_inline void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu,
vmx_set_msr_bitmap_write(msr_bitmap, msr);
}
-void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,
- u32 msr, int type, bool value)
-{
- if (value)
- vmx_enable_intercept_for_msr(vcpu, msr, type);
- else
- vmx_disable_intercept_for_msr(vcpu, msr, type);
-}
-
static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
{
u8 mode = 0;
@@ -4299,7 +4262,23 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
xsaves_enabled, false);
}
- vmx_adjust_sec_exec_feature(vmx, &exec_control, rdtscp, RDTSCP);
+ /*
+ * RDPID is also gated by ENABLE_RDTSCP; turn on the control if either
+ * feature is exposed to the guest. This creates a virtualization hole
+ * if both are supported in hardware but only one is exposed to the
+ * guest, but letting the guest execute RDTSCP or RDPID when either one
+ * is advertised is preferable to emulating the advertised instruction
+ * in KVM on #UD, and obviously better than incorrectly injecting #UD.
+ */
+ if (cpu_has_vmx_rdtscp()) {
+ bool rdpid_or_rdtscp_enabled =
+ guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
+ guest_cpuid_has(vcpu, X86_FEATURE_RDPID);
+
+ vmx_adjust_secondary_exec_control(vmx, &exec_control,
+ SECONDARY_EXEC_ENABLE_RDTSCP,
+ rdpid_or_rdtscp_enabled, false);
+ }
vmx_adjust_sec_exec_feature(vmx, &exec_control, invpcid, INVPCID);
vmx_adjust_sec_exec_exiting(vmx, &exec_control, rdrand, RDRAND);
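
The RDTSCP/RDPID change in the hunk above is a pure policy decision and easy to model: the single ENABLE_RDTSCP execution control must be on when either instruction is exposed to the guest. A sketch of that decision (names are illustrative):

/* rdtscp_ctl.c — decision logic from the hunk above. */
#include <stdbool.h>
#include <stdio.h>

static bool want_enable_rdtscp_ctl(bool hw_has_ctl, bool guest_rdtscp,
				   bool guest_rdpid)
{
	if (!hw_has_ctl)
		return false;
	/*
	 * Enabling the control for only one of the two would be the
	 * "virtualization hole" the comment describes: the other
	 * instruction then executes instead of faulting with #UD.
	 */
	return guest_rdtscp || guest_rdpid;
}

int main(void)
{
	printf("%d\n", want_enable_rdtscp_ctl(true, false, true)); /* RDPID only */
	return 0;
}
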
@@ -4314,15 +4293,6 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
vmx->secondary_exec_control = exec_control;
}
-static void ept_set_mmio_spte_mask(void)
-{
- /*
- * EPT Misconfigurations can be generated if the value of bits 2:0
- * of an EPT paging-structure entry is 110b (write/execute).
- */
- kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE, 0);
-}
-
#define VMX_XSS_EXIT_BITMAP 0
/*
@@ -4410,8 +4380,7 @@ static void init_vmcs(struct vcpu_vmx *vmx)
vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}
- if (cpu_has_vmx_encls_vmexit())
- vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
+ vmx_write_encls_bitmap(&vmx->vcpu, NULL);
if (vmx_pt_mode_is_host_guest()) {
memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc));
@@ -4774,7 +4743,7 @@ static int handle_machine_check(struct kvm_vcpu *vcpu)
* - Guest has #AC detection enabled in CR0
* - Guest EFLAGS has AC bit set
*/
-static inline bool guest_inject_ac(struct kvm_vcpu *vcpu)
+bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu)
{
if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
return true;
@@ -4788,7 +4757,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_run *kvm_run = vcpu->run;
u32 intr_info, ex_no, error_code;
- unsigned long cr2, rip, dr6;
+ unsigned long cr2, dr6;
u32 vect_info;
vect_info = vmx->idt_vectoring_info;
@@ -4878,12 +4847,11 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
vmx->vcpu.arch.event_exit_inst_len =
vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
kvm_run->exit_reason = KVM_EXIT_DEBUG;
- rip = kvm_rip_read(vcpu);
- kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
+ kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
kvm_run->debug.arch.exception = ex_no;
break;
case AC_VECTOR:
- if (guest_inject_ac(vcpu)) {
+ if (vmx_guest_inject_ac(vcpu)) {
kvm_queue_exception_e(vcpu, AC_VECTOR, error_code);
return 1;
}
@@ -5020,7 +4988,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
reg = (exit_qualification >> 8) & 15;
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
- val = kvm_register_readl(vcpu, reg);
+ val = kvm_register_read(vcpu, reg);
trace_kvm_cr_write(cr, val);
switch (cr) {
case 0:
@@ -5143,7 +5111,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
kvm_register_write(vcpu, reg, val);
err = 0;
} else {
- err = kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg));
+ err = kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg));
}
out:
@@ -5184,17 +5152,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
return 1;
}
-static int handle_vmcall(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_hypercall(vcpu);
-}
-
-static int handle_invd(struct kvm_vcpu *vcpu)
-{
- /* Treat an INVD instruction as a NOP and just skip it. */
- return kvm_skip_emulated_instruction(vcpu);
-}
-
static int handle_invlpg(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
@@ -5203,28 +5160,6 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
}
-static int handle_rdpmc(struct kvm_vcpu *vcpu)
-{
- int err;
-
- err = kvm_rdpmc(vcpu);
- return kvm_complete_insn_gp(vcpu, err);
-}
-
-static int handle_wbinvd(struct kvm_vcpu *vcpu)
-{
- return kvm_emulate_wbinvd(vcpu);
-}
-
-static int handle_xsetbv(struct kvm_vcpu *vcpu)
-{
- u64 new_bv = kvm_read_edx_eax(vcpu);
- u32 index = kvm_rcx_read(vcpu);
-
- int err = kvm_set_xcr(vcpu, index, new_bv);
- return kvm_complete_insn_gp(vcpu, err);
-}
-
static int handle_apic_access(struct kvm_vcpu *vcpu)
{
if (likely(fasteoi)) {
@@ -5361,7 +5296,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
EPT_VIOLATION_EXECUTABLE))
? PFERR_PRESENT_MASK : 0;
- error_code |= (exit_qualification & 0x100) != 0 ?
+ error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ?
PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
vcpu->arch.exit_qualification = exit_qualification;
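
The error-code synthesis in handle_ept_violation() is straight bit translation from the exit qualification. A user-space model of the mappings (bit positions per the SDM and KVM's PFERR_* layout; the write/fetch mappings are the conventional ones and are included for completeness even though this hunk only shows the present and final/page parts):

/* ept_errcode.c — EPT-violation exit qualification to PF error code. */
#include <stdint.h>
#include <stdio.h>

/* exit qualification bits */
#define EPTV_WRITE          (1ULL << 1)
#define EPTV_INSTR          (1ULL << 2)
#define EPTV_READABLE       (1ULL << 3)
#define EPTV_WRITABLE       (1ULL << 4)
#define EPTV_EXECUTABLE     (1ULL << 5)
#define EPTV_GVA_TRANSLATED (1ULL << 8)

/* synthesized PF error code bits */
#define PFERR_PRESENT     (1ULL << 0)
#define PFERR_WRITE       (1ULL << 1)
#define PFERR_FETCH       (1ULL << 4)
#define PFERR_GUEST_FINAL (1ULL << 32)
#define PFERR_GUEST_PAGE  (1ULL << 33)

static uint64_t ept_violation_to_error_code(uint64_t qual)
{
	uint64_t ec = 0;

	if (qual & EPTV_WRITE)
		ec |= PFERR_WRITE;
	if (qual & EPTV_INSTR)
		ec |= PFERR_FETCH;
	/* "present" if the page had any of the R/W/X permissions */
	if (qual & (EPTV_READABLE | EPTV_WRITABLE | EPTV_EXECUTABLE))
		ec |= PFERR_PRESENT;
	/* fault on the final translation vs. on the guest page walk */
	ec |= (qual & EPTV_GVA_TRANSLATED) ? PFERR_GUEST_FINAL : PFERR_GUEST_PAGE;
	return ec;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)
	       ept_violation_to_error_code(EPTV_WRITE | EPTV_GVA_TRANSLATED));
	return 0;
}
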
@@ -5384,6 +5319,9 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
{
gpa_t gpa;
+ if (!vmx_can_emulate_instruction(vcpu, NULL, 0))
+ return 1;
+
/*
* A nested guest cannot optimize MMIO vmexits, because we have an
* nGPA here instead of the required GPA.
@@ -5485,18 +5423,6 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
}
}
-static void vmx_enable_tdp(void)
-{
- kvm_mmu_set_mask_ptes(VMX_EPT_READABLE_MASK,
- enable_ept_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull,
- enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
- 0ull, VMX_EPT_EXECUTABLE_MASK,
- cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
- VMX_EPT_RWX_MASK, 0ull);
-
- ept_set_mmio_spte_mask();
-}
-
/*
* Indicate a busy-waiting vcpu in spinlock. We do not enable the PAUSE
* exiting, so only get here on cpu with PAUSE-Loop-Exiting.
@@ -5516,34 +5442,11 @@ static int handle_pause(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
}
-static int handle_nop(struct kvm_vcpu *vcpu)
-{
- return kvm_skip_emulated_instruction(vcpu);
-}
-
-static int handle_mwait(struct kvm_vcpu *vcpu)
-{
- printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
- return handle_nop(vcpu);
-}
-
-static int handle_invalid_op(struct kvm_vcpu *vcpu)
-{
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
-}
-
static int handle_monitor_trap(struct kvm_vcpu *vcpu)
{
return 1;
}
-static int handle_monitor(struct kvm_vcpu *vcpu)
-{
- printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
- return handle_nop(vcpu);
-}
-
static int handle_invpcid(struct kvm_vcpu *vcpu)
{
u32 vmx_instruction_info;
@@ -5560,7 +5463,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
}
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
- type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
+ type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
if (type > 3) {
kvm_inject_gp(vcpu, 0);
@@ -5632,16 +5535,18 @@ static int handle_vmx_instruction(struct kvm_vcpu *vcpu)
return 1;
}
+#ifndef CONFIG_X86_SGX_KVM
static int handle_encls(struct kvm_vcpu *vcpu)
{
/*
- * SGX virtualization is not yet supported. There is no software
- * enable bit for SGX, so we have to trap ENCLS and inject a #UD
- * to prevent the guest from executing ENCLS.
+ * SGX virtualization is disabled. There is no software enable bit for
+ * SGX, so KVM intercepts all ENCLS leafs and injects a #UD to prevent
+ * the guest from executing ENCLS (when SGX is supported by hardware).
*/
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
+#endif /* CONFIG_X86_SGX_KVM */
static int handle_bus_lock_vmexit(struct kvm_vcpu *vcpu)
{
@@ -5668,10 +5573,10 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_MSR_WRITE] = kvm_emulate_wrmsr,
[EXIT_REASON_INTERRUPT_WINDOW] = handle_interrupt_window,
[EXIT_REASON_HLT] = kvm_emulate_halt,
- [EXIT_REASON_INVD] = handle_invd,
+ [EXIT_REASON_INVD] = kvm_emulate_invd,
[EXIT_REASON_INVLPG] = handle_invlpg,
- [EXIT_REASON_RDPMC] = handle_rdpmc,
- [EXIT_REASON_VMCALL] = handle_vmcall,
+ [EXIT_REASON_RDPMC] = kvm_emulate_rdpmc,
+ [EXIT_REASON_VMCALL] = kvm_emulate_hypercall,
[EXIT_REASON_VMCLEAR] = handle_vmx_instruction,
[EXIT_REASON_VMLAUNCH] = handle_vmx_instruction,
[EXIT_REASON_VMPTRLD] = handle_vmx_instruction,
@@ -5685,8 +5590,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_APIC_ACCESS] = handle_apic_access,
[EXIT_REASON_APIC_WRITE] = handle_apic_write,
[EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced,
- [EXIT_REASON_WBINVD] = handle_wbinvd,
- [EXIT_REASON_XSETBV] = handle_xsetbv,
+ [EXIT_REASON_WBINVD] = kvm_emulate_wbinvd,
+ [EXIT_REASON_XSETBV] = kvm_emulate_xsetbv,
[EXIT_REASON_TASK_SWITCH] = handle_task_switch,
[EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
[EXIT_REASON_GDTR_IDTR] = handle_desc,
@@ -5694,13 +5599,13 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
[EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
[EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
- [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait,
+ [EXIT_REASON_MWAIT_INSTRUCTION] = kvm_emulate_mwait,
[EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap,
- [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
+ [EXIT_REASON_MONITOR_INSTRUCTION] = kvm_emulate_monitor,
[EXIT_REASON_INVEPT] = handle_vmx_instruction,
[EXIT_REASON_INVVPID] = handle_vmx_instruction,
- [EXIT_REASON_RDRAND] = handle_invalid_op,
- [EXIT_REASON_RDSEED] = handle_invalid_op,
+ [EXIT_REASON_RDRAND] = kvm_handle_invalid_op,
+ [EXIT_REASON_RDSEED] = kvm_handle_invalid_op,
[EXIT_REASON_PML_FULL] = handle_pml_full,
[EXIT_REASON_INVPCID] = handle_invpcid,
[EXIT_REASON_VMFUNC] = handle_vmx_instruction,
@@ -5787,12 +5692,23 @@ static void vmx_dump_dtsel(char *name, uint32_t limit)
vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
}
-void dump_vmcs(void)
+static void vmx_dump_msrs(char *name, struct vmx_msrs *m)
+{
+ unsigned int i;
+ struct vmx_msr_entry *e;
+
+ pr_err("MSR %s:\n", name);
+ for (i = 0, e = m->val; i < m->nr; ++i, ++e)
+ pr_err(" %2d: msr=0x%08x value=0x%016llx\n", i, e->index, e->value);
+}
+
+void dump_vmcs(struct kvm_vcpu *vcpu)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 vmentry_ctl, vmexit_ctl;
u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
unsigned long cr4;
- u64 efer;
+ int efer_slot;
if (!dump_invalid_vmcs) {
pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
@@ -5804,11 +5720,12 @@ void dump_vmcs(void)
cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
cr4 = vmcs_readl(GUEST_CR4);
- efer = vmcs_read64(GUEST_IA32_EFER);
secondary_exec_control = 0;
if (cpu_has_secondary_exec_ctrls())
secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+ pr_err("VMCS %p, last attempted VM-entry on CPU %d\n",
+ vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
pr_err("*** Guest State ***\n");
pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
@@ -5816,9 +5733,7 @@ void dump_vmcs(void)
pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
- if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
- (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
- {
+ if (cpu_has_vmx_ept()) {
pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n",
vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n",
@@ -5841,10 +5756,20 @@ void dump_vmcs(void)
vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
vmx_dump_sel("TR: ", GUEST_TR_SELECTOR);
- if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
- (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
- pr_err("EFER = 0x%016llx PAT = 0x%016llx\n",
- efer, vmcs_read64(GUEST_IA32_PAT));
+ efer_slot = vmx_find_loadstore_msr_slot(&vmx->msr_autoload.guest, MSR_EFER);
+ if (vmentry_ctl & VM_ENTRY_LOAD_IA32_EFER)
+ pr_err("EFER= 0x%016llx\n", vmcs_read64(GUEST_IA32_EFER));
+ else if (efer_slot >= 0)
+ pr_err("EFER= 0x%016llx (autoload)\n",
+ vmx->msr_autoload.guest.val[efer_slot].value);
+ else if (vmentry_ctl & VM_ENTRY_IA32E_MODE)
+ pr_err("EFER= 0x%016llx (effective)\n",
+ vcpu->arch.efer | (EFER_LMA | EFER_LME));
+ else
+ pr_err("EFER= 0x%016llx (effective)\n",
+ vcpu->arch.efer & ~(EFER_LMA | EFER_LME));
+ if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PAT)
+ pr_err("PAT = 0x%016llx\n", vmcs_read64(GUEST_IA32_PAT));
pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n",
vmcs_read64(GUEST_IA32_DEBUGCTL),
vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
@@ -5860,6 +5785,10 @@ void dump_vmcs(void)
if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
pr_err("InterruptStatus = %04x\n",
vmcs_read16(GUEST_INTR_STATUS));
+ if (vmcs_read32(VM_ENTRY_MSR_LOAD_COUNT) > 0)
+ vmx_dump_msrs("guest autoload", &vmx->msr_autoload.guest);
+ if (vmcs_read32(VM_EXIT_MSR_STORE_COUNT) > 0)
+ vmx_dump_msrs("guest autostore", &vmx->msr_autostore.guest);
pr_err("*** Host State ***\n");
pr_err("RIP = 0x%016lx RSP = 0x%016lx\n",
@@ -5881,14 +5810,16 @@ void dump_vmcs(void)
vmcs_readl(HOST_IA32_SYSENTER_ESP),
vmcs_read32(HOST_IA32_SYSENTER_CS),
vmcs_readl(HOST_IA32_SYSENTER_EIP));
- if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER))
- pr_err("EFER = 0x%016llx PAT = 0x%016llx\n",
- vmcs_read64(HOST_IA32_EFER),
- vmcs_read64(HOST_IA32_PAT));
+ if (vmexit_ctl & VM_EXIT_LOAD_IA32_EFER)
+ pr_err("EFER= 0x%016llx\n", vmcs_read64(HOST_IA32_EFER));
+ if (vmexit_ctl & VM_EXIT_LOAD_IA32_PAT)
+ pr_err("PAT = 0x%016llx\n", vmcs_read64(HOST_IA32_PAT));
if (cpu_has_load_perf_global_ctrl() &&
vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
pr_err("PerfGlobCtl = 0x%016llx\n",
vmcs_read64(HOST_IA32_PERF_GLOBAL_CTRL));
+ if (vmcs_read32(VM_EXIT_MSR_LOAD_COUNT) > 0)
+ vmx_dump_msrs("host autoload", &vmx->msr_autoload.host);
pr_err("*** Control State ***\n");
pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
@@ -5997,7 +5928,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
}
if (exit_reason.failed_vmentry) {
- dump_vmcs();
+ dump_vmcs(vcpu);
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
vcpu->run->fail_entry.hardware_entry_failure_reason
= exit_reason.full;
@@ -6006,7 +5937,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
}
if (unlikely(vmx->fail)) {
- dump_vmcs();
+ dump_vmcs(vcpu);
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
vcpu->run->fail_entry.hardware_entry_failure_reason
= vmcs_read32(VM_INSTRUCTION_ERROR);
@@ -6027,19 +5958,19 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
exit_reason.basic != EXIT_REASON_PML_FULL &&
exit_reason.basic != EXIT_REASON_APIC_ACCESS &&
exit_reason.basic != EXIT_REASON_TASK_SWITCH)) {
+ int ndata = 3;
+
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
- vcpu->run->internal.ndata = 3;
vcpu->run->internal.data[0] = vectoring_info;
vcpu->run->internal.data[1] = exit_reason.full;
vcpu->run->internal.data[2] = vcpu->arch.exit_qualification;
if (exit_reason.basic == EXIT_REASON_EPT_MISCONFIG) {
- vcpu->run->internal.ndata++;
- vcpu->run->internal.data[3] =
+ vcpu->run->internal.data[ndata++] =
vmcs_read64(GUEST_PHYSICAL_ADDRESS);
}
- vcpu->run->internal.data[vcpu->run->internal.ndata++] =
- vcpu->arch.last_vmentry_cpu;
+ vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu;
+ vcpu->run->internal.ndata = ndata;
return 0;
}
@@ -6092,7 +6023,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
unexpected_vmexit:
vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n",
exit_reason.full);
- dump_vmcs();
+ dump_vmcs(vcpu);
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =
KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
@@ -6232,6 +6163,7 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
switch (kvm_get_apic_mode(vcpu)) {
case LAPIC_MODE_INVALID:
WARN_ONCE(true, "Invalid local APIC state");
+ break;
case LAPIC_MODE_DISABLED:
break;
case LAPIC_MODE_XAPIC:
@@ -6395,18 +6327,17 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
-static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu, u32 intr_info)
+static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
+ unsigned long entry)
{
- unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
- gate_desc *desc = (gate_desc *)host_idt_base + vector;
-
kvm_before_interrupt(vcpu);
- vmx_do_interrupt_nmi_irqoff(gate_offset(desc));
+ vmx_do_interrupt_nmi_irqoff(entry);
kvm_after_interrupt(vcpu);
}
static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
{
+ const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist;
u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
/* if exit due to PF check for async PF */
@@ -6417,18 +6348,20 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
kvm_machine_check();
/* We need to handle NMIs before interrupts are enabled */
else if (is_nmi(intr_info))
- handle_interrupt_nmi_irqoff(&vmx->vcpu, intr_info);
+ handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry);
}
static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
{
u32 intr_info = vmx_get_intr_info(vcpu);
+ unsigned int vector = intr_info & INTR_INFO_VECTOR_MASK;
+ gate_desc *desc = (gate_desc *)host_idt_base + vector;
if (WARN_ONCE(!is_external_intr(intr_info),
"KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
return;
- handle_interrupt_nmi_irqoff(vcpu, intr_info);
+ handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc));
}
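
The external-interrupt path now computes the IDT entry address at the call site via gate_offset(). That helper just reassembles the split offset fields of an x86-64 gate descriptor, as this standalone sketch shows (layout per the SDM; the sample descriptor is fabricated):

/* gate_offset.c — reassembles a handler address from an x86-64 IDT
 * gate descriptor, as gate_offset() does for the lookup above.
 */
#include <stdint.h>
#include <stdio.h>

struct gate_desc64 {
	uint16_t offset_low;	/* handler address bits 15:0  */
	uint16_t segment;	/* code segment selector      */
	uint16_t bits;		/* IST, type, DPL, P          */
	uint16_t offset_middle;	/* handler address bits 31:16 */
	uint32_t offset_high;	/* handler address bits 63:32 */
	uint32_t reserved;
};

static uint64_t gate_offset(const struct gate_desc64 *g)
{
	return (uint64_t)g->offset_low |
	       ((uint64_t)g->offset_middle << 16) |
	       ((uint64_t)g->offset_high << 32);
}

int main(void)
{
	struct gate_desc64 g = {
		.offset_low = 0x5678, .offset_middle = 0x1234,
		.offset_high = 0xffffffff,
	};
	printf("entry = %#llx\n", (unsigned long long)gate_offset(&g));
	return 0;
}
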
static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
@@ -6580,8 +6513,8 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
int i, nr_msrs;
struct perf_guest_switch_msr *msrs;
+ /* Note, nr_msrs may be garbage if perf_guest_get_msrs() returns NULL. */
msrs = perf_guest_get_msrs(&nr_msrs);
-
if (!msrs)
return;
@@ -6642,25 +6575,7 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
struct vcpu_vmx *vmx)
{
- /*
- * VMENTER enables interrupts (host state), but the kernel state is
- * interrupts disabled when this is invoked. Also tell RCU about
- * it. This is the same logic as for exit_to_user_mode().
- *
- * This ensures that e.g. latency analysis on the host observes
- * guest mode as interrupt enabled.
- *
- * guest_enter_irqoff() informs context tracking about the
- * transition to guest mode and if enabled adjusts RCU state
- * accordingly.
- */
- instrumentation_begin();
- trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
- instrumentation_end();
-
- guest_enter_irqoff();
- lockdep_hardirqs_on(CALLER_ADDR0);
+ kvm_guest_enter_irqoff();
/* L1D Flush includes CPU buffer clear to mitigate MDS */
if (static_branch_unlikely(&vmx_l1d_should_flush))
@@ -6676,24 +6591,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
vcpu->arch.cr2 = native_read_cr2();
- /*
- * VMEXIT disables interrupts (host state), but tracing and lockdep
- * have them in state 'on' as recorded before entering guest mode.
- * Same as enter_from_user_mode().
- *
- * guest_exit_irqoff() restores host context and reinstates RCU if
- * enabled and required.
- *
- * This needs to be done before the below as native_read_msr()
- * contains a tracepoint and x86_spec_ctrl_restore_host() calls
- * into world and some more.
- */
- lockdep_hardirqs_off(CALLER_ADDR0);
- guest_exit_irqoff();
-
- instrumentation_begin();
- trace_hardirqs_off_finish();
- instrumentation_end();
+ kvm_guest_exit_irqoff();
}
static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
@@ -6824,7 +6722,18 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
kvm_load_host_xsave_state(vcpu);
- vmx->nested.nested_run_pending = 0;
+ if (is_guest_mode(vcpu)) {
+ /*
+ * Track VMLAUNCH/VMRESUME that have made it past guest state
+ * checking.
+ */
+ if (vmx->nested.nested_run_pending &&
+ !vmx->exit_reason.failed_vmentry)
+ ++vcpu->stat.nested_run;
+
+ vmx->nested.nested_run_pending = 0;
+ }
+
vmx->idt_vectoring_info = 0;
if (unlikely(vmx->fail)) {
@@ -6868,6 +6777,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
{
+ struct vmx_uret_msr *tsx_ctrl;
struct vcpu_vmx *vmx;
int i, cpu, err;
@@ -6890,43 +6800,19 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
goto free_vpid;
}
- BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);
-
- for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i) {
- u32 index = vmx_uret_msrs_list[i];
- u32 data_low, data_high;
- int j = vmx->nr_uret_msrs;
-
- if (rdmsr_safe(index, &data_low, &data_high) < 0)
- continue;
- if (wrmsr_safe(index, data_low, data_high) < 0)
- continue;
-
- vmx->guest_uret_msrs[j].slot = i;
- vmx->guest_uret_msrs[j].data = 0;
- switch (index) {
- case MSR_IA32_TSX_CTRL:
- /*
- * TSX_CTRL_CPUID_CLEAR is handled in the CPUID
- * interception. Keep the host value unchanged to avoid
- * changing CPUID bits under the host kernel's feet.
- *
- * hle=0, rtm=0, tsx_ctrl=1 can be found with some
- * combinations of new kernel and old userspace. If
- * those guests run on a tsx=off host, do allow guests
- * to use TSX_CTRL, but do not change the value on the
- * host so that TSX remains always disabled.
- */
- if (boot_cpu_has(X86_FEATURE_RTM))
- vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
- else
- vmx->guest_uret_msrs[j].mask = 0;
- break;
- default:
- vmx->guest_uret_msrs[j].mask = -1ull;
- break;
- }
- ++vmx->nr_uret_msrs;
+ for (i = 0; i < kvm_nr_uret_msrs; ++i) {
+ vmx->guest_uret_msrs[i].data = 0;
+ vmx->guest_uret_msrs[i].mask = -1ull;
+ }
+ if (boot_cpu_has(X86_FEATURE_RTM)) {
+ /*
+ * TSX_CTRL_CPUID_CLEAR is handled in the CPUID interception.
+ * Keep the host value unchanged to avoid changing CPUID bits
+ * under the host kernel's feet.
+ */
+ tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
+ if (tsx_ctrl)
+ tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
}
err = alloc_loaded_vmcs(&vmx->vmcs01);
@@ -6938,9 +6824,11 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
+#ifdef CONFIG_X86_64
vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+#endif
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
@@ -6976,8 +6864,11 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
else
memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));
+ vcpu_setup_sgx_lepubkeyhash(vcpu);
+
vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
+ vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
vcpu->arch.microcode_version = 0x100000000ULL;
vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
@@ -6989,8 +6880,6 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
vmx->pi_desc.nv = POSTED_INTR_VECTOR;
vmx->pi_desc.sn = 1;
- vmx->ept_pointer = INVALID_PAGE;
-
return 0;
free_vmcs:
@@ -7007,8 +6896,6 @@ free_vpid:
static int vmx_vm_init(struct kvm *kvm)
{
- spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock);
-
if (!ple_gap)
kvm->arch.pause_in_guest = true;
@@ -7035,7 +6922,6 @@ static int vmx_vm_init(struct kvm *kvm)
break;
}
}
- kvm_apicv_init(kvm, enable_apicv);
return 0;
}
@@ -7252,7 +7138,7 @@ static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output))
vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA;
- /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabircEn can be set */
+ /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */
if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys))
vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN;
@@ -7302,6 +7188,19 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
set_cr4_guest_host_mask(vmx);
+ vmx_write_encls_bitmap(vcpu, NULL);
+ if (guest_cpuid_has(vcpu, X86_FEATURE_SGX))
+ vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_SGX_ENABLED;
+ else
+ vmx->msr_ia32_feature_control_valid_bits &= ~FEAT_CTL_SGX_ENABLED;
+
+ if (guest_cpuid_has(vcpu, X86_FEATURE_SGX_LC))
+ vmx->msr_ia32_feature_control_valid_bits |=
+ FEAT_CTL_SGX_LC_ENABLED;
+ else
+ vmx->msr_ia32_feature_control_valid_bits &=
+ ~FEAT_CTL_SGX_LC_ENABLED;
+
/* Refresh #PF interception to account for MAXPHYADDR changes. */
vmx_update_exception_bitmap(vcpu);
}
@@ -7322,6 +7221,13 @@ static __init void vmx_set_cpu_caps(void)
if (vmx_pt_mode_is_host_guest())
kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
+ if (!enable_sgx) {
+ kvm_cpu_cap_clear(X86_FEATURE_SGX);
+ kvm_cpu_cap_clear(X86_FEATURE_SGX_LC);
+ kvm_cpu_cap_clear(X86_FEATURE_SGX1);
+ kvm_cpu_cap_clear(X86_FEATURE_SGX2);
+ }
+
if (vmx_umip_emulated())
kvm_cpu_cap_set(X86_FEATURE_UMIP);
@@ -7330,9 +7236,11 @@ static __init void vmx_set_cpu_caps(void)
if (!cpu_has_vmx_xsaves())
kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
- /* CPUID 0x80000001 */
- if (!cpu_has_vmx_rdtscp())
+ /* CPUID 0x80000001 and 0x7 (RDPID) */
+ if (!cpu_has_vmx_rdtscp()) {
kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
+ kvm_cpu_cap_clear(X86_FEATURE_RDPID);
+ }
if (cpu_has_vmx_waitpkg())
kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
@@ -7388,8 +7296,9 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
/*
* RDPID causes #UD if disabled through secondary execution controls.
* Because it is marked as EmulateOnUD, we need to intercept it here.
+ * Note, RDPID is hidden behind ENABLE_RDTSCP.
*/
- case x86_intercept_rdtscp:
+ case x86_intercept_rdpid:
if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_RDTSCP)) {
exception->vector = UD_VECTOR;
exception->error_code_valid = false;
@@ -7464,10 +7373,10 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
delta_tsc = 0;
/* Convert to host delta tsc if tsc scaling is enabled */
- if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
+ if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio &&
delta_tsc && u64_shl_div_u64(delta_tsc,
kvm_tsc_scaling_ratio_frac_bits,
- vcpu->arch.tsc_scaling_ratio, &delta_tsc))
+ vcpu->arch.l1_tsc_scaling_ratio, &delta_tsc))
return -ERANGE;
/*
@@ -7553,7 +7462,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
return !is_smm(vcpu);
}
-static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7567,7 +7476,7 @@ static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
return 0;
}
-static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
+static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int ret;
@@ -7711,7 +7620,10 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
- .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
+ .get_l2_tsc_offset = vmx_get_l2_tsc_offset,
+ .get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
+ .write_tsc_offset = vmx_write_tsc_offset,
+ .write_tsc_multiplier = vmx_write_tsc_multiplier,
.load_mmu_pgd = vmx_load_mmu_pgd,
@@ -7732,6 +7644,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.nested_ops = &vmx_nested_ops,
.update_pi_irte = pi_update_irte,
+ .start_assignment = vmx_pi_start_assignment,
#ifdef CONFIG_X86_64
.set_hv_timer = vmx_set_hv_timer,
@@ -7741,8 +7654,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.setup_mce = vmx_setup_mce,
.smi_allowed = vmx_smi_allowed,
- .pre_enter_smm = vmx_pre_enter_smm,
- .pre_leave_smm = vmx_pre_leave_smm,
+ .enter_smm = vmx_enter_smm,
+ .leave_smm = vmx_leave_smm,
.enable_smi_window = vmx_enable_smi_window,
.can_emulate_instruction = vmx_can_emulate_instruction,
@@ -7755,17 +7668,42 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
};
+static __init void vmx_setup_user_return_msrs(void)
+{
+ /*
+ * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
+ * will emulate SYSCALL in legacy mode if the vendor string in guest
+ * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
+ * support this emulation, MSR_STAR is included in the list for i386,
+ * but is never loaded into hardware. MSR_CSTAR is also never loaded
+ * into hardware and is here purely for emulation purposes.
+ */
+ const u32 vmx_uret_msrs_list[] = {
+ #ifdef CONFIG_X86_64
+ MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
+ #endif
+ MSR_EFER, MSR_TSC_AUX, MSR_STAR,
+ MSR_IA32_TSX_CTRL,
+ };
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(vmx_uret_msrs_list) != MAX_NR_USER_RETURN_MSRS);
+
+ for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
+ kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
+}
+
static __init int hardware_setup(void)
{
unsigned long host_bndcfgs;
struct desc_ptr dt;
- int r, i, ept_lpage_level;
+ int r, ept_lpage_level;
store_idt(&dt);
host_idt_base = dt.address;
- for (i = 0; i < ARRAY_SIZE(vmx_uret_msrs_list); ++i)
- kvm_define_user_return_msr(i, vmx_uret_msrs_list[i]);
+ vmx_setup_user_return_msrs();
if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
return -EIO;
@@ -7792,6 +7730,12 @@ static __init int hardware_setup(void)
!cpu_has_vmx_invept_global())
enable_ept = 0;
+ /* NX support is required for shadow paging. */
+ if (!enable_ept && !boot_cpu_has(X86_FEATURE_NX)) {
+ pr_err_ratelimited("kvm: NX (Execute Disable) not supported\n");
+ return -EOPNOTSUPP;
+ }
+
if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
enable_ept_ad_bits = 0;
@@ -7848,7 +7792,8 @@ static __init int hardware_setup(void)
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
if (enable_ept)
- vmx_enable_tdp();
+ kvm_mmu_set_ept_masks(enable_ept_ad_bits,
+ cpu_has_vmx_ept_execute_only());
if (!enable_ept)
ept_lpage_level = 0;
@@ -7909,6 +7854,8 @@ static __init int hardware_setup(void)
if (!enable_ept || !cpu_has_vmx_intel_pt())
pt_mode = PT_MODE_SYSTEM;
+ setup_default_sgx_lepubkeyhash();
+
if (nested) {
nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
vmx_capability.ept);
@@ -7978,6 +7925,8 @@ static void vmx_exit(void)
}
#endif
vmx_cleanup_l1d_flush();
+
+ allow_smaller_maxphyaddr = false;
}
module_exit(vmx_exit);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 89da5e1251f1..3979a947933a 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -36,7 +36,7 @@ struct vmx_msrs {
};
struct vmx_uret_msr {
- unsigned int slot; /* The MSR's slot in kvm_user_return_msrs. */
+ bool load_into_hardware;
u64 data;
u64 mask;
};
@@ -245,8 +245,16 @@ struct vcpu_vmx {
u32 idt_vectoring_info;
ulong rflags;
+ /*
+ * User return MSRs are always emulated when enabled in the guest, but
+ * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
+ * of 64-bit mode or if EFER.SCE=0, thus the SYSCALL MSRs don't need to
+ * be loaded into hardware if those conditions aren't met.
+ * nr_active_uret_msrs tracks the number of MSRs that need to be loaded
+ * into hardware when running the guest. guest_uret_msrs[] is resorted
+ * whenever the number of "active" uret MSRs is modified.
+ */
struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
- int nr_uret_msrs;
int nr_active_uret_msrs;
bool guest_uret_msrs_loaded;
#ifdef CONFIG_X86_64
@@ -314,8 +322,6 @@ struct vcpu_vmx {
/* apic deadline value in host tsc */
u64 hv_deadline_tsc;
- u64 current_tsc_ratio;
-
unsigned long host_debugctlmsr;
/*
@@ -325,7 +331,8 @@ struct vcpu_vmx {
*/
u64 msr_ia32_feature_control;
u64 msr_ia32_feature_control_valid_bits;
- u64 ept_pointer;
+ /* SGX Launch Control public key hash */
+ u64 msr_ia32_sgxlepubkeyhash[4];
struct pt_desc pt_desc;
struct lbr_desc lbr_desc;
@@ -338,21 +345,12 @@ struct vcpu_vmx {
} shadow_msr_intercept;
};
-enum ept_pointers_status {
- EPT_POINTERS_CHECK = 0,
- EPT_POINTERS_MATCH = 1,
- EPT_POINTERS_MISMATCH = 2
-};
-
struct kvm_vmx {
struct kvm kvm;
unsigned int tss_addr;
bool ept_identity_pagetable_done;
gpa_t ept_identity_map_addr;
-
- enum ept_pointers_status ept_pointers_match;
- spinlock_t ept_pointer_lock;
};
bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
@@ -376,9 +374,9 @@ void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
-u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
- int root_level);
+u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
+bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
@@ -392,8 +390,22 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);
-void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu,
- u32 msr, int type, bool value);
+
+void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
+void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
+
+u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
+u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
+
+static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
+ int type, bool value)
+{
+ if (value)
+ vmx_enable_intercept_for_msr(vcpu, msr, type);
+ else
+ vmx_disable_intercept_for_msr(vcpu, msr, type);
+}
+
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
static inline u8 vmx_get_rvi(void)
@@ -510,12 +522,6 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
GFP_KERNEL_ACCOUNT);
}
-static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
-{
- vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
- vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
-}
-
static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
return vmx->secondary_exec_control &
@@ -543,6 +549,6 @@ static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}
-void dump_vmcs(void);
+void dump_vmcs(struct kvm_vcpu *vcpu);
#endif /* __KVM_X86_VMX_H */
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index 692b0c31c9c8..164b64f65a8f 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -37,6 +37,10 @@ static __always_inline void vmcs_check32(unsigned long field)
{
BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
"32-bit accessor invalid for 16-bit field");
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
+ "32-bit accessor invalid for 64-bit field");
+ BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
+ "32-bit accessor invalid for 64-bit high field");
BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
"32-bit accessor invalid for natural width field");
}
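
The new assertions rely on the VMCS field-encoding convention: bits 14:13 of the encoding give the access width and bit 0 selects the high half of a 64-bit field. A sketch that classifies encodings the same way (the example field values are taken from the SDM's encoding tables):

/* vmcs_width.c — classifies a VMCS field encoding by width, mirroring
 * the compile-time checks above.
 */
#include <stdint.h>
#include <stdio.h>

static const char *vmcs_field_width(uint32_t field)
{
	switch (field & 0x6001) {
	case 0x0000: return "16-bit";
	case 0x2000: return "64-bit (full)";
	case 0x2001: return "64-bit (high half)";
	case 0x4000: return "32-bit";
	case 0x6000: return "natural width";
	default:     return "invalid";	/* bit 0 set on a non-64-bit field */
	}
}

int main(void)
{
	printf("%s\n", vmcs_field_width(0x2802)); /* GUEST_IA32_DEBUGCTL  */
	printf("%s\n", vmcs_field_width(0x4002)); /* a 32-bit control     */
	return 0;
}
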
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2a20ce60152e..17468d983fbd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -58,6 +58,7 @@
#include <linux/sched/isolation.h>
#include <linux/mem_encrypt.h>
#include <linux/entry-kvm.h>
+#include <linux/suspend.h>
#include <trace/events/kvm.h>
@@ -75,6 +76,7 @@
#include <asm/tlbflush.h>
#include <asm/intel_pt.h>
#include <asm/emulate_prefix.h>
+#include <asm/sgx.h>
#include <clocksource/hyperv_timer.h>
#define CREATE_TRACE_POINTS
@@ -101,6 +103,8 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
+#define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)
+
#define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
@@ -112,6 +116,9 @@ static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
+static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
+static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
+
struct kvm_x86_ops kvm_x86_ops __read_mostly;
EXPORT_SYMBOL_GPL(kvm_x86_ops);
@@ -156,9 +163,9 @@ module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
/*
* lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
- * adaptive tuning starting from default advancment of 1000ns. '0' disables
+ * adaptive tuning starting from default advancement of 1000ns. '0' disables
* advancement entirely. Any other value is used as-is and disables adaptive
- * tuning, i.e. allows priveleged userspace to set an exact advancement time.
+ * tuning, i.e. allows privileged userspace to set an exact advancement time.
*/
static int __read_mostly lapic_timer_advance_ns = -1;
module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);
@@ -183,11 +190,6 @@ module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);
*/
#define KVM_MAX_NR_USER_RETURN_MSRS 16
-struct kvm_user_return_msrs_global {
- int nr;
- u32 msrs[KVM_MAX_NR_USER_RETURN_MSRS];
-};
-
struct kvm_user_return_msrs {
struct user_return_notifier urn;
bool registered;
@@ -197,7 +199,9 @@ struct kvm_user_return_msrs {
} values[KVM_MAX_NR_USER_RETURN_MSRS];
};
-static struct kvm_user_return_msrs_global __read_mostly user_return_msrs_global;
+u32 __read_mostly kvm_nr_uret_msrs;
+EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
+static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
static struct kvm_user_return_msrs __percpu *user_return_msrs;
#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
@@ -211,52 +215,78 @@ EXPORT_SYMBOL_GPL(host_efer);
bool __read_mostly allow_smaller_maxphyaddr = 0;
EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
+bool __read_mostly enable_apicv = true;
+EXPORT_SYMBOL_GPL(enable_apicv);
+
u64 __read_mostly host_xss;
EXPORT_SYMBOL_GPL(host_xss);
u64 __read_mostly supported_xss;
EXPORT_SYMBOL_GPL(supported_xss);
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- VCPU_STAT("pf_fixed", pf_fixed),
- VCPU_STAT("pf_guest", pf_guest),
- VCPU_STAT("tlb_flush", tlb_flush),
- VCPU_STAT("invlpg", invlpg),
- VCPU_STAT("exits", exits),
- VCPU_STAT("io_exits", io_exits),
- VCPU_STAT("mmio_exits", mmio_exits),
- VCPU_STAT("signal_exits", signal_exits),
- VCPU_STAT("irq_window", irq_window_exits),
- VCPU_STAT("nmi_window", nmi_window_exits),
- VCPU_STAT("halt_exits", halt_exits),
- VCPU_STAT("halt_successful_poll", halt_successful_poll),
- VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
- VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
- VCPU_STAT("halt_wakeup", halt_wakeup),
- VCPU_STAT("hypercalls", hypercalls),
- VCPU_STAT("request_irq", request_irq_exits),
- VCPU_STAT("irq_exits", irq_exits),
- VCPU_STAT("host_state_reload", host_state_reload),
- VCPU_STAT("fpu_reload", fpu_reload),
- VCPU_STAT("insn_emulation", insn_emulation),
- VCPU_STAT("insn_emulation_fail", insn_emulation_fail),
- VCPU_STAT("irq_injections", irq_injections),
- VCPU_STAT("nmi_injections", nmi_injections),
- VCPU_STAT("req_event", req_event),
- VCPU_STAT("l1d_flush", l1d_flush),
- VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
- VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
- VM_STAT("mmu_shadow_zapped", mmu_shadow_zapped),
- VM_STAT("mmu_pte_write", mmu_pte_write),
- VM_STAT("mmu_pde_zapped", mmu_pde_zapped),
- VM_STAT("mmu_flooded", mmu_flooded),
- VM_STAT("mmu_recycled", mmu_recycled),
- VM_STAT("mmu_cache_miss", mmu_cache_miss),
- VM_STAT("mmu_unsync", mmu_unsync),
- VM_STAT("remote_tlb_flush", remote_tlb_flush),
- VM_STAT("largepages", lpages, .mode = 0444),
- VM_STAT("nx_largepages_splitted", nx_lpage_splits, .mode = 0444),
- VM_STAT("max_mmu_page_hash_collisions", max_mmu_page_hash_collisions),
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
+ STATS_DESC_COUNTER(VM, mmu_pte_write),
+ STATS_DESC_COUNTER(VM, mmu_pde_zapped),
+ STATS_DESC_COUNTER(VM, mmu_flooded),
+ STATS_DESC_COUNTER(VM, mmu_recycled),
+ STATS_DESC_COUNTER(VM, mmu_cache_miss),
+ STATS_DESC_ICOUNTER(VM, mmu_unsync),
+ STATS_DESC_ICOUNTER(VM, lpages),
+ STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
+ STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
+};
+static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
+ sizeof(struct kvm_vm_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, pf_fixed),
+ STATS_DESC_COUNTER(VCPU, pf_guest),
+ STATS_DESC_COUNTER(VCPU, tlb_flush),
+ STATS_DESC_COUNTER(VCPU, invlpg),
+ STATS_DESC_COUNTER(VCPU, exits),
+ STATS_DESC_COUNTER(VCPU, io_exits),
+ STATS_DESC_COUNTER(VCPU, mmio_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, irq_window_exits),
+ STATS_DESC_COUNTER(VCPU, nmi_window_exits),
+ STATS_DESC_COUNTER(VCPU, l1d_flush),
+ STATS_DESC_COUNTER(VCPU, halt_exits),
+ STATS_DESC_COUNTER(VCPU, request_irq_exits),
+ STATS_DESC_COUNTER(VCPU, irq_exits),
+ STATS_DESC_COUNTER(VCPU, host_state_reload),
+ STATS_DESC_COUNTER(VCPU, fpu_reload),
+ STATS_DESC_COUNTER(VCPU, insn_emulation),
+ STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
+ STATS_DESC_COUNTER(VCPU, hypercalls),
+ STATS_DESC_COUNTER(VCPU, irq_injections),
+ STATS_DESC_COUNTER(VCPU, nmi_injections),
+ STATS_DESC_COUNTER(VCPU, req_event),
+ STATS_DESC_COUNTER(VCPU, nested_run),
+ STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
+ STATS_DESC_COUNTER(VCPU, directed_yield_successful),
+ STATS_DESC_ICOUNTER(VCPU, guest_mode)
+};
+static_assert(ARRAY_SIZE(kvm_vcpu_stats_desc) ==
+ sizeof(struct kvm_vcpu_stat) / sizeof(u64));
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
u64 __read_mostly host_xcr0;
@@ -271,8 +301,7 @@ static struct kmem_cache *x86_emulator_cache;
* When called, it means the previous get/set msr reached an invalid msr.
* Return true if we want to ignore/silent this failed msr access.
*/
-static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
- u64 data, bool write)
+static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
{
const char *op = write ? "wrmsr" : "rdmsr";
@@ -327,23 +356,53 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
user_return_notifier_unregister(urn);
}
local_irq_restore(flags);
- for (slot = 0; slot < user_return_msrs_global.nr; ++slot) {
+ for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
values = &msrs->values[slot];
if (values->host != values->curr) {
- wrmsrl(user_return_msrs_global.msrs[slot], values->host);
+ wrmsrl(kvm_uret_msrs_list[slot], values->host);
values->curr = values->host;
}
}
}
-void kvm_define_user_return_msr(unsigned slot, u32 msr)
+static int kvm_probe_user_return_msr(u32 msr)
+{
+ u64 val;
+ int ret;
+
+ preempt_disable();
+ ret = rdmsrl_safe(msr, &val);
+ if (ret)
+ goto out;
+ ret = wrmsrl_safe(msr, val);
+out:
+ preempt_enable();
+ return ret;
+}
+
+int kvm_add_user_return_msr(u32 msr)
+{
+ BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);
+
+ if (kvm_probe_user_return_msr(msr))
+ return -1;
+
+ kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
+ return kvm_nr_uret_msrs++;
+}
+EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);
+
+int kvm_find_user_return_msr(u32 msr)
{
- BUG_ON(slot >= KVM_MAX_NR_USER_RETURN_MSRS);
- user_return_msrs_global.msrs[slot] = msr;
- if (slot >= user_return_msrs_global.nr)
- user_return_msrs_global.nr = slot + 1;
+ int i;
+
+ for (i = 0; i < kvm_nr_uret_msrs; ++i) {
+ if (kvm_uret_msrs_list[i] == msr)
+ return i;
+ }
+ return -1;
}
-EXPORT_SYMBOL_GPL(kvm_define_user_return_msr);
+EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
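
The replacement scheme is append-and-probe: an MSR joins the list only if a read/write round-trip succeeds, and its slot is simply its list index. A user-space model of the registry (the probe is stubbed out here; in the kernel it is the rdmsrl_safe()/wrmsrl_safe() pair above):

/* uret_msr_list.c — models probe-then-append user-return MSR setup. */
#include <stdint.h>
#include <stdio.h>

#define MAX_NR_USER_RETURN_MSRS 16

static uint32_t uret_msrs_list[MAX_NR_USER_RETURN_MSRS];
static int nr_uret_msrs;

static int probe_msr(uint32_t msr)
{
	(void)msr;
	return 0;	/* pretend the rdmsr/wrmsr round-trip succeeded */
}

/* Returns the assigned slot, or -1 if the MSR doesn't exist on this CPU. */
static int add_user_return_msr(uint32_t msr)
{
	if (nr_uret_msrs >= MAX_NR_USER_RETURN_MSRS || probe_msr(msr))
		return -1;
	uret_msrs_list[nr_uret_msrs] = msr;
	return nr_uret_msrs++;
}

static int find_user_return_msr(uint32_t msr)
{
	for (int i = 0; i < nr_uret_msrs; i++)
		if (uret_msrs_list[i] == msr)
			return i;
	return -1;
}

int main(void)
{
	add_user_return_msr(0xc0000080);	/* MSR_EFER */
	add_user_return_msr(0xc0000103);	/* MSR_TSC_AUX */
	printf("TSC_AUX slot = %d\n", find_user_return_msr(0xc0000103));
	return 0;
}
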
static void kvm_user_return_msr_cpu_online(void)
{
@@ -352,8 +411,8 @@ static void kvm_user_return_msr_cpu_online(void)
u64 value;
int i;
- for (i = 0; i < user_return_msrs_global.nr; ++i) {
- rdmsrl_safe(user_return_msrs_global.msrs[i], &value);
+ for (i = 0; i < kvm_nr_uret_msrs; ++i) {
+ rdmsrl_safe(kvm_uret_msrs_list[i], &value);
msrs->values[i].host = value;
msrs->values[i].curr = value;
}
@@ -368,7 +427,7 @@ int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
value = (value & mask) | (msrs->values[slot].host & ~mask);
if (value == msrs->values[slot].curr)
return 0;
- err = wrmsrl_safe(user_return_msrs_global.msrs[slot], value);
+ err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
if (err)
return 1;
@@ -544,8 +603,6 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
queue:
- if (has_error && !is_protmode(vcpu))
- has_error = false;
if (reinject) {
/*
* On vmentry, vcpu->arch.exception.pending is only
@@ -750,13 +807,6 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
-static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
- void *data, int offset, int len, u32 access)
-{
- return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
- data, offset, len, access);
-}
-
static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
{
return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
@@ -791,6 +841,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
+ vcpu->arch.pdptrs_from_userspace = false;
out:
@@ -798,40 +849,14 @@ out:
}
EXPORT_SYMBOL_GPL(load_pdptrs);
-bool pdptrs_changed(struct kvm_vcpu *vcpu)
-{
- u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
- int offset;
- gfn_t gfn;
- int r;
-
- if (!is_pae_paging(vcpu))
- return false;
-
- if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
- return true;
-
- gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT;
- offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1);
- r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
- PFERR_USER_MASK | PFERR_WRITE_MASK);
- if (r < 0)
- return true;
-
- return memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
-}
-EXPORT_SYMBOL_GPL(pdptrs_changed);
-
void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
{
- unsigned long update_bits = X86_CR0_PG | X86_CR0_WP;
-
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
}
- if ((cr0 ^ old_cr0) & update_bits)
+ if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
kvm_mmu_reset_context(vcpu);
if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
@@ -984,14 +1009,17 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
return 0;
}
-int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
+int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
{
- if (static_call(kvm_x86_get_cpl)(vcpu) == 0)
- return __kvm_set_xcr(vcpu, index, xcr);
+ if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
+ __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
- return 1;
+ return kvm_skip_emulated_instruction(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_set_xcr);
+EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
@@ -1007,10 +1035,7 @@ EXPORT_SYMBOL_GPL(kvm_is_valid_cr4);
void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
{
- unsigned long mmu_role_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
- X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
-
- if (((cr4 ^ old_cr4) & mmu_role_bits) ||
+ if (((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS) ||
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
}
@@ -1053,36 +1078,75 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
+static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
+{
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
+ unsigned long roots_to_free = 0;
+ int i;
+
+ /*
+ * If neither the current CR3 nor any of the prev_roots use the given
+ * PCID, then nothing needs to be done here because a resync will
+ * happen anyway before switching to any other CR3.
+ */
+ if (kvm_get_active_pcid(vcpu) == pcid) {
+ kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+ }
+
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+ if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid)
+ roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
+
+ kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
+}
+
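
kvm_invalidate_pcid() walks the current root and the cached previous roots, dropping exactly those whose PCID matches the one being invalidated. A reduced model of that walk (the root bookkeeping is flattened to a plain array; the print statements stand in for the MMU sync/flush requests and root freeing):

/* pcid_invalidate.c — frees cached roots whose PCID matches. */
#include <stdint.h>
#include <stdio.h>

#define NUM_PREV_ROOTS 3

struct root { uint64_t pgd; int valid; };

static unsigned long pcid_of(uint64_t cr3_or_pgd)
{
	return cr3_or_pgd & 0xfff;	/* PCID lives in CR3 bits 11:0 */
}

static void invalidate_pcid(struct root *prev, uint64_t cur_pgd,
			    unsigned long pcid)
{
	if (pcid_of(cur_pgd) == pcid)
		printf("sync + flush current root\n");

	for (int i = 0; i < NUM_PREV_ROOTS; i++) {
		if (prev[i].valid && pcid_of(prev[i].pgd) == pcid) {
			prev[i].valid = 0;
			printf("freed prev_root %d\n", i);
		}
	}
}

int main(void)
{
	struct root prev[NUM_PREV_ROOTS] = {
		{ .pgd = 0x1000 | 5, .valid = 1 },
		{ .pgd = 0x2000 | 7, .valid = 1 },
		{ .pgd = 0, .valid = 0 },
	};
	invalidate_pcid(prev, 0x3000 | 5, 5);
	return 0;
}
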
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
bool skip_tlb_flush = false;
+ unsigned long pcid = 0;
#ifdef CONFIG_X86_64
bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE);
if (pcid_enabled) {
skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
cr3 &= ~X86_CR3_PCID_NOFLUSH;
+ pcid = cr3 & X86_CR3_PCID_MASK;
}
#endif
- if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
- if (!skip_tlb_flush) {
- kvm_mmu_sync_roots(vcpu);
- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
- }
- return 0;
- }
+ /* PDPTRs are always reloaded for PAE paging. */
+ if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
+ goto handle_tlb_flush;
- if (is_long_mode(vcpu) && kvm_vcpu_is_illegal_gpa(vcpu, cr3))
+ /*
+ * Do not condition the GPA check on long mode; this helper is used to
+ * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
+ * the current vCPU mode is accurate.
+ */
+ if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
return 1;
- else if (is_pae_paging(vcpu) &&
- !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
+
+ if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
return 1;
- kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush);
+ if (cr3 != kvm_read_cr3(vcpu))
+ kvm_mmu_new_pgd(vcpu, cr3);
+
vcpu->arch.cr3 = cr3;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
+handle_tlb_flush:
+ /*
+ * A load of CR3 that flushes the TLB flushes only the current PCID,
+ * even if PCID is disabled, in which case PCID=0 is flushed. It's a
+ * moot point in the end because _disabling_ PCID will flush all PCIDs,
+ * and it's impossible to use a non-zero PCID when PCID is disabled,
+ * i.e. only PCID=0 can be relevant.
+ */
+ if (!skip_tlb_flush)
+ kvm_invalidate_pcid(vcpu, pcid);
+
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
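
The CR3 handling above splits a guest write into three pieces: the no-flush hint in bit 63, the PCID in bits 11:0 (only when CR4.PCIDE=1), and the page-table base. A standalone sketch of that decomposition (constants per the SDM; the sample value is arbitrary):

/* cr3_decompose.c — splits a guest CR3 write into its PCID pieces. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CR3_PCID_NOFLUSH (1ULL << 63)
#define CR3_PCID_MASK    0xfffULL

int main(void)
{
	uint64_t cr3 = CR3_PCID_NOFLUSH | 0x12345000ULL | 0x00a;
	bool pcid_enabled = true;	/* CR4.PCIDE */
	bool skip_tlb_flush = false;
	uint64_t pcid = 0;

	if (pcid_enabled) {
		skip_tlb_flush = cr3 & CR3_PCID_NOFLUSH;
		cr3 &= ~CR3_PCID_NOFLUSH;	/* bit 63 is not part of the address */
		pcid = cr3 & CR3_PCID_MASK;
	}
	printf("cr3=%#llx pcid=%llu skip_flush=%d\n",
	       (unsigned long long)cr3, (unsigned long long)pcid,
	       skip_tlb_flush);
	return 0;
}
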
@@ -1140,6 +1204,9 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
fixed |= DR6_RTM;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
+ fixed |= DR6_BUS_LOCK;
return fixed;
}
@@ -1192,20 +1259,21 @@ void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
}
EXPORT_SYMBOL_GPL(kvm_get_dr);
-bool kvm_rdpmc(struct kvm_vcpu *vcpu)
+int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
{
u32 ecx = kvm_rcx_read(vcpu);
u64 data;
- int err;
- err = kvm_pmu_rdpmc(vcpu, ecx, &data);
- if (err)
- return err;
+ if (kvm_pmu_rdpmc(vcpu, ecx, &data)) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
kvm_rax_write(vcpu, (u32)data);
kvm_rdx_write(vcpu, data >> 32);
- return err;
+ return kvm_skip_emulated_instruction(vcpu);
}
-EXPORT_SYMBOL_GPL(kvm_rdpmc);
+EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
/*
* List of msr numbers which we expose to userspace through KVM_GET_MSRS
@@ -1288,7 +1356,7 @@ static const u32 emulated_msrs_all[] = {
MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
MSR_IA32_TSC_ADJUST,
- MSR_IA32_TSCDEADLINE,
+ MSR_IA32_TSC_DEADLINE,
MSR_IA32_ARCH_CAPABILITIES,
MSR_IA32_PERF_CAPABILITIES,
MSR_IA32_MISC_ENABLE,
@@ -1373,7 +1441,7 @@ static u64 kvm_get_arch_capabilities(void)
/*
* If nx_huge_pages is enabled, KVM's shadow paging will ensure that
* the nested hypervisor runs with NX huge pages. If it is not,
- * L1 is anyway vulnerable to ITLB_MULTIHIT explots from other
+ * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
* L1 guests, so it need not worry about its own (L2) guests.
*/
data |= ARCH_CAP_PSCHANGE_MC_NO;
@@ -1445,7 +1513,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
if (r == KVM_MSR_RET_INVALID) {
/* Unconditionally clear the output for simplicity */
*data = 0;
- if (kvm_msr_ignored_check(vcpu, index, 0, false))
+ if (kvm_msr_ignored_check(index, 0, false))
r = 0;
}
@@ -1526,35 +1594,44 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
{
+ struct kvm_x86_msr_filter *msr_filter;
+ struct msr_bitmap_range *ranges;
struct kvm *kvm = vcpu->kvm;
- struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;
- u32 count = kvm->arch.msr_filter.count;
- u32 i;
- bool r = kvm->arch.msr_filter.default_allow;
+ bool allowed;
int idx;
+ u32 i;
- /* MSR filtering not set up or x2APIC enabled, allow everything */
- if (!count || (index >= 0x800 && index <= 0x8ff))
+ /* x2APIC MSRs do not support filtering. */
+ if (index >= 0x800 && index <= 0x8ff)
return true;
- /* Prevent collision with set_msr_filter */
idx = srcu_read_lock(&kvm->srcu);
- for (i = 0; i < count; i++) {
+ msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
+ if (!msr_filter) {
+ allowed = true;
+ goto out;
+ }
+
+ allowed = msr_filter->default_allow;
+ ranges = msr_filter->ranges;
+
+ for (i = 0; i < msr_filter->count; i++) {
u32 start = ranges[i].base;
u32 end = start + ranges[i].nmsrs;
u32 flags = ranges[i].flags;
unsigned long *bitmap = ranges[i].bitmap;
if ((index >= start) && (index < end) && (flags & type)) {
- r = !!test_bit(index - start, bitmap);
+ allowed = !!test_bit(index - start, bitmap);
break;
}
}
+out:
srcu_read_unlock(&kvm->srcu, idx);
- return r;
+ return allowed;
}
EXPORT_SYMBOL_GPL(kvm_msr_allowed);
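
A compilable userspace model of the range/bitmap walk above, assuming the same semantics (a [base, base+nmsrs) window, per-range read/write flags, one allow bit per MSR); test_bit_ul() stands in for the kernel's test_bit():

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define FILTER_READ  (1u << 0)
#define FILTER_WRITE (1u << 1)

struct range {
	uint32_t base, nmsrs, flags;
	const unsigned long *bitmap;  /* 1 = allow, nmsrs bits long */
};

static bool test_bit_ul(size_t nr, const unsigned long *addr)
{
	return (addr[nr / (8 * sizeof(long))] >>
		(nr % (8 * sizeof(long)))) & 1;
}

static bool msr_allowed(const struct range *r, size_t count,
			uint32_t index, uint32_t type, bool default_allow)
{
	for (size_t i = 0; i < count; i++) {
		if (index >= r[i].base && index < r[i].base + r[i].nmsrs &&
		    (r[i].flags & type))
			return test_bit_ul(index - r[i].base, r[i].bitmap);
	}
	return default_allow;  /* no range matched */
}
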
@@ -1596,6 +1673,30 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
* invokes 64-bit SYSENTER.
*/
data = get_canonical(data, vcpu_virt_addr_bits(vcpu));
+ break;
+ case MSR_TSC_AUX:
+ if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
+ return 1;
+
+ if (!host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+ return 1;
+
+ /*
+ * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
+ * incomplete and conflicting architectural behavior. Current
+ * AMD CPUs completely ignore bits 63:32, i.e. they aren't
+ * reserved and always read as zeros. Enforce Intel's reserved
+ * bits check if and only if the guest CPU is Intel, and clear
+ * the bits in all other cases. This ensures cross-vendor
+ * migration will provide consistent behavior for the guest.
+ */
+ if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0)
+ return 1;
+
+ data = (u32)data;
+ break;
}
msr.data = data;
@@ -1611,7 +1712,7 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
if (ret == KVM_MSR_RET_INVALID)
- if (kvm_msr_ignored_check(vcpu, index, data, true))
+ if (kvm_msr_ignored_check(index, data, true))
ret = 0;
return ret;
@@ -1632,6 +1733,18 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
return KVM_MSR_RET_FILTERED;
+ switch (index) {
+ case MSR_TSC_AUX:
+ if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
+ return 1;
+
+ if (!host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
+ return 1;
+ break;
+ }
+
msr.index = index;
msr.host_initiated = host_initiated;
@@ -1649,7 +1762,7 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
if (ret == KVM_MSR_RET_INVALID) {
/* Unconditionally clear *data for simplicity */
*data = 0;
- if (kvm_msr_ignored_check(vcpu, index, 0, false))
+ if (kvm_msr_ignored_check(index, 0, false))
ret = 0;
}
@@ -1783,6 +1896,40 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
+int kvm_emulate_as_nop(struct kvm_vcpu *vcpu)
+{
+ return kvm_skip_emulated_instruction(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_as_nop);
+
+int kvm_emulate_invd(struct kvm_vcpu *vcpu)
+{
+ /* Treat an INVD instruction as a NOP and just skip it. */
+ return kvm_emulate_as_nop(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_invd);
+
+int kvm_emulate_mwait(struct kvm_vcpu *vcpu)
+{
+ pr_warn_once("kvm: MWAIT instruction emulated as NOP!\n");
+ return kvm_emulate_as_nop(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_mwait);
+
+int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
+{
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+}
+EXPORT_SYMBOL_GPL(kvm_handle_invalid_op);
+
+int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
+{
+ pr_warn_once("kvm: MONITOR instruction emulated as NOP!\n");
+ return kvm_emulate_as_nop(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_monitor);
+
static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
{
xfer_to_guest_mode_prepare();
@@ -1841,7 +1988,7 @@ fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
ret = EXIT_FASTPATH_EXIT_HANDLED;
}
break;
- case MSR_IA32_TSCDEADLINE:
+ case MSR_IA32_TSC_DEADLINE:
data = kvm_read_edx_eax(vcpu);
if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
kvm_skip_emulated_instruction(vcpu);
@@ -2060,13 +2207,15 @@ static u32 adjust_tsc_khz(u32 khz, s32 ppm)
return v;
}
+static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
+
static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
{
u64 ratio;
/* Guest TSC same frequency as host TSC? */
if (!scale) {
- vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+ kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio);
return 0;
}
@@ -2092,7 +2241,7 @@ static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
return -1;
}
- vcpu->arch.tsc_scaling_ratio = ratio;
+ kvm_vcpu_write_tsc_multiplier(vcpu, ratio);
return 0;
}
@@ -2104,7 +2253,7 @@ static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
/* tsc_khz can be zero if TSC calibration fails */
if (user_tsc_khz == 0) {
/* set tsc_scaling_ratio to a safe value */
- vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
+ kvm_vcpu_write_tsc_multiplier(vcpu, kvm_default_tsc_scaling_ratio);
return -1;
}
@@ -2186,10 +2335,9 @@ static inline u64 __scale_tsc(u64 ratio, u64 tsc)
return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
}
-u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc, u64 ratio)
{
u64 _tsc = tsc;
- u64 ratio = vcpu->arch.tsc_scaling_ratio;
if (ratio != kvm_default_tsc_scaling_ratio)
_tsc = __scale_tsc(ratio, tsc);
@@ -2198,25 +2346,86 @@ u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
}
EXPORT_SYMBOL_GPL(kvm_scale_tsc);
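
The scaling itself is fixed-point arithmetic: the ratio carries kvm_tsc_scaling_ratio_frac_bits fractional bits (48 with VMX TSC scaling, 32 with SVM), so the operation is a widening multiply followed by a right shift, as in this standalone sketch:

#include <stdint.h>

/* Widening multiply-then-shift, mirroring mul_u64_u64_shr(). */
static uint64_t scale_tsc(uint64_t tsc, uint64_t ratio, unsigned int frac_bits)
{
	return (uint64_t)(((unsigned __int128)tsc * ratio) >> frac_bits);
}

/*
 * With 48 fractional bits (the VMX format), a guest at half the host
 * frequency uses ratio = 1ULL << 47, so scale_tsc(1000, 1ULL << 47, 48)
 * reads a 1000-cycle host delta as 500 guest cycles.
 */
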
-static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
{
u64 tsc;
- tsc = kvm_scale_tsc(vcpu, rdtsc());
+ tsc = kvm_scale_tsc(vcpu, rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
return target_tsc - tsc;
}
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
- return vcpu->arch.l1_tsc_offset + kvm_scale_tsc(vcpu, host_tsc);
+ return vcpu->arch.l1_tsc_offset +
+ kvm_scale_tsc(vcpu, host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
}
EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
-static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
+{
+ u64 nested_offset;
+
+ if (l2_multiplier == kvm_default_tsc_scaling_ratio)
+ nested_offset = l1_offset;
+ else
+ nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier,
+ kvm_tsc_scaling_ratio_frac_bits);
+
+ nested_offset += l2_offset;
+ return nested_offset;
+}
+EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset);
+
+u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
+{
+ if (l2_multiplier != kvm_default_tsc_scaling_ratio)
+ return mul_u64_u64_shr(l1_multiplier, l2_multiplier,
+ kvm_tsc_scaling_ratio_frac_bits);
+
+ return l1_multiplier;
+}
+EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
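
The two helpers above compose L1's and L2's TSC parameters in that same fixed-point format: the effective offset is L1's offset rescaled by L2's multiplier plus L2's offset, and the effective multiplier is the shifted product of the two. A standalone model (frac_bits as above):

#include <stdint.h>

static uint64_t nested_tsc_offset(uint64_t l1_off, uint64_t l2_off,
				  uint64_t l2_mult, unsigned int frac_bits)
{
	/* Signed rescale of L1's offset (it may be negative), then add L2's. */
	__int128 scaled = ((__int128)(int64_t)l1_off * (__int128)l2_mult) >> frac_bits;

	return (uint64_t)scaled + l2_off;
}

static uint64_t nested_tsc_multiplier(uint64_t l1_mult, uint64_t l2_mult,
				      unsigned int frac_bits)
{
	return (uint64_t)(((unsigned __int128)l1_mult * l2_mult) >> frac_bits);
}
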
+
+static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
{
- vcpu->arch.l1_tsc_offset = offset;
- vcpu->arch.tsc_offset = static_call(kvm_x86_write_l1_tsc_offset)(vcpu, offset);
+ trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+ vcpu->arch.l1_tsc_offset,
+ l1_offset);
+
+ vcpu->arch.l1_tsc_offset = l1_offset;
+
+ /*
+ * If we are here because L1 chose not to trap WRMSR to TSC then
+ * according to the spec this should set L1's TSC (as opposed to
+ * setting L1's offset for L2).
+ */
+ if (is_guest_mode(vcpu))
+ vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
+ l1_offset,
+ static_call(kvm_x86_get_l2_tsc_offset)(vcpu),
+ static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
+ else
+ vcpu->arch.tsc_offset = l1_offset;
+
+ static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset);
+}
+
+static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
+{
+ vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier;
+
+ /* Userspace is changing the multiplier while L2 is active */
+ if (is_guest_mode(vcpu))
+ vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
+ l1_multiplier,
+ static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
+ else
+ vcpu->arch.tsc_scaling_ratio = l1_multiplier;
+
+ if (kvm_has_tsc_control)
+ static_call(kvm_x86_write_tsc_multiplier)(
+ vcpu, vcpu->arch.tsc_scaling_ratio);
}
static inline bool kvm_check_tsc_unstable(void)
@@ -2242,7 +2451,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
bool synchronizing = false;
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
- offset = kvm_compute_tsc_offset(vcpu, data);
+ offset = kvm_compute_l1_tsc_offset(vcpu, data);
ns = get_kvmclock_base_ns();
elapsed = ns - kvm->arch.last_tsc_nsec;
@@ -2281,7 +2490,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
} else {
u64 delta = nsec_to_cycles(vcpu, elapsed);
data += delta;
- offset = kvm_compute_tsc_offset(vcpu, data);
+ offset = kvm_compute_l1_tsc_offset(vcpu, data);
}
matched = true;
already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
@@ -2320,7 +2529,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
kvm_vcpu_write_tsc_offset(vcpu, offset);
raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
- spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
+ spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
if (!matched) {
kvm->arch.nr_vcpus_matched_tsc = 0;
} else if (!already_matched) {
@@ -2328,7 +2537,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
}
kvm_track_tsc_matching(vcpu);
- spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
+ spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
}
static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
@@ -2340,9 +2549,10 @@ static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
{
- if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
+ if (vcpu->arch.l1_tsc_scaling_ratio != kvm_default_tsc_scaling_ratio)
WARN_ON(adjustment < 0);
- adjustment = kvm_scale_tsc(vcpu, (u64) adjustment);
+ adjustment = kvm_scale_tsc(vcpu, (u64) adjustment,
+ vcpu->arch.l1_tsc_scaling_ratio);
adjust_tsc_offset_guest(vcpu, adjustment);
}
@@ -2550,11 +2760,16 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
int i;
struct kvm_vcpu *vcpu;
struct kvm_arch *ka = &kvm->arch;
+ unsigned long flags;
+
+ kvm_hv_invalidate_tsc_page(kvm);
- spin_lock(&ka->pvclock_gtod_sync_lock);
kvm_make_mclock_inprogress_request(kvm);
+
/* no guest entries from this point */
+ spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
pvclock_update_vm_gtod_copy(kvm);
+ spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -2562,8 +2777,6 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
/* guest entries allowed */
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
- spin_unlock(&ka->pvclock_gtod_sync_lock);
#endif
}
@@ -2571,17 +2784,18 @@ u64 get_kvmclock_ns(struct kvm *kvm)
{
struct kvm_arch *ka = &kvm->arch;
struct pvclock_vcpu_time_info hv_clock;
+ unsigned long flags;
u64 ret;
- spin_lock(&ka->pvclock_gtod_sync_lock);
+ spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
if (!ka->use_master_clock) {
- spin_unlock(&ka->pvclock_gtod_sync_lock);
+ spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
return get_kvmclock_base_ns() + ka->kvmclock_offset;
}
hv_clock.tsc_timestamp = ka->master_cycle_now;
hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
- spin_unlock(&ka->pvclock_gtod_sync_lock);
+ spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
/* both __this_cpu_read() and rdtsc() should be on the same cpu */
get_cpu();
@@ -2675,13 +2889,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
* If the host uses TSC clock, then passthrough TSC as stable
* to the guest.
*/
- spin_lock(&ka->pvclock_gtod_sync_lock);
+ spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
use_master_clock = ka->use_master_clock;
if (use_master_clock) {
host_tsc = ka->master_cycle_now;
kernel_ns = ka->master_kernel_ns;
}
- spin_unlock(&ka->pvclock_gtod_sync_lock);
+ spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
@@ -2721,7 +2935,8 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
/* With all the info we got, fill in the values */
if (kvm_has_tsc_control)
- tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);
+ tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz,
+ v->arch.l1_tsc_scaling_ratio);
if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
@@ -2949,6 +3164,19 @@ static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
{
++vcpu->stat.tlb_flush;
+
+ if (!tdp_enabled) {
+ /*
+ * A TLB flush on behalf of the guest is equivalent to
+ * INVPCID(all), toggling CR4.PGE, etc., which requires
+ * a forced sync of the shadow page tables. Unload the
+ * entire MMU here and the subsequent load will sync the
+ * shadow page tables, and also flush the TLB.
+ */
+ kvm_mmu_unload(vcpu);
+ return;
+ }
+
static_call(kvm_x86_tlb_flush_guest)(vcpu);
}
@@ -2978,10 +3206,14 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
* expensive IPIs.
*/
if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
+ u8 st_preempted = xchg(&st->preempted, 0);
+
trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
- st->preempted & KVM_VCPU_FLUSH_TLB);
- if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
+ st_preempted & KVM_VCPU_FLUSH_TLB);
+ if (st_preempted & KVM_VCPU_FLUSH_TLB)
kvm_vcpu_flush_tlb_guest(vcpu);
+ } else {
+ st->preempted = 0;
}
vcpu->arch.st.preempted = 0;
@@ -3075,7 +3307,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return kvm_set_apic_base(vcpu, msr_info);
case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
return kvm_x2apic_msr_write(vcpu, msr, data);
- case MSR_IA32_TSCDEADLINE:
+ case MSR_IA32_TSC_DEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
break;
case MSR_IA32_TSC_ADJUST:
@@ -3110,7 +3342,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (msr_info->host_initiated) {
kvm_synchronize_tsc(vcpu, data);
} else {
- u64 adj = kvm_compute_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
+ u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
adjust_tsc_offset_guest(vcpu, adj);
vcpu->arch.ia32_tsc_adjust_msr += adj;
}
@@ -3345,7 +3577,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_LASTBRANCHTOIP:
case MSR_IA32_LASTINTFROMIP:
case MSR_IA32_LASTINTTOIP:
- case MSR_K8_SYSCFG:
+ case MSR_AMD64_SYSCFG:
case MSR_K8_TSEG_ADDR:
case MSR_K8_TSEG_MASK:
case MSR_VM_HSAVE_PA:
@@ -3370,6 +3602,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = 0;
break;
case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
+ if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
+ return kvm_pmu_get_msr(vcpu, msr_info);
+ if (!msr_info->host_initiated)
+ return 1;
+ msr_info->data = 0;
+ break;
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
@@ -3406,10 +3644,17 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
* return L1's TSC value to ensure backwards-compatible
* behavior for migration.
*/
- u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset :
- vcpu->arch.tsc_offset;
+ u64 offset, ratio;
+
+ if (msr_info->host_initiated) {
+ offset = vcpu->arch.l1_tsc_offset;
+ ratio = vcpu->arch.l1_tsc_scaling_ratio;
+ } else {
+ offset = vcpu->arch.tsc_offset;
+ ratio = vcpu->arch.tsc_scaling_ratio;
+ }
- msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset;
+ msr_info->data = kvm_scale_tsc(vcpu, rdtsc(), ratio) + offset;
break;
}
case MSR_MTRRcap:
@@ -3437,7 +3682,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
- case MSR_IA32_TSCDEADLINE:
+ case MSR_IA32_TSC_DEADLINE:
msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
break;
case MSR_IA32_TSC_ADJUST:
@@ -3733,6 +3978,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_HYPERV_TLBFLUSH:
case KVM_CAP_HYPERV_SEND_IPI:
case KVM_CAP_HYPERV_CPUID:
+ case KVM_CAP_HYPERV_ENFORCE_CPUID:
case KVM_CAP_SYS_HYPERV_CPUID:
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
@@ -3759,8 +4005,19 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_X86_USER_SPACE_MSR:
case KVM_CAP_X86_MSR_FILTER:
case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
+#ifdef CONFIG_X86_SGX_KVM
+ case KVM_CAP_SGX_ATTRIBUTE:
+#endif
+ case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
+ case KVM_CAP_SREGS2:
+ case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
r = 1;
break;
+ case KVM_CAP_EXIT_HYPERCALL:
+ r = KVM_EXIT_HYPERCALL_VALID_MASK;
+ break;
+ case KVM_CAP_SET_GUEST_DEBUG2:
+ return KVM_GUESTDBG_VALID_MASK;
#ifdef CONFIG_KVM_XEN
case KVM_CAP_XEN_HVM:
r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR |
@@ -3986,7 +4243,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mark_tsc_unstable("KVM discovered backwards TSC");
if (kvm_check_tsc_unstable()) {
- u64 offset = kvm_compute_tsc_offset(vcpu,
+ u64 offset = kvm_compute_l1_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
kvm_vcpu_write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1;
@@ -4013,7 +4270,6 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
{
struct kvm_host_map map;
struct kvm_steal_time *st;
- int idx;
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
@@ -4021,15 +4277,9 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
if (vcpu->arch.st.preempted)
return;
- /*
- * Take the srcu lock as memslots will be accessed to check the gfn
- * cache generation against the memslots generation.
- */
- idx = srcu_read_lock(&vcpu->kvm->srcu);
-
if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
&vcpu->arch.st.cache, true))
- goto out;
+ return;
st = map.hva +
offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
@@ -4037,20 +4287,25 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
-
-out:
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
+ int idx;
+
if (vcpu->preempted && !vcpu->arch.guest_state_protected)
vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
+ /*
+ * Take the srcu lock as memslots will be accessed to check the gfn
+ * cache generation against the memslots generation.
+ */
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
if (kvm_xen_msr_enabled(vcpu->kvm))
kvm_xen_runstate_set_preempted(vcpu);
else
kvm_steal_time_set_preempted(vcpu);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
static_call(kvm_x86_vcpu_put)(vcpu);
vcpu->arch.last_host_tsc = rdtsc();
@@ -4307,7 +4562,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
memset(&events->reserved, 0, sizeof(events->reserved));
}
-static void kvm_smm_changed(struct kvm_vcpu *vcpu);
+static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm);
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
@@ -4367,13 +4622,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.apic->sipi_vector = events->sipi_vector;
if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
- if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
- if (events->smi.smm)
- vcpu->arch.hflags |= HF_SMM_MASK;
- else
- vcpu->arch.hflags &= ~HF_SMM_MASK;
- kvm_smm_changed(vcpu);
- }
+ if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm)
+ kvm_smm_changed(vcpu, events->smi.smm);
vcpu->arch.smi_pending = events->smi.pending;
@@ -4657,13 +4907,15 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return static_call(kvm_x86_enable_direct_tlbflush)(vcpu);
+ case KVM_CAP_HYPERV_ENFORCE_CPUID:
+ return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]);
+
case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
vcpu->arch.pv_cpuid.enforce = cap->args[0];
if (vcpu->arch.pv_cpuid.enforce)
kvm_update_pv_runtime(vcpu);
return 0;
-
default:
return -EINVAL;
}
@@ -4676,6 +4928,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
void __user *argp = (void __user *)arg;
int r;
union {
+ struct kvm_sregs2 *sregs2;
struct kvm_lapic_state *lapic;
struct kvm_xsave *xsave;
struct kvm_xcrs *xcrs;
@@ -5048,6 +5301,28 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
#endif
+ case KVM_GET_SREGS2: {
+ u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!u.sregs2)
+ goto out;
+ __get_sregs2(vcpu, u.sregs2);
+ r = -EFAULT;
+ if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2)))
+ goto out;
+ r = 0;
+ break;
+ }
+ case KVM_SET_SREGS2: {
+ u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2));
+ if (IS_ERR(u.sregs2)) {
+ r = PTR_ERR(u.sregs2);
+ u.sregs2 = NULL;
+ goto out;
+ }
+ r = __set_sregs2(vcpu, u.sregs2);
+ break;
+ }
default:
r = -EINVAL;
}
@@ -5345,6 +5620,43 @@ split_irqchip_unlock:
kvm->arch.bus_lock_detection_enabled = true;
r = 0;
break;
+#ifdef CONFIG_X86_SGX_KVM
+ case KVM_CAP_SGX_ATTRIBUTE: {
+ unsigned long allowed_attributes = 0;
+
+ r = sgx_set_attribute(&allowed_attributes, cap->args[0]);
+ if (r)
+ break;
+
+ /* KVM only supports the PROVISIONKEY privileged attribute. */
+ if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) &&
+ !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY))
+ kvm->arch.sgx_provisioning_allowed = true;
+ else
+ r = -EINVAL;
+ break;
+ }
+#endif
+ case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
+ r = -EINVAL;
+ if (kvm_x86_ops.vm_copy_enc_context_from)
+ r = kvm_x86_ops.vm_copy_enc_context_from(kvm, cap->args[0]);
+ return r;
+ case KVM_CAP_EXIT_HYPERCALL:
+ if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) {
+ r = -EINVAL;
+ break;
+ }
+ kvm->arch.hypercall_exit_enabled = cap->args[0];
+ r = 0;
+ break;
+ case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
+ r = -EINVAL;
+ if (cap->args[0] & ~1)
+ break;
+ kvm->arch.exit_on_emulation_error = cap->args[0];
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
@@ -5352,33 +5664,46 @@ split_irqchip_unlock:
return r;
}
-static void kvm_clear_msr_filter(struct kvm *kvm)
+static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
+{
+ struct kvm_x86_msr_filter *msr_filter;
+
+ msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT);
+ if (!msr_filter)
+ return NULL;
+
+ msr_filter->default_allow = default_allow;
+ return msr_filter;
+}
+
+static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
{
u32 i;
- u32 count = kvm->arch.msr_filter.count;
- struct msr_bitmap_range ranges[16];
- mutex_lock(&kvm->lock);
- kvm->arch.msr_filter.count = 0;
- memcpy(ranges, kvm->arch.msr_filter.ranges, count * sizeof(ranges[0]));
- mutex_unlock(&kvm->lock);
- synchronize_srcu(&kvm->srcu);
+ if (!msr_filter)
+ return;
- for (i = 0; i < count; i++)
- kfree(ranges[i].bitmap);
+ for (i = 0; i < msr_filter->count; i++)
+ kfree(msr_filter->ranges[i].bitmap);
+
+ kfree(msr_filter);
}
-static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user_range)
+static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
+ struct kvm_msr_filter_range *user_range)
{
- struct msr_bitmap_range *ranges = kvm->arch.msr_filter.ranges;
- struct msr_bitmap_range range;
unsigned long *bitmap = NULL;
size_t bitmap_size;
- int r;
if (!user_range->nmsrs)
return 0;
+ if (user_range->flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE))
+ return -EINVAL;
+
+ if (!user_range->flags)
+ return -EINVAL;
+
bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long);
if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE)
return -EINVAL;
@@ -5387,42 +5712,25 @@ static int kvm_add_msr_filter(struct kvm *kvm, struct kvm_msr_filter_range *user
if (IS_ERR(bitmap))
return PTR_ERR(bitmap);
- range = (struct msr_bitmap_range) {
+ msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) {
.flags = user_range->flags,
.base = user_range->base,
.nmsrs = user_range->nmsrs,
.bitmap = bitmap,
};
- if (range.flags & ~(KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE)) {
- r = -EINVAL;
- goto err;
- }
-
- if (!range.flags) {
- r = -EINVAL;
- goto err;
- }
-
- /* Everything ok, add this range identifier to our global pool */
- ranges[kvm->arch.msr_filter.count] = range;
- /* Make sure we filled the array before we tell anyone to walk it */
- smp_wmb();
- kvm->arch.msr_filter.count++;
-
+ msr_filter->count++;
return 0;
-err:
- kfree(bitmap);
- return r;
}
static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
{
struct kvm_msr_filter __user *user_msr_filter = argp;
+ struct kvm_x86_msr_filter *new_filter, *old_filter;
struct kvm_msr_filter filter;
bool default_allow;
- int r = 0;
bool empty = true;
+ int r = 0;
u32 i;
if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
@@ -5435,27 +5743,69 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
if (empty && !default_allow)
return -EINVAL;
- kvm_clear_msr_filter(kvm);
-
- kvm->arch.msr_filter.default_allow = default_allow;
+ new_filter = kvm_alloc_msr_filter(default_allow);
+ if (!new_filter)
+ return -ENOMEM;
- /*
- * Protect from concurrent calls to this function that could trigger
- * a TOCTOU violation on kvm->arch.msr_filter.count.
- */
- mutex_lock(&kvm->lock);
for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
- r = kvm_add_msr_filter(kvm, &filter.ranges[i]);
- if (r)
- break;
+ r = kvm_add_msr_filter(new_filter, &filter.ranges[i]);
+ if (r) {
+ kvm_free_msr_filter(new_filter);
+ return r;
+ }
}
+ mutex_lock(&kvm->lock);
+
+ /* The per-VM filter is protected by kvm->lock... */
+ old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1);
+
+ rcu_assign_pointer(kvm->arch.msr_filter, new_filter);
+ synchronize_srcu(&kvm->srcu);
+
+ kvm_free_msr_filter(old_filter);
+
kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
mutex_unlock(&kvm->lock);
- return r;
+ return 0;
}
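
The swap above follows the classic publish-then-reclaim pattern: build the new filter aside, publish the pointer, wait out readers that may still hold the old copy, then free it. A userspace C11 analogue, where quiesce_readers() is a hypothetical stand-in for synchronize_srcu():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct filter { bool default_allow; /* ranges elided */ };

static _Atomic(struct filter *) active_filter;

/* Hypothetical: returns once every reader of the old pointer has finished. */
extern void quiesce_readers(void);

static void install_filter(struct filter *new_f)
{
	struct filter *old = atomic_exchange(&active_filter, new_f);

	quiesce_readers();  /* no reader can still hold 'old' after this */
	free(old);
}
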
+#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
+static int kvm_arch_suspend_notifier(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ int i, ret = 0;
+
+ mutex_lock(&kvm->lock);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!vcpu->arch.pv_time_enabled)
+ continue;
+
+ ret = kvm_set_guest_paused(vcpu);
+ if (ret) {
+ kvm_err("Failed to pause guest VCPU%d: %d\n",
+ vcpu->vcpu_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&kvm->lock);
+
+ return ret ? NOTIFY_BAD : NOTIFY_DONE;
+}
+
+int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
+{
+ switch (state) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ return kvm_arch_suspend_notifier(kvm);
+ }
+
+ return NOTIFY_DONE;
+}
+#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
+
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
@@ -5700,6 +6050,7 @@ set_pit2_out:
}
#endif
case KVM_SET_CLOCK: {
+ struct kvm_arch *ka = &kvm->arch;
struct kvm_clock_data user_ns;
u64 now_ns;
@@ -5718,8 +6069,22 @@ set_pit2_out:
* pvclock_update_vm_gtod_copy().
*/
kvm_gen_update_masterclock(kvm);
- now_ns = get_kvmclock_ns(kvm);
- kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
+
+ /*
+ * This pairs with kvm_guest_time_update(): when masterclock is
+ * in use, we use master_kernel_ns + kvmclock_offset to set
+ * unsigned 'system_time' so if we use get_kvmclock_ns() (which
+ * is slightly ahead) here we risk going negative on unsigned
+ * 'system_time' when 'user_ns.clock' is very small.
+ */
+ spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+ if (kvm->arch.use_master_clock)
+ now_ns = ka->master_kernel_ns;
+ else
+ now_ns = get_kvmclock_base_ns();
+ ka->kvmclock_offset = user_ns.clock - now_ns;
+ spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+
kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
break;
}
@@ -5819,7 +6184,8 @@ static void kvm_init_msr_list(void)
continue;
break;
case MSR_TSC_AUX:
- if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
+ if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
+ !kvm_cpu_cap_has(X86_FEATURE_RDPID))
continue;
break;
case MSR_IA32_UMWAIT_CONTROL:
@@ -5959,6 +6325,7 @@ gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}
+EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
@@ -5975,6 +6342,7 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
access |= PFERR_WRITE_MASK;
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}
+EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write);
/* uses this to access any guest's mapped memory without checking CPL */
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
@@ -6603,7 +6971,7 @@ static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
int cpu = get_cpu();
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
- smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
+ on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask,
wbinvd_ipi, NULL, 1);
put_cpu();
cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
@@ -6894,12 +7262,12 @@ static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
- return kvm_register_read(emul_to_vcpu(ctxt), reg);
+ return kvm_register_read_raw(emul_to_vcpu(ctxt), reg);
}
static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{
- kvm_register_write(emul_to_vcpu(ctxt), reg, val);
+ kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val);
}
static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
@@ -6912,20 +7280,22 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
return emul_to_vcpu(ctxt)->arch.hflags;
}
-static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
+static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt)
{
- emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+ kvm_smm_changed(vcpu, false);
}
-static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt,
const char *smstate)
{
- return static_call(kvm_x86_pre_leave_smm)(emul_to_vcpu(ctxt), smstate);
+ return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate);
}
-static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
+static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
{
- kvm_smm_changed(emul_to_vcpu(ctxt));
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
}
static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
@@ -6974,9 +7344,9 @@ static const struct x86_emulate_ops emulate_ops = {
.guest_has_fxsr = emulator_guest_has_fxsr,
.set_nmi_mask = emulator_set_nmi_mask,
.get_hflags = emulator_get_hflags,
- .set_hflags = emulator_set_hflags,
- .pre_leave_smm = emulator_pre_leave_smm,
- .post_leave_smm = emulator_post_leave_smm,
+ .exiting_smm = emulator_exiting_smm,
+ .leave_smm = emulator_leave_smm,
+ .triple_fault = emulator_triple_fault,
.set_xcr = emulator_set_xcr,
};
@@ -7051,6 +7421,11 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
+ ctxt->interruptibility = 0;
+ ctxt->have_exception = false;
+ ctxt->exception.vector = -1;
+ ctxt->perm_ok = false;
+
init_decode_cache(ctxt);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}
@@ -7077,8 +7452,33 @@ void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
}
EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
+static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
+{
+ struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
+ u32 insn_size = ctxt->fetch.end - ctxt->fetch.data;
+ struct kvm_run *run = vcpu->run;
+
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ run->emulation_failure.ndata = 0;
+ run->emulation_failure.flags = 0;
+
+ if (insn_size) {
+ run->emulation_failure.ndata = 3;
+ run->emulation_failure.flags |=
+ KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES;
+ run->emulation_failure.insn_size = insn_size;
+ memset(run->emulation_failure.insn_bytes, 0x90,
+ sizeof(run->emulation_failure.insn_bytes));
+ memcpy(run->emulation_failure.insn_bytes,
+ ctxt->fetch.data, insn_size);
+ }
+}
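
On the VMM side, a handler for this exit might look like the hypothetical sketch below; the field names come from the struct kvm_run layout used here (a uapi header new enough to carry them is assumed), and thanks to the 0x90 padding any trailing bytes print as NOPs:

#include <stdio.h>
#include <linux/kvm.h>

/* Sketch only: dump the failed instruction bytes reported by KVM. */
static void report_emulation_failure(const struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_INTERNAL_ERROR ||
	    run->emulation_failure.suberror != KVM_INTERNAL_ERROR_EMULATION)
		return;

	if (run->emulation_failure.ndata &&
	    (run->emulation_failure.flags &
	     KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES)) {
		for (__u32 i = 0; i < run->emulation_failure.insn_size; i++)
			fprintf(stderr, "%02x ",
				run->emulation_failure.insn_bytes[i]);
		fputc('\n', stderr);
	}
}
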
+
static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
{
+ struct kvm *kvm = vcpu->kvm;
+
++vcpu->stat.insn_emulation_fail;
trace_kvm_emulate_insn_failed(vcpu);
@@ -7087,10 +7487,9 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
return 1;
}
- if (emulation_type & EMULTYPE_SKIP) {
- vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
- vcpu->run->internal.ndata = 0;
+ if (kvm->arch.exit_on_emulation_error ||
+ (emulation_type & EMULTYPE_SKIP)) {
+ prepare_emulation_failure_exit(vcpu);
return 0;
}
@@ -7232,11 +7631,14 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);
-static void kvm_smm_changed(struct kvm_vcpu *vcpu)
+static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
{
- if (!(vcpu->arch.hflags & HF_SMM_MASK)) {
- /* This is a good place to trace that we are exiting SMM. */
- trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false);
+ trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
+
+ if (entering_smm) {
+ vcpu->arch.hflags |= HF_SMM_MASK;
+ } else {
+ vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
/* Process a latched INIT or SMI, if any. */
kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -7386,14 +7788,7 @@ int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
kvm_vcpu_check_breakpoint(vcpu, &r))
return r;
- ctxt->interruptibility = 0;
- ctxt->have_exception = false;
- ctxt->exception.vector = -1;
- ctxt->perm_ok = false;
-
- ctxt->ud = emulation_type & EMULTYPE_TRAP_UD;
-
- r = x86_decode_insn(ctxt, insn, insn_len);
+ r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
@@ -7698,6 +8093,7 @@ static void kvm_hyperv_tsc_notifier(void)
struct kvm *kvm;
struct kvm_vcpu *vcpu;
int cpu;
+ unsigned long flags;
mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
@@ -7713,17 +8109,15 @@ static void kvm_hyperv_tsc_notifier(void)
list_for_each_entry(kvm, &vm_list, vm_list) {
struct kvm_arch *ka = &kvm->arch;
- spin_lock(&ka->pvclock_gtod_sync_lock);
-
+ spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
pvclock_update_vm_gtod_copy(kvm);
+ spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
kvm_for_each_vcpu(cpu, vcpu, kvm)
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
kvm_for_each_vcpu(cpu, vcpu, kvm)
kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
- spin_unlock(&ka->pvclock_gtod_sync_lock);
}
mutex_unlock(&kvm_lock);
}
@@ -7921,6 +8315,18 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
/*
+ * Indirection to move queue_work() out of the tk_core.seq write held
+ * region to prevent possible deadlocks against time accessors which
+ * are invoked with work related locks held.
+ */
+static void pvclock_irq_work_fn(struct irq_work *w)
+{
+ queue_work(system_long_wq, &pvclock_gtod_work);
+}
+
+static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
+
+/*
* Notification about pvclock gtod data update.
*/
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
@@ -7931,13 +8337,14 @@ static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
update_pvclock_gtod(tk);
- /* disable master clock if host does not trust, or does not
- * use, TSC based clocksource.
+ /*
+ * Disable master clock if host does not trust, or does not use,
+ * TSC based clocksource. Delegate queue_work() to irq_work as
+ * this is invoked with tk_core.seq write held.
*/
if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
atomic_read(&kvm_guest_has_master_clock) != 0)
- queue_work(system_long_wq, &pvclock_gtod_work);
-
+ irq_work_queue(&pvclock_irq_work);
return 0;
}
@@ -7999,14 +8406,12 @@ int kvm_arch_init(void *opaque)
printk(KERN_ERR "kvm: failed to allocate percpu kvm_user_return_msrs\n");
goto out_free_x86_emulator_cache;
}
+ kvm_nr_uret_msrs = 0;
r = kvm_mmu_module_init();
if (r)
goto out_free_percpu;
- kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
- PT_DIRTY_MASK, PT64_NX_MASK, 0,
- PT_PRESENT_MASK, 0, sme_me_mask);
kvm_timer_init();
perf_register_guest_info_callbacks(&kvm_guest_cbs);
@@ -8052,10 +8457,13 @@ void kvm_arch_exit(void)
cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
#ifdef CONFIG_X86_64
pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
+ irq_work_sync(&pvclock_irq_work);
+ cancel_work_sync(&pvclock_gtod_work);
#endif
kvm_x86_ops.hardware_enable = NULL;
kvm_mmu_module_exit();
free_percpu(user_return_msrs);
+ kmem_cache_destroy(x86_emulator_cache);
kmem_cache_destroy(x86_fpu_cache);
#ifdef CONFIG_KVM_XEN
static_key_deferred_flush(&kvm_xen_enabled);
@@ -8155,32 +8563,59 @@ bool kvm_apicv_activated(struct kvm *kvm)
}
EXPORT_SYMBOL_GPL(kvm_apicv_activated);
-void kvm_apicv_init(struct kvm *kvm, bool enable)
+static void kvm_apicv_init(struct kvm *kvm)
{
- if (enable)
+ if (enable_apicv)
clear_bit(APICV_INHIBIT_REASON_DISABLE,
&kvm->arch.apicv_inhibit_reasons);
else
set_bit(APICV_INHIBIT_REASON_DISABLE,
&kvm->arch.apicv_inhibit_reasons);
}
-EXPORT_SYMBOL_GPL(kvm_apicv_init);
-static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
+static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
{
struct kvm_vcpu *target = NULL;
struct kvm_apic_map *map;
+ vcpu->stat.directed_yield_attempted++;
+
+ if (single_task_running())
+ goto no_yield;
+
rcu_read_lock();
- map = rcu_dereference(kvm->arch.apic_map);
+ map = rcu_dereference(vcpu->kvm->arch.apic_map);
if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id])
target = map->phys_map[dest_id]->vcpu;
rcu_read_unlock();
- if (target && READ_ONCE(target->ready))
- kvm_vcpu_yield_to(target);
+ if (!target || !READ_ONCE(target->ready))
+ goto no_yield;
+
+ /* Ignore requests to yield to self */
+ if (vcpu == target)
+ goto no_yield;
+
+ if (kvm_vcpu_yield_to(target) <= 0)
+ goto no_yield;
+
+ vcpu->stat.directed_yield_successful++;
+
+no_yield:
+ return;
+}
+
+static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
+{
+ u64 ret = vcpu->run->hypercall.ret;
+
+ if (!is_64_bit_mode(vcpu))
+ ret = (u32)ret;
+ kvm_rax_write(vcpu, ret);
+ ++vcpu->stat.hypercalls;
+ return kvm_skip_emulated_instruction(vcpu);
}
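
For reference, the guest side of these hypercalls is a plain VMCALL with the number in RAX and arguments in RBX/RCX/RDX, whose return value lands in RAX via complete_hypercall_exit() above. A minimal sketch for an Intel guest (AMD guests use VMMCALL; the real kvm_para.h helper patches between the two at runtime):

static long kvm_hypercall3(unsigned long nr, unsigned long p1,
			   unsigned long p2, unsigned long p3)
{
	long ret;

	asm volatile("vmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}
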
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
@@ -8227,7 +8662,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
break;
kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
- kvm_sched_yield(vcpu->kvm, a1);
+ kvm_sched_yield(vcpu, a1);
ret = 0;
break;
#ifdef CONFIG_X86_64
@@ -8245,9 +8680,31 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD))
break;
- kvm_sched_yield(vcpu->kvm, a0);
+ kvm_sched_yield(vcpu, a0);
ret = 0;
break;
+ case KVM_HC_MAP_GPA_RANGE: {
+ u64 gpa = a0, npages = a1, attrs = a2;
+
+ ret = -KVM_ENOSYS;
+ if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE)))
+ break;
+
+ if (!PAGE_ALIGNED(gpa) || !npages ||
+ gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) {
+ ret = -KVM_EINVAL;
+ break;
+ }
+
+ vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
+ vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
+ vcpu->run->hypercall.args[0] = gpa;
+ vcpu->run->hypercall.args[1] = npages;
+ vcpu->run->hypercall.args[2] = attrs;
+ vcpu->run->hypercall.longmode = op_64_bit;
+ vcpu->arch.complete_userspace_io = complete_hypercall_exit;
+ return 0;
+ }
default:
ret = -KVM_ENOSYS;
break;
@@ -8328,7 +8785,25 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr);
}
-static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
+
+int kvm_check_nested_events(struct kvm_vcpu *vcpu)
+{
+ if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
+ kvm_x86_ops.nested_ops->triple_fault(vcpu);
+ return 1;
+ }
+
+ return kvm_x86_ops.nested_ops->check_events(vcpu);
+}
+
+static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
+ vcpu->arch.exception.error_code = false;
+ static_call(kvm_x86_queue_exception)(vcpu);
+}
+
+static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
{
int r;
bool can_inject = true;
@@ -8336,7 +8811,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
/* try to reinject previous events if any */
if (vcpu->arch.exception.injected) {
- static_call(kvm_x86_queue_exception)(vcpu);
+ kvm_inject_exception(vcpu);
can_inject = false;
}
/*
@@ -8373,9 +8848,9 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
* from L2 to L1.
*/
if (is_guest_mode(vcpu)) {
- r = kvm_x86_ops.nested_ops->check_events(vcpu);
+ r = kvm_check_nested_events(vcpu);
if (r < 0)
- goto busy;
+ goto out;
}
/* try to inject new event if pending */
@@ -8399,7 +8874,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
}
}
- static_call(kvm_x86_queue_exception)(vcpu);
+ kvm_inject_exception(vcpu);
can_inject = false;
}
@@ -8417,7 +8892,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
if (vcpu->arch.smi_pending) {
r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY;
if (r < 0)
- goto busy;
+ goto out;
if (r) {
vcpu->arch.smi_pending = false;
++vcpu->arch.smi_count;
@@ -8430,7 +8905,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
if (vcpu->arch.nmi_pending) {
r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY;
if (r < 0)
- goto busy;
+ goto out;
if (r) {
--vcpu->arch.nmi_pending;
vcpu->arch.nmi_injected = true;
@@ -8445,7 +8920,7 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
if (kvm_cpu_has_injectable_intr(vcpu)) {
r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY;
if (r < 0)
- goto busy;
+ goto out;
if (r) {
kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false);
static_call(kvm_x86_set_irq)(vcpu);
@@ -8461,11 +8936,14 @@ static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit
*req_immediate_exit = true;
WARN_ON(vcpu->arch.exception.pending);
- return;
+ return 0;
-busy:
- *req_immediate_exit = true;
- return;
+out:
+ if (r == -EBUSY) {
+ *req_immediate_exit = true;
+ r = 0;
+ }
+ return r;
}
static void process_nmi(struct kvm_vcpu *vcpu)
@@ -8548,7 +9026,7 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
for (i = 0; i < 8; i++)
- put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i));
+ put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i));
kvm_get_dr(vcpu, 6, &val);
put_smstate(u32, buf, 0x7fcc, (u32)val);
@@ -8594,7 +9072,7 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
int i;
for (i = 0; i < 16; i++)
- put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i));
+ put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i));
put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
@@ -8644,10 +9122,9 @@ static void enter_smm(struct kvm_vcpu *vcpu)
{
struct kvm_segment cs, ds;
struct desc_ptr dt;
+ unsigned long cr0;
char buf[512];
- u32 cr0;
- trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
memset(buf, 0, 512);
#ifdef CONFIG_X86_64
if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
@@ -8657,13 +9134,13 @@ static void enter_smm(struct kvm_vcpu *vcpu)
enter_smm_save_state_32(vcpu, buf);
/*
- * Give pre_enter_smm() a chance to make ISA-specific changes to the
- * vCPU state (e.g. leave guest mode) after we've saved the state into
- * the SMM state-save area.
+ * Give enter_smm() a chance to make ISA-specific changes to the vCPU
+ * state (e.g. leave guest mode) after we've saved the state into the
+ * SMM state-save area.
*/
- static_call(kvm_x86_pre_enter_smm)(vcpu, buf);
+ static_call(kvm_x86_enter_smm)(vcpu, buf);
- vcpu->arch.hflags |= HF_SMM_MASK;
+ kvm_smm_changed(vcpu, true);
kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
if (static_call(kvm_x86_get_nmi_mask)(vcpu))
@@ -8752,6 +9229,15 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm);
kvm_apic_update_apicv(vcpu);
static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu);
+
+ /*
+ * When APICv gets disabled, we may still have injected interrupts
+ * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was
+ * still active when the interrupt got accepted. Make sure
+ * inject_pending_event() is called to check for that.
+ */
+ if (!vcpu->arch.apicv_active)
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
@@ -8927,7 +9413,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
kvm_vcpu_flush_tlb_current(vcpu);
- if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu))
+ if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
kvm_vcpu_flush_tlb_guest(vcpu);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
@@ -8936,10 +9422,14 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto out;
}
if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
- vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
- vcpu->mmio_needed = 0;
- r = 0;
- goto out;
+ if (is_guest_mode(vcpu)) {
+ kvm_x86_ops.nested_ops->triple_fault(vcpu);
+ } else {
+ vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
+ vcpu->mmio_needed = 0;
+ r = 0;
+ goto out;
+ }
}
if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
/* Page is swapped out. Do synthetic halt */
@@ -9016,13 +9506,21 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
kvm_xen_has_interrupt(vcpu)) {
++vcpu->stat.req_event;
- kvm_apic_accept_events(vcpu);
+ r = kvm_apic_accept_events(vcpu);
+ if (r < 0) {
+ r = 0;
+ goto out;
+ }
if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
r = 1;
goto out;
}
- inject_pending_event(vcpu, &req_immediate_exit);
+ r = inject_pending_event(vcpu, &req_immediate_exit);
+ if (r < 0) {
+ r = 0;
+ goto out;
+ }
if (req_int_win)
static_call(kvm_x86_enable_irq_window)(vcpu);
@@ -9160,6 +9658,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
local_irq_disable();
kvm_after_interrupt(vcpu);
+ /*
+ * Wait until after servicing IRQs to account guest time so that any
+ * ticks that occurred while running the guest are properly accounted
+ * to the guest. Waiting until IRQs are enabled degrades the accuracy
+ * of accounting via context tracking, but the loss of accuracy is
+ * acceptable for all known use cases.
+ */
+ vtime_account_guest_exit();
+
if (lapic_in_kernel(vcpu)) {
s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
if (delta != S64_MIN) {
@@ -9215,7 +9722,8 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
return 1;
}
- kvm_apic_accept_events(vcpu);
+ if (kvm_apic_accept_events(vcpu) < 0)
+ return 0;
switch(vcpu->arch.mp_state) {
case KVM_MP_STATE_HALTED:
case KVM_MP_STATE_AP_RESET_HOLD:
@@ -9237,7 +9745,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu))
- kvm_x86_ops.nested_ops->check_events(vcpu);
+ kvm_check_nested_events(vcpu);
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted);
@@ -9261,7 +9769,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
if (r <= 0)
break;
- kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu);
+ kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
if (kvm_cpu_has_pending_timer(vcpu))
kvm_inject_pending_timer_irqs(vcpu);
@@ -9439,7 +9947,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
goto out;
}
kvm_vcpu_block(vcpu);
- kvm_apic_accept_events(vcpu);
+ if (kvm_apic_accept_events(vcpu) < 0) {
+ r = 0;
+ goto out;
+ }
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
r = -EAGAIN;
if (signal_pending(current)) {
@@ -9588,7 +10099,7 @@ void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
-static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
struct desc_ptr dt;
@@ -9621,14 +10132,36 @@ skip_protected_regs:
sregs->cr8 = kvm_get_cr8(vcpu);
sregs->efer = vcpu->arch.efer;
sregs->apic_base = kvm_get_apic_base(vcpu);
+}
- memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap));
+static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ __get_sregs_common(vcpu, sregs);
+
+ if (vcpu->arch.guest_state_protected)
+ return;
if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
set_bit(vcpu->arch.interrupt.nr,
(unsigned long *)sregs->interrupt_bitmap);
}
+static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
+{
+ int i;
+
+ __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2);
+
+ if (vcpu->arch.guest_state_protected)
+ return;
+
+ if (is_pae_paging(vcpu)) {
+ for (i = 0 ; i < 4 ; i++)
+ sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i);
+ sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
+ }
+}
+
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
@@ -9641,11 +10174,17 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
+ int r;
+
vcpu_load(vcpu);
if (kvm_mpx_supported())
kvm_load_guest_fpu(vcpu);
- kvm_apic_accept_events(vcpu);
+ r = kvm_apic_accept_events(vcpu);
+ if (r < 0)
+ goto out;
+ r = 0;
+
if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
vcpu->arch.pv.pv_unhalted)
@@ -9653,10 +10192,11 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
else
mp_state->mp_state = vcpu->arch.mp_state;
+out:
if (kvm_mpx_supported())
kvm_put_guest_fpu(vcpu);
vcpu_put(vcpu);
- return 0;
+ return r;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
@@ -9740,24 +10280,23 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
return kvm_is_valid_cr4(vcpu, sregs->cr4);
}
-static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
+ int *mmu_reset_needed, bool update_pdptrs)
{
struct msr_data apic_base_msr;
- int mmu_reset_needed = 0;
- int pending_vec, max_bits, idx;
+ int idx;
struct desc_ptr dt;
- int ret = -EINVAL;
if (!kvm_is_valid_sregs(vcpu, sregs))
- goto out;
+ return -EINVAL;
apic_base_msr.data = sregs->apic_base;
apic_base_msr.host_initiated = true;
if (kvm_set_apic_base(vcpu, &apic_base_msr))
- goto out;
+ return -EINVAL;
if (vcpu->arch.guest_state_protected)
- goto skip_protected_regs;
+ return 0;
dt.size = sregs->idt.limit;
dt.address = sregs->idt.base;
@@ -9767,31 +10306,30 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
static_call(kvm_x86_set_gdt)(vcpu, &dt);
vcpu->arch.cr2 = sregs->cr2;
- mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
+ *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
vcpu->arch.cr3 = sregs->cr3;
kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
kvm_set_cr8(vcpu, sregs->cr8);
- mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
+ *mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
static_call(kvm_x86_set_efer)(vcpu, sregs->efer);
- mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
+ *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0);
vcpu->arch.cr0 = sregs->cr0;
- mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
+ *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4);
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (is_pae_paging(vcpu)) {
- load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
- mmu_reset_needed = 1;
+ if (update_pdptrs) {
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ if (is_pae_paging(vcpu)) {
+ load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
+ *mmu_reset_needed = 1;
+ }
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
- if (mmu_reset_needed)
- kvm_mmu_reset_context(vcpu);
kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
@@ -9811,20 +10349,63 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
!is_protmode(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-skip_protected_regs:
+ return 0;
+}
+
+static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+{
+ int pending_vec, max_bits;
+ int mmu_reset_needed = 0;
+ int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true);
+
+ if (ret)
+ return ret;
+
+ if (mmu_reset_needed)
+ kvm_mmu_reset_context(vcpu);
+
max_bits = KVM_NR_INTERRUPTS;
pending_vec = find_first_bit(
(const unsigned long *)sregs->interrupt_bitmap, max_bits);
+
if (pending_vec < max_bits) {
kvm_queue_interrupt(vcpu, pending_vec, false);
pr_debug("Set back pending irq %d\n", pending_vec);
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
}
+ return 0;
+}
- kvm_make_request(KVM_REQ_EVENT, vcpu);
+static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
+{
+ int mmu_reset_needed = 0;
+ bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
+ bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) &&
+ !(sregs2->efer & EFER_LMA);
+ int i, ret;
- ret = 0;
-out:
- return ret;
+ if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID)
+ return -EINVAL;
+
+ if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected))
+ return -EINVAL;
+
+ ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2,
+ &mmu_reset_needed, !valid_pdptrs);
+ if (ret)
+ return ret;
+
+ if (valid_pdptrs) {
+ for (i = 0; i < 4 ; i++)
+ kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]);
+
+ kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
+ mmu_reset_needed = 1;
+ vcpu->arch.pdptrs_from_userspace = true;
+ }
+ if (mmu_reset_needed)
+ kvm_mmu_reset_context(vcpu);
+ return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
@@ -9880,8 +10461,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
kvm_update_dr7(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
- vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
- get_segment_base(vcpu, VCPU_SREG_CS);
+ vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
/*
* Trigger an rflags update that will inject or remove the trace
@@ -10049,13 +10629,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
struct page *page;
int r;
+ vcpu->arch.last_vmentry_cpu = -1;
+
if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
- kvm_set_tsc_khz(vcpu, max_tsc_khz);
-
r = kvm_mmu_create(vcpu);
if (r < 0)
return r;
@@ -10115,6 +10695,10 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.pending_external_vector = -1;
vcpu->arch.preempted_in_kernel = false;
+#if IS_ENABLED(CONFIG_HYPERV)
+ vcpu->arch.hv_root_tdp = INVALID_PAGE;
+#endif
+
r = static_call(kvm_x86_vcpu_create)(vcpu);
if (r)
goto free_guest_fpu;
@@ -10123,8 +10707,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
kvm_vcpu_mtrr_init(vcpu);
vcpu_load(vcpu);
+ kvm_set_tsc_khz(vcpu, max_tsc_khz);
kvm_vcpu_reset(vcpu, false);
- kvm_init_mmu(vcpu, false);
+ kvm_init_mmu(vcpu);
vcpu_put(vcpu);
return 0;
@@ -10198,6 +10783,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
+ unsigned long old_cr0 = kvm_read_cr0(vcpu);
+
kvm_lapic_reset(vcpu, init_event);
vcpu->arch.hflags = 0;
@@ -10266,6 +10853,17 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.ia32_xss = 0;
static_call(kvm_x86_vcpu_reset)(vcpu, init_event);
+
+ /*
+ * Reset the MMU context if paging was enabled prior to INIT (which is
+ * implied if CR0.PG=1 as CR0 will be '0' prior to RESET). Unlike the
+ * standard CR0/CR4/EFER modification paths, only CR0.PG needs to be
+ * checked because it is unconditionally cleared on INIT and all other
+ * paging related bits are ignored if paging is disabled, i.e. CR0.WP,
+ * CR4, and EFER changes are all irrelevant if CR0.PG was '0'.
+ */
+ if (old_cr0 & X86_CR0_PG)
+ kvm_mmu_reset_context(vcpu);
}
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
@@ -10383,6 +10981,9 @@ int kvm_arch_hardware_setup(void *opaque)
int r;
rdmsrl_safe(MSR_EFER, &host_efer);
+ if (WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_NX) &&
+ !(host_efer & EFER_NX)))
+ return -EIO;
if (boot_cpu_has(X86_FEATURE_XSAVES))
rdmsrl(MSR_IA32_XSS, host_xss);
@@ -10498,9 +11099,15 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.guest_can_read_msr_platform_info = true;
+#if IS_ENABLED(CONFIG_HYPERV)
+ spin_lock_init(&kvm->arch.hv_root_tdp_lock);
+ kvm->arch.hv_root_tdp = INVALID_PAGE;
+#endif
+
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
+ kvm_apicv_init(kvm);
kvm_hv_init_vm(kvm);
kvm_page_track_init(kvm);
kvm_mmu_init_vm(kvm);
@@ -10601,7 +11208,7 @@ void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
return (void __user *)hva;
} else {
if (!slot || !slot->npages)
- return 0;
+ return NULL;
old_npages = slot->npages;
hva = slot->userspace_addr;
@@ -10634,8 +11241,6 @@ void kvm_arch_pre_destroy_vm(struct kvm *kvm)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
- u32 i;
-
if (current->mm == kvm->mm) {
/*
* Free memory regions allocated on behalf of userspace,
@@ -10651,8 +11256,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
mutex_unlock(&kvm->slots_lock);
}
static_call_cond(kvm_x86_vm_destroy)(kvm);
- for (i = 0; i < kvm->arch.msr_filter.count; i++)
- kfree(kvm->arch.msr_filter.ranges[i].bitmap);
+ kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
kvm_pic_destroy(kvm);
kvm_ioapic_destroy(kvm);
kvm_free_vcpus(kvm);
@@ -10664,17 +11268,23 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_hv_destroy_vm(kvm);
}
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
+static void memslot_rmap_free(struct kvm_memory_slot *slot)
{
int i;
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
kvfree(slot->arch.rmap[i]);
slot->arch.rmap[i] = NULL;
+ }
+}
- if (i == 0)
- continue;
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+ int i;
+ memslot_rmap_free(slot);
+
+ for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
kvfree(slot->arch.lpage_info[i - 1]);
slot->arch.lpage_info[i - 1] = NULL;
}
@@ -10682,11 +11292,79 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
kvm_page_track_free_memslot(slot);
}
-static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
- unsigned long npages)
+static int memslot_rmap_alloc(struct kvm_memory_slot *slot,
+ unsigned long npages)
{
+ const int sz = sizeof(*slot->arch.rmap[0]);
int i;
+ for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+ int level = i + 1;
+ int lpages = gfn_to_index(slot->base_gfn + npages - 1,
+ slot->base_gfn, level) + 1;
+
+ WARN_ON(slot->arch.rmap[i]);
+
+ slot->arch.rmap[i] = kvcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
+ if (!slot->arch.rmap[i]) {
+ memslot_rmap_free(slot);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int alloc_all_memslots_rmaps(struct kvm *kvm)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *slot;
+ int r, i;
+
+ /*
+ * Check if memslots already have rmaps early before acquiring
+ * the slots_arch_lock below.
+ */
+ if (kvm_memslots_have_rmaps(kvm))
+ return 0;
+
+ mutex_lock(&kvm->slots_arch_lock);
+
+ /*
+ * Read memslots_have_rmaps again, under the slots arch lock,
+ * before allocating the rmaps
+ */
+ if (kvm_memslots_have_rmaps(kvm)) {
+ mutex_unlock(&kvm->slots_arch_lock);
+ return 0;
+ }
+
+ for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ slots = __kvm_memslots(kvm, i);
+ kvm_for_each_memslot(slot, slots) {
+ r = memslot_rmap_alloc(slot, slot->npages);
+ if (r) {
+ mutex_unlock(&kvm->slots_arch_lock);
+ return r;
+ }
+ }
+ }
+
+ /*
+ * Ensure that memslots_have_rmaps becomes true strictly after
+ * all the rmap pointers are set.
+ */
+ smp_store_release(&kvm->arch.memslots_have_rmaps, true);
+ mutex_unlock(&kvm->slots_arch_lock);
+ return 0;
+}
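The function above is a check/lock/re-check pattern: a lock-free fast path, a second check under slots_arch_lock, and a release store so that any reader who observes memslots_have_rmaps == true also observes every rmap pointer. A self-contained userspace analog of the same shape (pthreads and C11 atomics stand in for the kernel primitives):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool have_rmaps;
    static pthread_mutex_t slots_arch_lock = PTHREAD_MUTEX_INITIALIZER;

    static void ensure_rmaps(void)
    {
            /* Fast path; acquire pairs with the release store below. */
            if (atomic_load_explicit(&have_rmaps, memory_order_acquire))
                    return;

            pthread_mutex_lock(&slots_arch_lock);
            if (!atomic_load_explicit(&have_rmaps, memory_order_relaxed)) {
                    /* ... allocate every slot's rmap arrays here ... */
                    atomic_store_explicit(&have_rmaps, true,
                                          memory_order_release);
            }
            pthread_mutex_unlock(&slots_arch_lock);
    }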
+
+static int kvm_alloc_memslot_metadata(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ unsigned long npages)
+{
+ int i, r;
+
/*
* Clear out the previous array pointers for the KVM_MR_MOVE case. The
* old arrays will be freed by __kvm_set_memory_region() if installing
@@ -10694,7 +11372,13 @@ static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
*/
memset(&slot->arch, 0, sizeof(slot->arch));
- for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+ if (kvm_memslots_have_rmaps(kvm)) {
+ r = memslot_rmap_alloc(slot, npages);
+ if (r)
+ return r;
+ }
+
+ for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
struct kvm_lpage_info *linfo;
unsigned long ugfn;
int lpages;
@@ -10703,14 +11387,6 @@ static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
lpages = gfn_to_index(slot->base_gfn + npages - 1,
slot->base_gfn, level) + 1;
- slot->arch.rmap[i] =
- kvcalloc(lpages, sizeof(*slot->arch.rmap[i]),
- GFP_KERNEL_ACCOUNT);
- if (!slot->arch.rmap[i])
- goto out_free;
- if (i == 0)
- continue;
-
linfo = kvcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
if (!linfo)
goto out_free;
@@ -10740,12 +11416,9 @@ static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
return 0;
out_free:
- for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
- kvfree(slot->arch.rmap[i]);
- slot->arch.rmap[i] = NULL;
- if (i == 0)
- continue;
+ memslot_rmap_free(slot);
+ for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
kvfree(slot->arch.lpage_info[i - 1]);
slot->arch.lpage_info[i - 1] = NULL;
}
@@ -10774,7 +11447,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
enum kvm_mr_change change)
{
if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
- return kvm_alloc_memslot_metadata(memslot,
+ return kvm_alloc_memslot_metadata(kvm, memslot,
mem->memory_size >> PAGE_SHIFT);
return 0;
}
@@ -10850,36 +11523,19 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
*/
kvm_mmu_zap_collapsible_sptes(kvm, new);
} else {
- /* By default, write-protect everything to log writes. */
- int level = PG_LEVEL_4K;
+ /*
+ * Initially-all-set does not require write-protecting any pages,
+ * because they're all assumed to be dirty.
+ */
+ if (kvm_dirty_log_manual_protect_and_init_set(kvm))
+ return;
if (kvm_x86_ops.cpu_dirty_log_size) {
- /*
- * Clear all dirty bits, unless pages are treated as
- * dirty from the get-go.
- */
- if (!kvm_dirty_log_manual_protect_and_init_set(kvm))
- kvm_mmu_slot_leaf_clear_dirty(kvm, new);
-
- /*
- * Write-protect large pages on write so that dirty
- * logging happens at 4k granularity. No need to
- * write-protect small SPTEs since write accesses are
- * logged by the CPU via dirty bits.
- */
- level = PG_LEVEL_2M;
- } else if (kvm_dirty_log_manual_protect_and_init_set(kvm)) {
- /*
- * If we're with initial-all-set, we don't need
- * to write protect any small page because
- * they're reported as dirty already. However
- * we still need to write-protect huge pages
- * so that the page split can happen lazily on
- * the first write to the huge page.
- */
- level = PG_LEVEL_2M;
+ kvm_mmu_slot_leaf_clear_dirty(kvm, new);
+ kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
+ } else {
+ kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
}
- kvm_mmu_slot_remove_write_access(kvm, new, level);
}
}
@@ -10966,6 +11622,14 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
}
+bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.apicv_active && static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu))
+ return true;
+
+ return false;
+}
+
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
{
if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
@@ -10976,14 +11640,14 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
kvm_test_request(KVM_REQ_EVENT, vcpu))
return true;
- if (vcpu->arch.apicv_active && static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu))
- return true;
-
- return false;
+ return kvm_arch_dy_has_pending_interrupt(vcpu);
}
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
+ if (vcpu->arch.guest_state_protected)
+ return true;
+
return vcpu->arch.preempted_in_kernel;
}
@@ -11254,12 +11918,13 @@ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
if (!kvm_pv_async_pf_enabled(vcpu))
return true;
else
- return apf_pageready_slot_free(vcpu);
+ return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
}
void kvm_arch_start_assignment(struct kvm *kvm)
{
- atomic_inc(&kvm->arch.assigned_device_count);
+ if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
+ static_call_cond(kvm_x86_start_assignment)(kvm);
}
EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
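The change above turns the bare increment into a "first assignment" edge trigger: the vendor hook fires only on the 0 -> 1 transition. A tiny C11 sketch of the same idiom (the hook body is hypothetical):

    #include <stdatomic.h>

    static atomic_int assigned_device_count;

    static void start_assignment(void)
    {
            /* atomic_fetch_add() returns the old value, so this fires
             * exactly once, on the 0 -> 1 transition. */
            if (atomic_fetch_add(&assigned_device_count, 1) == 0)
                    ; /* one-time setup, e.g. notify the vendor module */
    }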
@@ -11439,8 +12104,6 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
{
bool pcid_enabled;
struct x86_exception e;
- unsigned i;
- unsigned long roots_to_free = 0;
struct {
u64 pcid;
u64 gla;
@@ -11474,23 +12137,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
return 1;
}
- if (kvm_get_active_pcid(vcpu) == operand.pcid) {
- kvm_mmu_sync_roots(vcpu);
- kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
- }
-
- for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
- if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].pgd)
- == operand.pcid)
- roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
-
- kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
- /*
- * If neither the current cr3 nor any of the prev_roots use the
- * given PCID, then nothing needs to be done here because a
- * resync will happen anyway before switching to any other CR3.
- */
-
+ kvm_invalidate_pcid(vcpu, operand.pcid);
return kvm_skip_emulated_instruction(vcpu);
case INVPCID_TYPE_ALL_NON_GLOBAL:
@@ -11503,7 +12150,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
fallthrough;
case INVPCID_TYPE_ALL_INCL_GLOBAL:
- kvm_mmu_unload(vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
return kvm_skip_emulated_instruction(vcpu);
default:
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 39eb04887141..44ae10312740 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -8,6 +8,59 @@
#include "kvm_cache_regs.h"
#include "kvm_emulate.h"
+static __always_inline void kvm_guest_enter_irqoff(void)
+{
+ /*
+ * VMENTER enables interrupts (host state), but the kernel state is
+ * interrupts disabled when this is invoked. Also tell RCU about
+ * it. This is the same logic as for exit_to_user_mode().
+ *
+ * This ensures that e.g. latency analysis on the host observes
+ * guest mode as interrupt enabled.
+ *
+ * guest_enter_irqoff() informs context tracking about the
+ * transition to guest mode and if enabled adjusts RCU state
+ * accordingly.
+ */
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ instrumentation_end();
+
+ guest_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+static __always_inline void kvm_guest_exit_irqoff(void)
+{
+ /*
+ * VMEXIT disables interrupts (host state), but tracing and lockdep
+ * have them in state 'on' as recorded before entering guest mode.
+ * Same as enter_from_user_mode().
+ *
+ * context_tracking_guest_exit() restores host context and reinstates
+ * RCU if enabled and required.
+ *
+ * This needs to be done immediately after VM-Exit, before any code
+ * that might contain tracepoints or call out to the greater world,
+ * e.g. before x86_spec_ctrl_restore_host().
+ */
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ context_tracking_guest_exit();
+
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
+
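A sketch of how a vendor entry path is expected to bracket the hardware VM-enter with these two helpers (the function name and VM-enter body are placeholders, not the actual VMX/SVM code):

    static noinstr void example_vcpu_enter_exit(struct kvm_vcpu *vcpu)
    {
            /* Tell tracing/lockdep/RCU "interrupts on, entering guest". */
            kvm_guest_enter_irqoff();

            /* ... hardware VM entry, e.g. the body of
             * vmx_vcpu_enter_exit() or svm_vcpu_enter_exit() ... */

            /* Restore host context before any traceable code runs. */
            kvm_guest_exit_irqoff();
    }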
+#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check) \
+({ \
+ bool failed = (consistency_check); \
+ if (failed) \
+ trace_kvm_nested_vmenter_failed(#consistency_check, 0); \
+ failed; \
+})
+
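Usage looks like the hypothetical nested-VMX check below; the stringified expression is what lands in the kvm_nested_vmenter_failed tracepoint when the check fires (vmcs12 and guest_cr0 stand in for real nested state):

    static int check_guest_state(struct vmcs12 *vmcs12)
    {
            /* Traces "!(vmcs12->guest_cr0 & X86_CR0_PE)" on failure. */
            if (KVM_NESTED_VMENTER_CONSISTENCY_CHECK(
                            !(vmcs12->guest_cr0 & X86_CR0_PE)))
                    return -EINVAL;
            return 0;
    }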
#define KVM_DEFAULT_PLE_GAP 128
#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
#define KVM_DEFAULT_PLE_WINDOW_GROW 2
@@ -48,6 +101,8 @@ static inline unsigned int __shrink_ple_window(unsigned int val,
#define MSR_IA32_CR_PAT_DEFAULT 0x0007040600070406ULL
+int kvm_check_nested_events(struct kvm_vcpu *vcpu);
+
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
vcpu->arch.exception.pending = false;
@@ -102,16 +157,6 @@ static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
return cs_l;
}
-static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_X86_64
- return (vcpu->arch.efer & EFER_LMA) &&
- kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
-#else
- return 0;
-#endif
-}
-
static inline bool x86_exception_has_error_code(unsigned int vector)
{
static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
@@ -222,19 +267,19 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
return false;
}
-static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg)
+static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
- unsigned long val = kvm_register_read(vcpu, reg);
+ unsigned long val = kvm_register_read_raw(vcpu, reg);
return is_64_bit_mode(vcpu) ? val : (u32)val;
}
-static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
+static inline void kvm_register_write(struct kvm_vcpu *vcpu,
int reg, unsigned long val)
{
if (!is_64_bit_mode(vcpu))
val = (u32)val;
- return kvm_register_write(vcpu, reg, val);
+ return kvm_register_write_raw(vcpu, reg, val);
}
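A short sketch of the renamed accessors' truncation semantics; outside 64-bit mode both sides are clamped to 32 bits, while the _raw variants bypass the clamp:

    static void example_truncation(struct kvm_vcpu *vcpu)
    {
            /* Returns (u32)rax when the vCPU is not in 64-bit mode ... */
            unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);

            /* ... and stores only the low 32 bits back in that case. */
            kvm_register_write(vcpu, VCPU_REGS_RBX, val + 1);
    }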
static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
@@ -250,7 +295,6 @@ static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs);
void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
-void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
u64 get_kvmclock_ns(struct kvm *kvm);
int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
index 3b6544111ac9..16bc9130e7a5 100644
--- a/arch/x86/lib/atomic64_386_32.S
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -6,7 +6,7 @@
*/
#include <linux/linkage.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
/* if you want SMP support, implement these with real spinlocks */
.macro LOCK reg
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
index 1c5c81c16b06..ce6935690766 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -6,7 +6,7 @@
*/
#include <linux/linkage.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
.macro read64 reg
movl %ebx, %eax
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 2402d4c489d2..db4b4f9197c7 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -3,7 +3,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
#include <asm/export.h>
/*
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 77b9b2a3b5c8..57b79c577496 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -11,7 +11,7 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/export.h>
diff --git a/arch/x86/lib/inat.c b/arch/x86/lib/inat.c
index 12539fca75c4..b0f3b2a62ae2 100644
--- a/arch/x86/lib/inat.c
+++ b/arch/x86/lib/inat.c
@@ -4,7 +4,7 @@
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
*/
-#include <asm/insn.h>
+#include <asm/insn.h> /* __ignore_sync_check__ */
/* Attribute tables are generated from opcode map */
#include "inat-tables.c"
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 4229950a5d78..a1d24fdc07cf 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -232,7 +232,7 @@ static int resolve_default_seg(struct insn *insn, struct pt_regs *regs, int off)
* resolve_seg_reg() - obtain segment register index
* @insn: Instruction with operands
* @regs: Register values as seen when entering kernel mode
- * @regoff: Operand offset, in pt_regs, used to deterimine segment register
+ * @regoff: Operand offset, in pt_regs, used to determine segment register
*
* Determine the segment register associated with the operands and, if
* applicable, prefixes and the instruction pointed by @insn.
@@ -404,10 +404,6 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
case INAT_SEG_REG_FS:
return (unsigned short)(regs->fs & 0xffff);
case INAT_SEG_REG_GS:
- /*
- * GS may or may not be in regs as per CONFIG_X86_32_LAZY_GS.
- * The macro below takes care of both cases.
- */
return get_user_gs(regs);
case INAT_SEG_REG_IGNORE:
default:
@@ -517,7 +513,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
* @insn: Instruction containing ModRM byte
* @regs: Register values as seen when entering kernel mode
* @offs1: Offset of the first operand register
- * @offs2: Offset of the second opeand register, if applicable
+ * @offs2: Offset of the second operand register, if applicable
*
* Obtain the offset, in pt_regs, of the registers indicated by the ModRM byte
* in @insn. This function is to be used with 16-bit address encodings. The
@@ -576,7 +572,7 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
* If ModRM.mod is 0 and ModRM.rm is 110b, then we use displacement-
* only addressing. This means that no registers are involved in
* computing the effective address. Thus, ensure that the first
- * register offset is invalild. The second register offset is already
+ * register offset is invalid. The second register offset is already
* invalid under the aforementioned conditions.
*/
if ((X86_MODRM_MOD(insn->modrm.value) == 0) &&
@@ -928,10 +924,11 @@ static int get_seg_base_limit(struct insn *insn, struct pt_regs *regs,
static int get_eff_addr_reg(struct insn *insn, struct pt_regs *regs,
int *regoff, long *eff_addr)
{
- insn_get_modrm(insn);
+ int ret;
- if (!insn->modrm.nbytes)
- return -EINVAL;
+ ret = insn_get_modrm(insn);
+ if (ret)
+ return ret;
if (X86_MODRM_MOD(insn->modrm.value) != 3)
return -EINVAL;
@@ -977,14 +974,14 @@ static int get_eff_addr_modrm(struct insn *insn, struct pt_regs *regs,
int *regoff, long *eff_addr)
{
long tmp;
+ int ret;
if (insn->addr_bytes != 8 && insn->addr_bytes != 4)
return -EINVAL;
- insn_get_modrm(insn);
-
- if (!insn->modrm.nbytes)
- return -EINVAL;
+ ret = insn_get_modrm(insn);
+ if (ret)
+ return ret;
if (X86_MODRM_MOD(insn->modrm.value) > 2)
return -EINVAL;
@@ -1106,18 +1103,21 @@ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs,
* @base_offset will have a register, as an offset from the base of pt_regs,
* that can be used to resolve the associated segment.
*
- * -EINVAL on error.
+ * Negative value on error.
*/
static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs,
int *base_offset, long *eff_addr)
{
long base, indx;
int indx_offset;
+ int ret;
if (insn->addr_bytes != 8 && insn->addr_bytes != 4)
return -EINVAL;
- insn_get_modrm(insn);
+ ret = insn_get_modrm(insn);
+ if (ret)
+ return ret;
if (!insn->modrm.nbytes)
return -EINVAL;
@@ -1125,7 +1125,9 @@ static int get_eff_addr_sib(struct insn *insn, struct pt_regs *regs,
if (X86_MODRM_MOD(insn->modrm.value) > 2)
return -EINVAL;
- insn_get_sib(insn);
+ ret = insn_get_sib(insn);
+ if (ret)
+ return ret;
if (!insn->sib.nbytes)
return -EINVAL;
@@ -1194,8 +1196,8 @@ static void __user *get_addr_ref_16(struct insn *insn, struct pt_regs *regs)
short eff_addr;
long tmp;
- insn_get_modrm(insn);
- insn_get_displacement(insn);
+ if (insn_get_displacement(insn))
+ goto out;
if (insn->addr_bytes != 2)
goto out;
@@ -1415,6 +1417,27 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
}
}
+static int insn_get_effective_ip(struct pt_regs *regs, unsigned long *ip)
+{
+ unsigned long seg_base = 0;
+
+ /*
+ * If not in user-space long mode, a custom code segment could be in
+ * use. This is true in protected mode (if the process defined a local
+ * descriptor table), or virtual-8086 mode. In most of the cases
+ * seg_base will be zero as in USER_CS.
+ */
+ if (!user_64bit_mode(regs)) {
+ seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
+ if (seg_base == -1L)
+ return -EINVAL;
+ }
+
+ *ip = seg_base + regs->ip;
+
+ return 0;
+}
+
/**
* insn_fetch_from_user() - Copy instruction bytes from user-space memory
* @regs: Structure with register values as seen when entering kernel mode
@@ -1425,36 +1448,53 @@ void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs)
*
* Returns:
*
- * Number of instruction bytes copied.
- *
- * 0 if nothing was copied.
+ * - number of instruction bytes copied.
+ * - 0 if nothing was copied.
+ * - -EINVAL if the linear address of the instruction could not be calculated.
*/
int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
{
- unsigned long seg_base = 0;
+ unsigned long ip;
int not_copied;
- /*
- * If not in user-space long mode, a custom code segment could be in
- * use. This is true in protected mode (if the process defined a local
- * descriptor table), or virtual-8086 mode. In most of the cases
- * seg_base will be zero as in USER_CS.
- */
- if (!user_64bit_mode(regs)) {
- seg_base = insn_get_seg_base(regs, INAT_SEG_REG_CS);
- if (seg_base == -1L)
- return 0;
- }
+ if (insn_get_effective_ip(regs, &ip))
+ return -EINVAL;
+ not_copied = copy_from_user(buf, (void __user *)ip, MAX_INSN_SIZE);
- not_copied = copy_from_user(buf, (void __user *)(seg_base + regs->ip),
- MAX_INSN_SIZE);
+ return MAX_INSN_SIZE - not_copied;
+}
+
+/**
+ * insn_fetch_from_user_inatomic() - Copy instruction bytes from user-space memory
+ * while in atomic code
+ * @regs: Structure with register values as seen when entering kernel mode
+ * @buf: Array to store the fetched instruction
+ *
+ * Gets the linear address of the instruction and copies the instruction bytes
+ * to the buf. This function must be used in atomic context.
+ *
+ * Returns:
+ *
+ * - number of instruction bytes copied.
+ * - 0 if nothing was copied.
+ * - -EINVAL if the linear address of the instruction could not be calculated.
+ */
+int insn_fetch_from_user_inatomic(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
+{
+ unsigned long ip;
+ int not_copied;
+
+ if (insn_get_effective_ip(regs, &ip))
+ return -EINVAL;
+
+ not_copied = __copy_from_user_inatomic(buf, (void __user *)ip, MAX_INSN_SIZE);
return MAX_INSN_SIZE - not_copied;
}
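Putting the two fetch helpers together with the decoder (renamed to insn_decode_from_regs() just below), a caller can now distinguish a short copy from an unresolvable linear IP; the wrapper below is a hypothetical sketch, not code from this patch:

    static bool fetch_and_decode(struct pt_regs *regs, struct insn *insn)
    {
            unsigned char buf[MAX_INSN_SIZE];
            int nr_copied = insn_fetch_from_user(regs, buf);

            /* 0: copy faulted entirely; < 0: linear IP not computable. */
            if (nr_copied <= 0)
                    return false;

            return insn_decode_from_regs(insn, regs, buf, nr_copied);
    }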
/**
- * insn_decode() - Decode an instruction
+ * insn_decode_from_regs() - Decode an instruction
* @insn: Structure to store decoded instruction
* @regs: Structure with register values as seen when entering kernel mode
* @buf: Buffer containing the instruction bytes
@@ -1467,8 +1507,8 @@ int insn_fetch_from_user(struct pt_regs *regs, unsigned char buf[MAX_INSN_SIZE])
*
* True if instruction was decoded, False otherwise.
*/
-bool insn_decode(struct insn *insn, struct pt_regs *regs,
- unsigned char buf[MAX_INSN_SIZE], int buf_size)
+bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs,
+ unsigned char buf[MAX_INSN_SIZE], int buf_size)
{
int seg_defs;
@@ -1491,7 +1531,9 @@ bool insn_decode(struct insn *insn, struct pt_regs *regs,
insn->addr_bytes = INSN_CODE_SEG_ADDR_SZ(seg_defs);
insn->opnd_bytes = INSN_CODE_SEG_OPND_SZ(seg_defs);
- insn_get_length(insn);
+ if (insn_get_length(insn))
+ return false;
+
if (buf_size < insn->length)
return false;
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 435630a6ec97..058f19b20465 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -11,10 +11,13 @@
#else
#include <string.h>
#endif
-#include <asm/inat.h>
-#include <asm/insn.h>
+#include <asm/inat.h> /* __ignore_sync_check__ */
+#include <asm/insn.h> /* __ignore_sync_check__ */
-#include <asm/emulate_prefix.h>
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+
+#include <asm/emulate_prefix.h> /* __ignore_sync_check__ */
#define leXX_to_cpu(t, r) \
({ \
@@ -51,6 +54,7 @@
* insn_init() - initialize struct insn
* @insn: &struct insn to be initialized
* @kaddr: address (in kernel memory) of instruction (or copy thereof)
+ * @buf_len: length of the insn buffer at @kaddr
* @x86_64: !0 for 64-bit kernel or 64-bit app
*/
void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
@@ -111,8 +115,12 @@ static void insn_get_emulate_prefix(struct insn *insn)
* Populates the @insn->prefixes bitmap, and updates @insn->next_byte
* to point to the (first) opcode. No effect if @insn->prefixes.got
* is already set.
+ *
+ * Returns:
+ * 0: on success
+ * < 0: on error
*/
-void insn_get_prefixes(struct insn *insn)
+int insn_get_prefixes(struct insn *insn)
{
struct insn_field *prefixes = &insn->prefixes;
insn_attr_t attr;
@@ -120,7 +128,7 @@ void insn_get_prefixes(struct insn *insn)
int i, nb;
if (prefixes->got)
- return;
+ return 0;
insn_get_emulate_prefix(insn);
@@ -230,8 +238,10 @@ vex_end:
prefixes->got = 1;
+ return 0;
+
err_out:
- return;
+ return -ENODATA;
}
/**
@@ -243,16 +253,25 @@ err_out:
* If necessary, first collects any preceding (prefix) bytes.
* Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got
* is already 1.
+ *
+ * Returns:
+ * 0: on success
+ * < 0: on error
*/
-void insn_get_opcode(struct insn *insn)
+int insn_get_opcode(struct insn *insn)
{
struct insn_field *opcode = &insn->opcode;
+ int pfx_id, ret;
insn_byte_t op;
- int pfx_id;
+
if (opcode->got)
- return;
- if (!insn->prefixes.got)
- insn_get_prefixes(insn);
+ return 0;
+
+ if (!insn->prefixes.got) {
+ ret = insn_get_prefixes(insn);
+ if (ret)
+ return ret;
+ }
/* Get first opcode */
op = get_next(insn_byte_t, insn);
@@ -267,9 +286,13 @@ void insn_get_opcode(struct insn *insn)
insn->attr = inat_get_avx_attribute(op, m, p);
if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
(!inat_accept_vex(insn->attr) &&
- !inat_is_group(insn->attr)))
- insn->attr = 0; /* This instruction is bad */
- goto end; /* VEX has only 1 byte for opcode */
+ !inat_is_group(insn->attr))) {
+ /* This instruction is bad */
+ insn->attr = 0;
+ return -EINVAL;
+ }
+ /* VEX has only 1 byte for opcode */
+ goto end;
}
insn->attr = inat_get_opcode_attribute(op);
@@ -280,13 +303,18 @@ void insn_get_opcode(struct insn *insn)
pfx_id = insn_last_prefix_id(insn);
insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
}
- if (inat_must_vex(insn->attr))
- insn->attr = 0; /* This instruction is bad */
+
+ if (inat_must_vex(insn->attr)) {
+ /* This instruction is bad */
+ insn->attr = 0;
+ return -EINVAL;
+ }
end:
opcode->got = 1;
+ return 0;
err_out:
- return;
+ return -ENODATA;
}
/**
@@ -296,15 +324,25 @@ err_out:
* Populates @insn->modrm and updates @insn->next_byte to point past the
* ModRM byte, if any. If necessary, first collects the preceding bytes
* (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1.
+ *
+ * Returns:
+ * 0: on success
+ * < 0: on error
*/
-void insn_get_modrm(struct insn *insn)
+int insn_get_modrm(struct insn *insn)
{
struct insn_field *modrm = &insn->modrm;
insn_byte_t pfx_id, mod;
+ int ret;
+
if (modrm->got)
- return;
- if (!insn->opcode.got)
- insn_get_opcode(insn);
+ return 0;
+
+ if (!insn->opcode.got) {
+ ret = insn_get_opcode(insn);
+ if (ret)
+ return ret;
+ }
if (inat_has_modrm(insn->attr)) {
mod = get_next(insn_byte_t, insn);
@@ -313,17 +351,22 @@ void insn_get_modrm(struct insn *insn)
pfx_id = insn_last_prefix_id(insn);
insn->attr = inat_get_group_attribute(mod, pfx_id,
insn->attr);
- if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
- insn->attr = 0; /* This is bad */
+ if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) {
+ /* Bad insn */
+ insn->attr = 0;
+ return -EINVAL;
+ }
}
}
if (insn->x86_64 && inat_is_force64(insn->attr))
insn->opnd_bytes = 8;
+
modrm->got = 1;
+ return 0;
err_out:
- return;
+ return -ENODATA;
}
@@ -337,11 +380,16 @@ err_out:
int insn_rip_relative(struct insn *insn)
{
struct insn_field *modrm = &insn->modrm;
+ int ret;
if (!insn->x86_64)
return 0;
- if (!modrm->got)
- insn_get_modrm(insn);
+
+ if (!modrm->got) {
+ ret = insn_get_modrm(insn);
+ if (ret)
+ return 0;
+ }
/*
* For rip-relative instructions, the mod field (top 2 bits)
* is zero and the r/m field (bottom 3 bits) is 0x5.
@@ -355,15 +403,25 @@ int insn_rip_relative(struct insn *insn)
*
* If necessary, first collects the instruction up to and including the
* ModRM byte.
+ *
+ * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
*/
-void insn_get_sib(struct insn *insn)
+int insn_get_sib(struct insn *insn)
{
insn_byte_t modrm;
+ int ret;
if (insn->sib.got)
- return;
- if (!insn->modrm.got)
- insn_get_modrm(insn);
+ return 0;
+
+ if (!insn->modrm.got) {
+ ret = insn_get_modrm(insn);
+ if (ret)
+ return ret;
+ }
+
if (insn->modrm.nbytes) {
modrm = insn->modrm.bytes[0];
if (insn->addr_bytes != 2 &&
@@ -374,8 +432,10 @@ void insn_get_sib(struct insn *insn)
}
insn->sib.got = 1;
+ return 0;
+
err_out:
- return;
+ return -ENODATA;
}
@@ -386,15 +446,25 @@ err_out:
* If necessary, first collects the instruction up to and including the
* SIB byte.
* Displacement value is sign-expanded.
+ *
+ * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
*/
-void insn_get_displacement(struct insn *insn)
+int insn_get_displacement(struct insn *insn)
{
insn_byte_t mod, rm, base;
+ int ret;
if (insn->displacement.got)
- return;
- if (!insn->sib.got)
- insn_get_sib(insn);
+ return 0;
+
+ if (!insn->sib.got) {
+ ret = insn_get_sib(insn);
+ if (ret)
+ return ret;
+ }
+
if (insn->modrm.nbytes) {
/*
* Interpreting the modrm byte:
@@ -436,9 +506,10 @@ void insn_get_displacement(struct insn *insn)
}
out:
insn->displacement.got = 1;
+ return 0;
err_out:
- return;
+ return -ENODATA;
}
/* Decode moffset16/32/64. Return 0 if failed */
@@ -537,20 +608,30 @@ err_out:
}
/**
- * insn_get_immediate() - Get the immediates of instruction
+ * insn_get_immediate() - Get the immediate in an instruction
* @insn: &struct insn containing instruction
*
* If necessary, first collects the instruction up to and including the
* displacement bytes.
* Basically, most immediates are sign-expanded. An unsigned value can be
- * get by bit masking with ((1 << (nbytes * 8)) - 1)
+ * computed by bit masking with ((1 << (nbytes * 8)) - 1)
+ *
+ * Returns:
+ * 0: on success
+ * < 0: on error
*/
-void insn_get_immediate(struct insn *insn)
+int insn_get_immediate(struct insn *insn)
{
+ int ret;
+
if (insn->immediate.got)
- return;
- if (!insn->displacement.got)
- insn_get_displacement(insn);
+ return 0;
+
+ if (!insn->displacement.got) {
+ ret = insn_get_displacement(insn);
+ if (ret)
+ return ret;
+ }
if (inat_has_moffset(insn->attr)) {
if (!__get_moffset(insn))
@@ -597,9 +678,10 @@ void insn_get_immediate(struct insn *insn)
}
done:
insn->immediate.got = 1;
+ return 0;
err_out:
- return;
+ return -ENODATA;
}
/**
@@ -608,13 +690,65 @@ err_out:
*
* If necessary, first collects the instruction up to and including the
* immediates bytes.
- */
-void insn_get_length(struct insn *insn)
+ *
+ * Returns:
+ * - 0 on success
+ * - < 0 on error
+ */
+int insn_get_length(struct insn *insn)
{
+ int ret;
+
if (insn->length)
- return;
- if (!insn->immediate.got)
- insn_get_immediate(insn);
+ return 0;
+
+ if (!insn->immediate.got) {
+ ret = insn_get_immediate(insn);
+ if (ret)
+ return ret;
+ }
+
insn->length = (unsigned char)((unsigned long)insn->next_byte
- (unsigned long)insn->kaddr);
+
+ return 0;
+}
+
+/* Ensure this instruction is decoded completely */
+static inline int insn_complete(struct insn *insn)
+{
+ return insn->opcode.got && insn->modrm.got && insn->sib.got &&
+ insn->displacement.got && insn->immediate.got;
+}
+
+/**
+ * insn_decode() - Decode an x86 instruction
+ * @insn: &struct insn to be initialized
+ * @kaddr: address (in kernel memory) of instruction (or copy thereof)
+ * @buf_len: length of the insn buffer at @kaddr
+ * @m: insn mode, see enum insn_mode
+ *
+ * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
+ */
+int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m)
+{
+ int ret;
+
+/* #define INSN_MODE_KERN -1 __ignore_sync_check__ mode is only valid in the kernel */
+
+ if (m == INSN_MODE_KERN)
+ insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64));
+ else
+ insn_init(insn, kaddr, buf_len, m == INSN_MODE_64);
+
+ ret = insn_get_length(insn);
+ if (ret)
+ return ret;
+
+ if (insn_complete(insn))
+ return 0;
+
+ return -EINVAL;
}
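Callers of the new single-call API follow the shape below; a minimal kernel-context sketch (the wrapper name is hypothetical):

    #include <asm/insn.h>

    static int example_decode(const void *kaddr)
    {
            struct insn insn;
            int ret;

            ret = insn_decode(&insn, kaddr, MAX_INSN_SIZE, INSN_MODE_KERN);
            if (ret < 0)
                    return ret;     /* some stage failed to parse */

            return insn.length;     /* fully decoded; length is valid */
    }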
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 1e299ac73c86..1cc9da6e29c7 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -4,7 +4,7 @@
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
#include <asm/export.h>
.pushsection .noinstr.text, "ax"
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 41902fe8b859..64801010d312 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -8,7 +8,7 @@
*/
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
#include <asm/export.h>
#undef memmove
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 0bfd26e4ca9e..9827ae267f96 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -3,7 +3,7 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
#include <asm/export.h>
/*
diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
index 419365c48b2a..cc5f4ea943d3 100644
--- a/arch/x86/lib/mmx_32.c
+++ b/arch/x86/lib/mmx_32.c
@@ -14,7 +14,7 @@
* tested so far for any MMX solution figured.
*
* 22/09/2000 - Arjan van de Ven
- * Improved for non-egineering-sample Athlons
+ * Improved for non-engineering-sample Athlons
*
*/
#include <linux/hardirq.h>
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index 75a0915b0d01..40bbe56bde32 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -252,7 +252,7 @@ static void __wrmsr_safe_regs_on_cpu(void *info)
rv->err = wrmsr_safe_regs(rv->regs);
}
-int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
int err;
struct msr_regs_info rv;
@@ -265,7 +265,7 @@ int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
-int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
+int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
int err;
struct msr_regs_info rv;
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 3bd905e10ee2..b09cd2ad426c 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL(msrs_free);
* argument @m.
*
*/
-int msr_read(u32 msr, struct msr *m)
+static int msr_read(u32 msr, struct msr *m)
{
int err;
u64 val;
@@ -54,7 +54,7 @@ int msr_read(u32 msr, struct msr *m)
* @msr: MSR to write
* @m: value to write
*/
-int msr_write(u32 msr, struct msr *m)
+static int msr_write(u32 msr, struct msr *m)
{
return wrmsrl_safe(msr, m->q);
}
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index f6fb1d218dcc..ec9922cba30a 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -4,33 +4,69 @@
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/unwind_hints.h>
#include <asm/frame.h>
-.macro THUNK reg
.section .text.__x86.indirect_thunk
- .align 32
-SYM_FUNC_START(__x86_indirect_thunk_\reg)
- JMP_NOSPEC \reg
-SYM_FUNC_END(__x86_indirect_thunk_\reg)
-
-SYM_FUNC_START_NOALIGN(__x86_retpoline_\reg)
+.macro RETPOLINE reg
ANNOTATE_INTRA_FUNCTION_CALL
- call .Ldo_rop_\@
+ call .Ldo_rop_\@
.Lspec_trap_\@:
UNWIND_HINT_EMPTY
pause
lfence
- jmp .Lspec_trap_\@
+ jmp .Lspec_trap_\@
.Ldo_rop_\@:
- mov %\reg, (%_ASM_SP)
+ mov %\reg, (%_ASM_SP)
UNWIND_HINT_FUNC
ret
-SYM_FUNC_END(__x86_retpoline_\reg)
+.endm
+
+.macro THUNK reg
+
+ .align 32
+
+SYM_FUNC_START(__x86_indirect_thunk_\reg)
+
+ ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
+ __stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \
+ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD
+
+SYM_FUNC_END(__x86_indirect_thunk_\reg)
+
+.endm
+
+/*
+ * This generates .altinstr_replacement symbols for use by objtool. They,
+ * however, must not actually live in .altinstr_replacement since that will be
+ * discarded after init, but module alternatives will also reference these
+ * symbols.
+ *
+ * Their names match the "__x86_indirect_" prefix to mark them as retpolines.
+ */
+.macro ALT_THUNK reg
+
+ .align 1
+
+SYM_FUNC_START_NOALIGN(__x86_indirect_alt_call_\reg)
+ ANNOTATE_RETPOLINE_SAFE
+1: call *%\reg
+2: .skip 5-(2b-1b), 0x90
+SYM_FUNC_END(__x86_indirect_alt_call_\reg)
+
+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_call_\reg)
+
+SYM_FUNC_START_NOALIGN(__x86_indirect_alt_jmp_\reg)
+ ANNOTATE_RETPOLINE_SAFE
+1: jmp *%\reg
+2: .skip 5-(2b-1b), 0x90
+SYM_FUNC_END(__x86_indirect_alt_jmp_\reg)
+
+STACK_FRAME_NON_STANDARD(__x86_indirect_alt_jmp_\reg)
.endm
@@ -48,7 +84,6 @@ SYM_FUNC_END(__x86_retpoline_\reg)
#define __EXPORT_THUNK(sym) _ASM_NOKPROBE(sym); EXPORT_SYMBOL(sym)
#define EXPORT_THUNK(reg) __EXPORT_THUNK(__x86_indirect_thunk_ ## reg)
-#define EXPORT_RETPOLINE(reg) __EXPORT_THUNK(__x86_retpoline_ ## reg)
#undef GEN
#define GEN(reg) THUNK reg
@@ -59,5 +94,13 @@ SYM_FUNC_END(__x86_retpoline_\reg)
#include <asm/GEN-for-each-reg.h>
#undef GEN
-#define GEN(reg) EXPORT_RETPOLINE(reg)
+#define GEN(reg) ALT_THUNK reg
+#include <asm/GEN-for-each-reg.h>
+
+#undef GEN
+#define GEN(reg) __EXPORT_THUNK(__x86_indirect_alt_call_ ## reg)
+#include <asm/GEN-for-each-reg.h>
+
+#undef GEN
+#define GEN(reg) __EXPORT_THUNK(__x86_indirect_alt_jmp_ ## reg)
#include <asm/GEN-for-each-reg.h>
diff --git a/arch/x86/math-emu/fpu_trig.c b/arch/x86/math-emu/fpu_trig.c
index 4a9887851ad8..990d847ae902 100644
--- a/arch/x86/math-emu/fpu_trig.c
+++ b/arch/x86/math-emu/fpu_trig.c
@@ -547,7 +547,7 @@ static void frndint_(FPU_REG *st0_ptr, u_char st0_tag)
single_arg_error(st0_ptr, st0_tag);
}
-static int fsin(FPU_REG *st0_ptr, u_char tag)
+static int f_sin(FPU_REG *st0_ptr, u_char tag)
{
u_char arg_sign = getsign(st0_ptr);
@@ -608,6 +608,11 @@ static int fsin(FPU_REG *st0_ptr, u_char tag)
}
}
+static void fsin(FPU_REG *st0_ptr, u_char tag)
+{
+ f_sin(st0_ptr, tag);
+}
+
static int f_cos(FPU_REG *st0_ptr, u_char tag)
{
u_char st0_sign;
@@ -724,7 +729,7 @@ static void fsincos(FPU_REG *st0_ptr, u_char st0_tag)
}
reg_copy(st0_ptr, &arg);
- if (!fsin(st0_ptr, st0_tag)) {
+ if (!f_sin(st0_ptr, st0_tag)) {
push();
FPU_copy_to_reg0(&arg, st0_tag);
f_cos(&st(0), st0_tag);
@@ -1635,7 +1640,7 @@ void FPU_triga(void)
}
static FUNC_ST0 const trig_table_b[] = {
- fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, (FUNC_ST0) fsin, fcos
+ fprem, fyl2xp1, fsqrt_, fsincos, frndint_, fscale, fsin, fcos
};
void FPU_trigb(void)
diff --git a/arch/x86/math-emu/reg_ld_str.c b/arch/x86/math-emu/reg_ld_str.c
index fe6246ff9887..7ca6417c0c8d 100644
--- a/arch/x86/math-emu/reg_ld_str.c
+++ b/arch/x86/math-emu/reg_ld_str.c
@@ -964,7 +964,7 @@ int FPU_store_bcd(FPU_REG *st0_ptr, u_char st0_tag, u_char __user *d)
/* The return value (in eax) is zero if the result is exact,
if bits are changed due to rounding, truncation, etc, then
a non-zero value is returned */
-/* Overflow is signalled by a non-zero return value (in eax).
+/* Overflow is signaled by a non-zero return value (in eax).
In the case of overflow, the returned significand always has the
largest possible value */
int FPU_round_to_int(FPU_REG *r, u_char tag)
diff --git a/arch/x86/math-emu/reg_round.S b/arch/x86/math-emu/reg_round.S
index 11a1f798451b..4a9fc3cc5a4d 100644
--- a/arch/x86/math-emu/reg_round.S
+++ b/arch/x86/math-emu/reg_round.S
@@ -575,7 +575,7 @@ Normalise_result:
#ifdef PECULIAR_486
/*
* This implements a special feature of 80486 behaviour.
- * Underflow will be signalled even if the number is
+ * Underflow will be signaled even if the number is
* not a denormal after rounding.
* This difference occurs only for masked underflow, and not
* in the unmasked case.
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index b93d6cd08a7f..121921b2927c 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -5,7 +5,7 @@
#include <xen/xen.h>
#include <asm/fpu/internal.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
#include <asm/traps.h>
#include <asm/kdebug.h>
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index a73347e2cdfc..2d27932c9ac7 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -836,8 +836,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
if (si_code == SEGV_PKUERR)
force_sig_pkuerr((void __user *)address, pkey);
-
- force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ else
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address);
local_irq_disable();
}
@@ -1186,7 +1186,7 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
return;
/* kprobes don't want to hook the spurious faults: */
- if (kprobe_page_fault(regs, X86_TRAP_PF))
+ if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
return;
/*
@@ -1239,7 +1239,7 @@ void do_user_addr_fault(struct pt_regs *regs,
}
/* kprobes don't want to hook the spurious faults: */
- if (unlikely(kprobe_page_fault(regs, X86_TRAP_PF)))
+ if (WARN_ON_ONCE(kprobe_page_fault(regs, X86_TRAP_PF)))
return;
/*
@@ -1497,7 +1497,7 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
* userspace task is trying to access some valid (from guest's point of
* view) memory which is not currently mapped by the host (e.g. the
* memory is swapped out). Note, the corresponding "page ready" event
- * which is injected when the memory becomes available, is delived via
+ * which is injected when the memory becomes available, is delivered via
* an interrupt mechanism and not a #PF exception
* (see arch/x86/kernel/kvm.c: sysvec_kvm_asyncpf_interrupt()).
*
@@ -1523,7 +1523,7 @@ DEFINE_IDTENTRY_RAW_ERRORCODE(exc_page_fault)
*
* In case the fault hit a RCU idle region the conditional entry
* code reenabled RCU to avoid subsequent wreckage which helps
- * debugability.
+ * debuggability.
*/
state = irqentry_enter(regs);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index dd694fb93916..75ef19aa8903 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -29,7 +29,7 @@
/*
* We need to define the tracepoints somewhere, and tlb.c
- * is only compied when SMP=y.
+ * is only compiled when SMP=y.
*/
#define CREATE_TRACE_POINTS
#include <trace/events/tlb.h>
@@ -756,7 +756,7 @@ void __init init_mem_mapping(void)
#ifdef CONFIG_X86_64
if (max_pfn > max_low_pfn) {
- /* can we preseve max_low_pfn ?*/
+ /* can we preserve max_low_pfn? */
max_low_pfn = max_pfn;
}
#else
@@ -939,7 +939,7 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
{
/*
* end could be not aligned, and We can not align that,
- * decompresser could be confused by aligned initrd_end
+ * decompressor could be confused by aligned initrd_end
* We already reserve the end partial page before in
* - i386_start_kernel()
* - x86_64_start_kernel()
@@ -1017,7 +1017,7 @@ void __init zone_sizes_init(void)
free_area_init(max_zone_pfns);
}
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+__visible DEFINE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate) = {
.loaded_mm = &init_mm,
.next_asid = 1,
.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index da31c2635ee4..74b78840182d 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -651,7 +651,7 @@ void __init find_low_pfn_range(void)
highmem_pfn_init();
}
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
@@ -677,7 +677,7 @@ void __init initmem_init(void)
setup_bootmem_allocator();
}
-#endif /* !CONFIG_NEED_MULTIPLE_NODES */
+#endif /* !CONFIG_NUMA */
void __init setup_bootmem_allocator(void)
{
@@ -755,8 +755,6 @@ void __init mem_init(void)
after_bootmem = 1;
x86_init.hyper.init_after_bootmem();
- mem_init_print_info(NULL);
-
/*
* Check boundaries twice: Some fundamental inconsistencies can
* be detected at build time already.
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b5a3fa4033d3..65ea58527176 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -33,6 +33,7 @@
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>
+#include <linux/bootmem_info.h>
#include <asm/processor.h>
#include <asm/bios_ebda.h>
@@ -172,7 +173,7 @@ static void sync_global_pgds_l4(unsigned long start, unsigned long end)
/*
* With folded p4d, pgd_none() is always false, we need to
- * handle synchonization on p4d level.
+ * handle synchronization on p4d level.
*/
MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref));
p4d_ref = p4d_offset(pgd_ref, addr);
@@ -826,6 +827,106 @@ void __init paging_init(void)
zone_sizes_init();
}
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#define PAGE_UNUSED 0xFD
+
+/*
+ * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
+ * from unused_pmd_start to next PMD_SIZE boundary.
+ */
+static unsigned long unused_pmd_start __meminitdata;
+
+static void __meminit vmemmap_flush_unused_pmd(void)
+{
+ if (!unused_pmd_start)
+ return;
+ /*
+ * Clears (unused_pmd_start, PMD_END]
+ */
+ memset((void *)unused_pmd_start, PAGE_UNUSED,
+ ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
+ unused_pmd_start = 0;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+/* Returns true if the PMD is completely unused and thus it can be freed */
+static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
+{
+ unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);
+
+ /*
+ * Flush the unused range cache to ensure that memchr_inv() will work
+ * for the whole range.
+ */
+ vmemmap_flush_unused_pmd();
+ memset((void *)addr, PAGE_UNUSED, end - addr);
+
+ return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE);
+}
+#endif
+
+static void __meminit __vmemmap_use_sub_pmd(unsigned long start)
+{
+ /*
+ * As we expect to add in the same granularity as we remove, it's
+ * sufficient to mark only some piece as used in order to block the
+ * memmap page from getting removed while some other adjacent memmap
+ * is removed (just in case the first memmap never gets initialized,
+ * e.g. because the memory block never gets onlined).
+ */
+ memset((void *)start, 0, sizeof(struct page));
+}
+
+static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
+{
+ /*
+ * We only optimize if the new used range directly follows the
+ * previously unused range (esp., when populating consecutive sections).
+ */
+ if (unused_pmd_start == start) {
+ if (likely(IS_ALIGNED(end, PMD_SIZE)))
+ unused_pmd_start = 0;
+ else
+ unused_pmd_start = end;
+ return;
+ }
+
+ /*
+ * If the range does not contiguously follow the previous one, make sure
+ * to mark the unused range of the previous one so it can be removed.
+ */
+ vmemmap_flush_unused_pmd();
+ __vmemmap_use_sub_pmd(start);
+}
+
+
+static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
+{
+ vmemmap_flush_unused_pmd();
+
+ /*
+ * Could be our memmap page is filled with PAGE_UNUSED already from a
+ * previous remove. Make sure to reset it.
+ */
+ __vmemmap_use_sub_pmd(start);
+
+ /*
+ * Mark with PAGE_UNUSED the unused parts of the new memmap range
+ */
+ if (!IS_ALIGNED(start, PMD_SIZE))
+ memset((void *)start, PAGE_UNUSED,
+ start - ALIGN_DOWN(start, PMD_SIZE));
+
+ /*
+ * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
+ * consecutive sections. Remember for the last added PMD where the
+ * unused range begins.
+ */
+ if (!IS_ALIGNED(end, PMD_SIZE))
+ unused_pmd_start = end;
+}
+#endif
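The alignment bookkeeping above is easiest to see with numbers. A self-contained userspace illustration (PMD_SIZE and the ALIGN macros are re-declared locally; the addresses are made up):

    #include <stdio.h>

    #define PMD_SIZE         (2UL << 20)  /* 2 MiB on x86-64 */
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
    #define ALIGN(x, a)      ALIGN_DOWN((x) + (a) - 1, (a))

    int main(void)
    {
            unsigned long start = 0x40003000UL, end = 0x40080000UL;

            /* Unaligned head: memset(PAGE_UNUSED) immediately. */
            printf("head: [%#lx, %#lx)\n",
                   ALIGN_DOWN(start, PMD_SIZE), start);

            /* Unaligned tail: deferred via unused_pmd_start until the
             * next vmemmap_flush_unused_pmd(). */
            printf("tail: [%#lx, %#lx)\n", end, ALIGN(end, PMD_SIZE));
            return 0;
    }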
+
/*
* Memory hotplug specific functions
*/
@@ -871,8 +972,6 @@ int arch_add_memory(int nid, u64 start, u64 size,
return add_pages(nid, start_pfn, nr_pages, params);
}
-#define PAGE_INUSE 0xFD
-
static void __meminit free_pagetable(struct page *page, int order)
{
unsigned long magic;
@@ -962,7 +1061,6 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
{
unsigned long next, pages = 0;
pte_t *pte;
- void *page_addr;
phys_addr_t phys_addr;
pte = pte_start + pte_index(addr);
@@ -983,42 +1081,15 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
if (phys_addr < (phys_addr_t)0x40000000)
return;
- if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
- /*
- * Do not free direct mapping pages since they were
- * freed when offlining, or simplely not in use.
- */
- if (!direct)
- free_pagetable(pte_page(*pte), 0);
-
- spin_lock(&init_mm.page_table_lock);
- pte_clear(&init_mm, addr, pte);
- spin_unlock(&init_mm.page_table_lock);
-
- /* For non-direct mapping, pages means nothing. */
- pages++;
- } else {
- /*
- * If we are here, we are freeing vmemmap pages since
- * direct mapped memory ranges to be freed are aligned.
- *
- * If we are not removing the whole page, it means
- * other page structs in this page are being used and
- * we canot remove them. So fill the unused page_structs
- * with 0xFD, and remove the page when it is wholly
- * filled with 0xFD.
- */
- memset((void *)addr, PAGE_INUSE, next - addr);
+ if (!direct)
+ free_pagetable(pte_page(*pte), 0);
- page_addr = page_address(pte_page(*pte));
- if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
- free_pagetable(pte_page(*pte), 0);
+ spin_lock(&init_mm.page_table_lock);
+ pte_clear(&init_mm, addr, pte);
+ spin_unlock(&init_mm.page_table_lock);
- spin_lock(&init_mm.page_table_lock);
- pte_clear(&init_mm, addr, pte);
- spin_unlock(&init_mm.page_table_lock);
- }
- }
+ /* For non-direct mapping, pages means nothing. */
+ pages++;
}
/* Call free_pte_table() in remove_pmd_table(). */
@@ -1034,7 +1105,6 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
unsigned long next, pages = 0;
pte_t *pte_base;
pmd_t *pmd;
- void *page_addr;
pmd = pmd_start + pmd_index(addr);
for (; addr < end; addr = next, pmd++) {
@@ -1054,22 +1124,16 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
pmd_clear(pmd);
spin_unlock(&init_mm.page_table_lock);
pages++;
- } else {
- /* If here, we are freeing vmemmap pages. */
- memset((void *)addr, PAGE_INUSE, next - addr);
-
- page_addr = page_address(pmd_page(*pmd));
- if (!memchr_inv(page_addr, PAGE_INUSE,
- PMD_SIZE)) {
+ }
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ else if (vmemmap_pmd_is_unused(addr, next)) {
free_hugepage_table(pmd_page(*pmd),
altmap);
-
spin_lock(&init_mm.page_table_lock);
pmd_clear(pmd);
spin_unlock(&init_mm.page_table_lock);
- }
}
-
+#endif
continue;
}
@@ -1090,7 +1154,6 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
unsigned long next, pages = 0;
pmd_t *pmd_base;
pud_t *pud;
- void *page_addr;
pud = pud_start + pud_index(addr);
for (; addr < end; addr = next, pud++) {
@@ -1099,33 +1162,13 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
if (!pud_present(*pud))
continue;
- if (pud_large(*pud)) {
- if (IS_ALIGNED(addr, PUD_SIZE) &&
- IS_ALIGNED(next, PUD_SIZE)) {
- if (!direct)
- free_pagetable(pud_page(*pud),
- get_order(PUD_SIZE));
-
- spin_lock(&init_mm.page_table_lock);
- pud_clear(pud);
- spin_unlock(&init_mm.page_table_lock);
- pages++;
- } else {
- /* If here, we are freeing vmemmap pages. */
- memset((void *)addr, PAGE_INUSE, next - addr);
-
- page_addr = page_address(pud_page(*pud));
- if (!memchr_inv(page_addr, PAGE_INUSE,
- PUD_SIZE)) {
- free_pagetable(pud_page(*pud),
- get_order(PUD_SIZE));
-
- spin_lock(&init_mm.page_table_lock);
- pud_clear(pud);
- spin_unlock(&init_mm.page_table_lock);
- }
- }
-
+ if (pud_large(*pud) &&
+ IS_ALIGNED(addr, PUD_SIZE) &&
+ IS_ALIGNED(next, PUD_SIZE)) {
+ spin_lock(&init_mm.page_table_lock);
+ pud_clear(pud);
+ spin_unlock(&init_mm.page_table_lock);
+ pages++;
continue;
}
@@ -1197,6 +1240,9 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct,
void __ref vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap)
{
+ VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
+ VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
+
remove_pagetable(start, end, false, altmap);
}
@@ -1224,7 +1270,7 @@ static struct kcore_list kcore_vsyscall;
static void __init register_page_bootmem_info(void)
{
-#ifdef CONFIG_NUMA
+#if defined(CONFIG_NUMA) || defined(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP)
int i;
for_each_online_node(i)
@@ -1306,8 +1352,6 @@ void __init mem_init(void)
kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
preallocate_vmalloc_pages();
-
- mem_init_print_info(NULL);
}
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
@@ -1538,11 +1582,17 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
addr_end = addr + PMD_SIZE;
p_end = p + PMD_SIZE;
+
+ if (!IS_ALIGNED(addr, PMD_SIZE) ||
+ !IS_ALIGNED(next, PMD_SIZE))
+ vmemmap_use_new_sub_pmd(addr, next);
+
continue;
} else if (altmap)
return -ENOMEM; /* no fallback */
} else if (pmd_large(*pmd)) {
vmemmap_verify((pte_t *)pmd, node, addr, next);
+ vmemmap_use_sub_pmd(addr, next);
continue;
}
if (vmemmap_populate_basepages(addr, next, node, NULL))
@@ -1556,6 +1606,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
{
int err;
+ VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE));
+ VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE));
+
if (end - start < PAGES_PER_SECTION * sizeof(struct page))
err = vmemmap_populate_basepages(start, end, node, NULL);
else if (boot_cpu_has(X86_FEATURE_PSE))
@@ -1571,7 +1624,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
return err;
}
-#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
+#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
void register_page_bootmem_memmap(unsigned long section_nr,
struct page *start_page, unsigned long nr_pages)
{
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 9e5ccc56f8e0..60ade7dd71bd 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -118,7 +118,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
if (!IS_ENABLED(CONFIG_EFI))
return;
- if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
+ if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
+ (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
+ efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
desc->flags |= IORES_MAP_ENCRYPTED;
}
@@ -481,25 +483,6 @@ void iounmap(volatile void __iomem *addr)
}
EXPORT_SYMBOL(iounmap);
-int __init arch_ioremap_p4d_supported(void)
-{
- return 0;
-}
-
-int __init arch_ioremap_pud_supported(void)
-{
-#ifdef CONFIG_X86_64
- return boot_cpu_has(X86_FEATURE_GBPAGES);
-#else
- return 0;
-#endif
-}
-
-int __init arch_ioremap_pmd_supported(void)
-{
- return boot_cpu_has(X86_FEATURE_PSE);
-}
-
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
* access
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 6e6b39710e5f..557f0fe25dff 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -96,7 +96,7 @@ void __init kernel_randomize_memory(void)
memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
- /* Adapt phyiscal memory region size based on available memory */
+ /* Adapt physical memory region size based on available memory */
if (memory_tb < kaslr_regions[0].size_tb)
kaslr_regions[0].size_tb = memory_tb;
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index be020a7bc414..d3efbc5b3449 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Support for MMIO probes.
- * Benfit many code from kprobes
+ * Benefits from much code borrowed from kprobes
* (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
* 2007 Alexander Eichner
* 2008 Pekka Paalanen <pq@iki.fi>
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 4b01f7dbaf30..ff08dc463634 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
+#include <linux/virtio_config.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
@@ -44,8 +45,6 @@ EXPORT_SYMBOL(sme_me_mask);
DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);
-bool sev_enabled __section(".data");
-
/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);
@@ -262,7 +261,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
if (pgprot_val(old_prot) == pgprot_val(new_prot))
return;
- pa = pfn << page_level_shift(level);
+ pa = pfn << PAGE_SHIFT;
size = page_level_size(level);
/*
@@ -373,14 +372,14 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
* up under SME the trampoline area cannot be encrypted, whereas under SEV
* the trampoline area must be encrypted.
*/
-bool sme_active(void)
+bool sev_active(void)
{
- return sme_me_mask && !sev_enabled;
+ return sev_status & MSR_AMD64_SEV_ENABLED;
}
-bool sev_active(void)
+bool sme_active(void)
{
- return sev_status & MSR_AMD64_SEV_ENABLED;
+ return sme_me_mask && !sev_active();
}
EXPORT_SYMBOL_GPL(sev_active);
@@ -484,3 +483,8 @@ void __init mem_encrypt_init(void)
print_mem_encrypt_feature_info();
}
+int arch_has_restricted_virtio_memory_access(void)
+{
+ return sev_active();
+}
+EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);
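For context, a sketch of the consumer side of this new hook (the exact call site and error handling in the virtio core are assumptions, not part of this patch):

/* Sketch: a SEV guest must reject legacy virtio devices, which bypass the
 * DMA API and would otherwise DMA into encrypted memory. */
if (arch_has_restricted_virtio_memory_access() &&
    !virtio_has_feature(dev, VIRTIO_F_VERSION_1)) {
	dev_warn(&dev->dev, "legacy virtio device rejected on this platform\n");
	return -ENODEV;
}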
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index 7a84fc8bc5c3..17d292b7072f 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -27,7 +27,7 @@ SYM_FUNC_START(sme_encrypt_execute)
* - stack page (PAGE_SIZE)
* - encryption routine page (PAGE_SIZE)
* - intermediate copy buffer (PMD_PAGE_SIZE)
- * R8 - physcial address of the pagetables to use for encryption
+ * R8 - physical address of the pagetables to use for encryption
*/
push %rbp
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 6c5eb6f3f14f..470b20208430 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -503,14 +503,6 @@ void __init sme_enable(struct boot_params *bp)
#define AMD_SME_BIT BIT(0)
#define AMD_SEV_BIT BIT(1)
- /*
- * Set the feature mask (SME or SEV) based on whether we are
- * running under a hypervisor.
- */
- eax = 1;
- ecx = 0;
- native_cpuid(&eax, &ebx, &ecx, &edx);
- feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
/*
* Check for the SME/SEV feature:
@@ -523,29 +515,40 @@ void __init sme_enable(struct boot_params *bp)
eax = 0x8000001f;
ecx = 0;
native_cpuid(&eax, &ebx, &ecx, &edx);
- if (!(eax & feature_mask))
+ /* Check whether SEV or SME is supported */
+ if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
return;
me_mask = 1UL << (ebx & 0x3f);
+ /* Check the SEV MSR to see whether SEV or SME is enabled */
+ sev_status = __rdmsr(MSR_AMD64_SEV);
+ feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+
/* Check if memory encryption is enabled */
if (feature_mask == AMD_SME_BIT) {
+ /*
+ * No SME if Hypervisor bit is set. This check is here to
+ * prevent a guest from trying to enable SME. For running as a
+ * KVM guest the MSR_AMD64_SYSCFG will be sufficient, but there
+ * might be other hypervisors which emulate that MSR as non-zero
+ * or even pass it through to the guest.
+ * A malicious hypervisor can still trick a guest into this
+ * path, but there is no way to protect against that.
+ */
+ eax = 1;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+ if (ecx & BIT(31))
+ return;
+
/* For SME, check the SYSCFG MSR */
- msr = __rdmsr(MSR_K8_SYSCFG);
- if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
+ msr = __rdmsr(MSR_AMD64_SYSCFG);
+ if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
return;
} else {
- /* For SEV, check the SEV MSR */
- msr = __rdmsr(MSR_AMD64_SEV);
- if (!(msr & MSR_AMD64_SEV_ENABLED))
- return;
-
- /* Save SEV_STATUS to avoid reading MSR again */
- sev_status = msr;
-
/* SEV state cannot be controlled by a command line option */
sme_me_mask = me_mask;
- sev_enabled = true;
physical_mask &= ~sme_me_mask;
return;
}
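The reordered flow is easier to follow in isolation; a hypothetical condensation (not the full function, command-line handling and mask setup omitted):

/* Hypothetical condensation of the new detection order */
static bool amd_mem_encrypt_enabled(void)
{
	unsigned int eax = 0x8000001f, ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
		return false;			/* neither SME nor SEV */

	if (__rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED)
		return true;			/* SEV guest */

	/* SME host path: refuse to proceed under a hypervisor */
	eax = 1; ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (ecx & BIT(31))
		return false;

	return __rdmsr(MSR_AMD64_SYSCFG) & MSR_AMD64_SYSCFG_MEM_ENCRYPT;
}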
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 5eb4dc2b97da..e94da744386f 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -254,7 +254,13 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
/* make sure all non-reserved blocks are inside the limits */
bi->start = max(bi->start, low);
- bi->end = min(bi->end, high);
+
+ /* preserve info for non-RAM areas above 'max_pfn': */
+ if (bi->end > high) {
+ numa_add_memblk_to(bi->nid, high, bi->end,
+ &numa_reserved_meminfo);
+ bi->end = high;
+ }
/* and there's no empty block */
if (bi->start >= bi->end)
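A worked example of what the new branch keeps (addresses are hypothetical):

/* Worked example (hypothetical addresses): a node block [4 GiB, 6 GiB)
 * on a machine whose usable RAM ends at high = 5 GiB used to be silently
 * truncated to [4 GiB, 5 GiB). The clipped tail [5 GiB, 6 GiB) is now
 * recorded in numa_reserved_meminfo, so non-RAM ranges above max_pfn
 * (e.g. persistent memory) still resolve to the right node. */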
diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c
index ca311aaa67b8..3112ca7786ed 100644
--- a/arch/x86/mm/pat/memtype.c
+++ b/arch/x86/mm/pat/memtype.c
@@ -695,7 +695,7 @@ int memtype_free(u64 start, u64 end)
/**
- * lookup_memtype - Looksup the memory type for a physical address
+ * lookup_memtype - Looks up the memory type for a physical address
* @paddr: physical address of which memory type needs to be looked up
*
* Only to be called when PAT is enabled
@@ -800,6 +800,7 @@ void memtype_free_io(resource_size_t start, resource_size_t end)
memtype_free(start, end);
}
+#ifdef CONFIG_X86_PAT
int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
{
enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
@@ -813,6 +814,7 @@ void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
memtype_free_io(start, start + size);
}
EXPORT_SYMBOL(arch_io_free_memtype_wc);
+#endif
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 16f878c26667..156cd235659f 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -16,6 +16,8 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/libnvdimm.h>
+#include <linux/vmstat.h>
+#include <linux/kernel.h>
#include <asm/e820/api.h>
#include <asm/processor.h>
@@ -91,6 +93,12 @@ static void split_page_count(int level)
return;
direct_pages_count[level]--;
+ if (system_state == SYSTEM_RUNNING) {
+ if (level == PG_LEVEL_2M)
+ count_vm_event(DIRECT_MAP_LEVEL2_SPLIT);
+ else if (level == PG_LEVEL_1G)
+ count_vm_event(DIRECT_MAP_LEVEL3_SPLIT);
+ }
direct_pages_count[level - 1] += PTRS_PER_PTE;
}
@@ -680,7 +688,7 @@ pmd_t *lookup_pmd_address(unsigned long address)
* end up in this kind of memory, for instance.
*
* This could be optimized, but it is only intended to be
- * used at inititalization time, and keeping it
+ * used at initialization time, and keeping it
* unoptimized should increase the testing coverage for
* the more obscure platforms.
*/
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index f6a9e2e36642..1303ff6ef7be 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -682,6 +682,7 @@ int p4d_clear_huge(p4d_t *p4d)
}
#endif
+#if CONFIG_PGTABLE_LEVELS > 3
/**
* pud_set_huge - setup kernel PUD mapping
*
@@ -721,6 +722,23 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
}
/**
+ * pud_clear_huge - clear kernel PUD mapping when it is set
+ *
+ * Returns 1 on success and 0 on failure (no PUD map is found).
+ */
+int pud_clear_huge(pud_t *pud)
+{
+ if (pud_large(*pud)) {
+ pud_clear(pud);
+ return 1;
+ }
+
+ return 0;
+}
+#endif
+
+#if CONFIG_PGTABLE_LEVELS > 2
+/**
* pmd_set_huge - setup kernel PMD mapping
*
* See text over pud_set_huge() above.
@@ -751,21 +769,6 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
}
/**
- * pud_clear_huge - clear kernel PUD mapping when it is set
- *
- * Returns 1 on success and 0 on failure (no PUD map is found).
- */
-int pud_clear_huge(pud_t *pud)
-{
- if (pud_large(*pud)) {
- pud_clear(pud);
- return 1;
- }
-
- return 0;
-}
-
-/**
* pmd_clear_huge - clear kernel PMD mapping when it is set
*
* Returns 1 on success and 0 on failure (no PMD map is found).
@@ -779,14 +782,7 @@ int pmd_clear_huge(pmd_t *pmd)
return 0;
}
-
-/*
- * Until we support 512GB pages, skip them in the vmap area.
- */
-int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
-{
- return 0;
-}
+#endif
#ifdef CONFIG_X86_64
/**
@@ -861,11 +857,6 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
#else /* !CONFIG_X86_64 */
-int pud_free_pmd_page(pud_t *pud, unsigned long addr)
-{
- return pud_none(*pud);
-}
-
/*
* Disable free page handling on x86-PAE. This assures that ioremap()
* does not update sync'd pmd entries. See vmalloc_sync_one().
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index 8873ed1438a9..4a67b922bce1 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -128,7 +128,7 @@ u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
/*
* Called from the FPU code when creating a fresh set of FPU
* registers. This is called from a very specific context where
- * we know the FPU regstiers are safe for use and we can use PKRU
+ * we know the FPU registers are safe for use and we can use PKRU
* directly.
*/
void copy_init_pkru_to_fpregs(void)
@@ -192,6 +192,10 @@ static const struct file_operations fops_init_pkru = {
static int __init create_init_pkru_value(void)
{
+ /* Do not expose the file if pkeys are not supported. */
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+ return 0;
+
debugfs_create_file("init_pkru", S_IRUSR | S_IWUSR,
arch_debugfs_dir, NULL, &fops_init_pkru);
return 0;
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 1aab92930569..5d5c7bb50ce9 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -361,7 +361,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
* global, so set it as global in both copies. Note:
* the X86_FEATURE_PGE check is not _required_ because
* the CPU ignores _PAGE_GLOBAL when PGE is not
- * supported. The check keeps consistentency with
+ * supported. The check keeps consistency with
* code that only set this bit when supported.
*/
if (boot_cpu_has(X86_FEATURE_PGE))
@@ -440,10 +440,9 @@ static void __init pti_clone_user_shared(void)
for_each_possible_cpu(cpu) {
/*
- * The SYSCALL64 entry code needs to be able to find the
- * thread stack and needs one word of scratch space in which
- * to spill a register. All of this lives in the TSS, in
- * the sp1 and sp2 slots.
+ * The SYSCALL64 entry code needs one word of scratch space
+ * in which to spill a register. It lives in the sp2 slot
+ * of the CPU's TSS.
*
* This is done for all possible CPUs during boot to ensure
* that it's propagated to all mms.
@@ -512,7 +511,7 @@ static void pti_clone_entry_text(void)
static inline bool pti_kernel_image_global_ok(void)
{
/*
- * Systems with PCIDs get litlle benefit from global
+ * Systems with PCIDs get little benefit from global
* kernel text and are not worth the downsides.
*/
if (cpu_feature_enabled(X86_FEATURE_PCID))
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 569ac1d57f55..cfe6b1e85fa6 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -14,6 +14,7 @@
#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
+#include <asm/perf_event.h>
#include "mm_internal.h"
@@ -24,7 +25,7 @@
# define __flush_tlb_local native_flush_tlb_local
# define __flush_tlb_global native_flush_tlb_global
# define __flush_tlb_one_user(addr) native_flush_tlb_one_user(addr)
-# define __flush_tlb_others(msk, info) native_flush_tlb_others(msk, info)
+# define __flush_tlb_multi(msk, info) native_flush_tlb_multi(msk, info)
#endif
/*
@@ -106,7 +107,7 @@ static inline u16 kern_pcid(u16 asid)
#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
- * Make sure that the dynamic ASID space does not confict with the
+ * Make sure that the dynamic ASID space does not conflict with the
* bit we are using to switch between user and kernel ASIDs.
*/
BUILD_BUG_ON(TLB_NR_DYN_ASIDS >= (1 << X86_CR3_PTI_PCID_USER_BIT));
@@ -300,7 +301,7 @@ void leave_mm(int cpu)
return;
/* Warn if we're not lazy. */
- WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy));
+ WARN_ON(!this_cpu_read(cpu_tlbstate_shared.is_lazy));
switch_mm(NULL, &init_mm, NULL);
}
@@ -316,7 +317,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
local_irq_restore(flags);
}
-static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
+static unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
{
unsigned long next_tif = task_thread_info(next)->flags;
unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
@@ -404,9 +405,14 @@ static inline void cr4_update_pce_mm(struct mm_struct *mm)
{
if (static_branch_unlikely(&rdpmc_always_available_key) ||
(!static_branch_unlikely(&rdpmc_never_available_key) &&
- atomic_read(&mm->context.perf_rdpmc_allowed)))
+ atomic_read(&mm->context.perf_rdpmc_allowed))) {
+ /*
+ * Clear the existing dirty counters to
+ * prevent the leak for an RDPMC task.
+ */
+ perf_clear_dirty_counters();
cr4_set_bits_irqsoff(X86_CR4_PCE);
- else
+ } else
cr4_clear_bits_irqsoff(X86_CR4_PCE);
}
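To make the leak concrete, a user-space sketch of the read path that perf_clear_dirty_counters() now sanitizes (counter index 0 is an arbitrary choice):

#include <stdint.h>

/* Sketch: with CR4.PCE set for this mm, rdpmc reads a hardware counter
 * directly, so any stale value left by another context would be visible. */
static inline uint64_t read_pmc(uint32_t idx)
{
	uint32_t lo, hi;

	__asm__ volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (idx));
	return ((uint64_t)hi << 32) | lo;
}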
@@ -424,7 +430,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
{
struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
u16 prev_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
- bool was_lazy = this_cpu_read(cpu_tlbstate.is_lazy);
+ bool was_lazy = this_cpu_read(cpu_tlbstate_shared.is_lazy);
unsigned cpu = smp_processor_id();
u64 next_tlb_gen;
bool need_flush;
@@ -439,7 +445,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
* NB: leave_mm() calls us with prev == NULL and tsk == NULL.
*/
- /* We don't want flush_tlb_func_* to run concurrently with us. */
+ /* We don't want flush_tlb_func() to run concurrently with us. */
if (IS_ENABLED(CONFIG_PROVE_LOCKING))
WARN_ON_ONCE(!irqs_disabled());
@@ -469,7 +475,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
__flush_tlb_all();
}
#endif
- this_cpu_write(cpu_tlbstate.is_lazy, false);
+ if (was_lazy)
+ this_cpu_write(cpu_tlbstate_shared.is_lazy, false);
/*
* The membarrier system call requires a full memory barrier and
@@ -490,7 +497,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
/*
* Even in lazy TLB mode, the CPU should stay set in the
* mm_cpumask. The TLB shootdown code can figure out from
- * from cpu_tlbstate.is_lazy whether or not to send an IPI.
+ * cpu_tlbstate_shared.is_lazy whether or not to send an IPI.
*/
if (WARN_ON_ONCE(real_prev != &init_mm &&
!cpumask_test_cpu(cpu, mm_cpumask(next))))
@@ -598,7 +605,7 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
return;
- this_cpu_write(cpu_tlbstate.is_lazy, true);
+ this_cpu_write(cpu_tlbstate_shared.is_lazy, true);
}
/*
@@ -647,14 +654,13 @@ void initialize_tlbstate_and_flush(void)
}
/*
- * flush_tlb_func_common()'s memory ordering requirement is that any
+ * flush_tlb_func()'s memory ordering requirement is that any
* TLB fills that happen after we flush the TLB are ordered after we
* read active_mm's tlb_gen. We don't need any explicit barriers
* because all x86 flush operations are serializing and the
* atomic64_read operation won't be reordered by the compiler.
*/
-static void flush_tlb_func_common(const struct flush_tlb_info *f,
- bool local, enum tlb_flush_reason reason)
+static void flush_tlb_func(void *info)
{
/*
* We have three different tlb_gen values in here. They are:
@@ -665,28 +671,40 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
* - f->new_tlb_gen: the generation that the requester of the flush
* wants us to catch up to.
*/
+ const struct flush_tlb_info *f = info;
struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+ bool local = smp_processor_id() == f->initiating_cpu;
+ unsigned long nr_invalidate = 0;
/* This code cannot presently handle being reentered. */
VM_WARN_ON(!irqs_disabled());
+ if (!local) {
+ inc_irq_stat(irq_tlb_count);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+
+ /* Can only happen on remote CPUs */
+ if (f->mm && f->mm != loaded_mm)
+ return;
+ }
+
if (unlikely(loaded_mm == &init_mm))
return;
VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
loaded_mm->context.ctx_id);
- if (this_cpu_read(cpu_tlbstate.is_lazy)) {
+ if (this_cpu_read(cpu_tlbstate_shared.is_lazy)) {
/*
* We're in lazy mode. We need to at least flush our
* paging-structure cache to avoid speculatively reading
* garbage into our TLB. Since switching to init_mm is barely
* slower than a minimal flush, just switch to init_mm.
*
- * This should be rare, with native_flush_tlb_others skipping
+ * This should be rare, with native_flush_tlb_multi() skipping
* IPIs to lazy TLB mode CPUs.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
@@ -700,8 +718,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
* be handled can catch us all the way up, leaving no work for
* the second flush.
*/
- trace_tlb_flush(reason, 0);
- return;
+ goto done;
}
WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
@@ -736,7 +753,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
* 3, we'd break the invariant: we'd update local_tlb_gen above
* 1 without the full flush that's needed for tlb_gen 2.
*
- * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimiation.
+ * 2. f->new_tlb_gen == mm_tlb_gen. This is purely an optimization.
* Partial TLB flushes are not all that much cheaper than full TLB
* flushes, so it seems unlikely that it would be a performance win
* to do a partial flush if that won't bring our TLB fully up to
@@ -748,56 +765,54 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
f->new_tlb_gen == local_tlb_gen + 1 &&
f->new_tlb_gen == mm_tlb_gen) {
/* Partial flush */
- unsigned long nr_invalidate = (f->end - f->start) >> f->stride_shift;
unsigned long addr = f->start;
+ nr_invalidate = (f->end - f->start) >> f->stride_shift;
+
while (addr < f->end) {
flush_tlb_one_user(addr);
addr += 1UL << f->stride_shift;
}
if (local)
count_vm_tlb_events(NR_TLB_LOCAL_FLUSH_ONE, nr_invalidate);
- trace_tlb_flush(reason, nr_invalidate);
} else {
/* Full flush. */
+ nr_invalidate = TLB_FLUSH_ALL;
+
flush_tlb_local();
if (local)
count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
- trace_tlb_flush(reason, TLB_FLUSH_ALL);
}
/* Both paths above update our state to mm_tlb_gen. */
this_cpu_write(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen, mm_tlb_gen);
-}
-static void flush_tlb_func_local(const void *info, enum tlb_flush_reason reason)
-{
- const struct flush_tlb_info *f = info;
-
- flush_tlb_func_common(f, true, reason);
+ /* Tracing is done in a unified manner to reduce the code size */
+done:
+ trace_tlb_flush(!local ? TLB_REMOTE_SHOOTDOWN :
+ (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN :
+ TLB_LOCAL_MM_SHOOTDOWN,
+ nr_invalidate);
}
-static void flush_tlb_func_remote(void *info)
+static bool tlb_is_not_lazy(int cpu)
{
- const struct flush_tlb_info *f = info;
-
- inc_irq_stat(irq_tlb_count);
-
- if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
- return;
-
- count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
- flush_tlb_func_common(f, false, TLB_REMOTE_SHOOTDOWN);
+ return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
}
-static bool tlb_is_not_lazy(int cpu, void *data)
-{
- return !per_cpu(cpu_tlbstate.is_lazy, cpu);
-}
+static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
+EXPORT_PER_CPU_SYMBOL(cpu_tlbstate_shared);
-STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
+STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
const struct flush_tlb_info *info)
{
+ /*
+ * Do accounting and tracing. Note that there are (and have always been)
+ * cases in which a remote TLB flush will be traced, but ultimately
+ * will not happen.
+ */
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
if (info->end == TLB_FLUSH_ALL)
trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
@@ -815,18 +830,42 @@ STATIC_NOPV void native_flush_tlb_others(const struct cpumask *cpumask,
* up on the new contents of what used to be page tables, while
* doing a speculative memory access.
*/
- if (info->freed_tables)
- smp_call_function_many(cpumask, flush_tlb_func_remote,
- (void *)info, 1);
- else
- on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
- (void *)info, 1, cpumask);
+ if (info->freed_tables) {
+ on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
+ } else {
+ /*
+ * Although we could have used on_each_cpu_cond_mask(),
+ * open-coding it has performance advantages, as it eliminates
+ * the need for indirect calls or retpolines. In addition, it
+ * allows using a designated cpumask for evaluating the
+ * condition, instead of allocating one.
+ *
+ * This code works under the assumption that there are no nested
+ * TLB flushes, an assumption that is already made in
+ * flush_tlb_mm_range().
+ *
+ * cond_cpumask is logically a stack-local variable, but it is
+ * more efficient to have it off the stack and not to allocate
+ * it on demand. Preemption is disabled and this code is
+ * non-reentrant.
+ */
+ struct cpumask *cond_cpumask = this_cpu_ptr(&flush_tlb_mask);
+ int cpu;
+
+ cpumask_clear(cond_cpumask);
+
+ for_each_cpu(cpu, cpumask) {
+ if (tlb_is_not_lazy(cpu))
+ __cpumask_set_cpu(cpu, cond_cpumask);
+ }
+ on_each_cpu_mask(cond_cpumask, flush_tlb_func, (void *)info, true);
+ }
}
-void flush_tlb_others(const struct cpumask *cpumask,
+void flush_tlb_multi(const struct cpumask *cpumask,
const struct flush_tlb_info *info)
{
- __flush_tlb_others(cpumask, info);
+ __flush_tlb_multi(cpumask, info);
}
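The rename from "others" to "multi" is behavioral, not cosmetic; a short sketch of the new caller contract:

/* Semantics sketch: the cpumask passed to flush_tlb_multi() may now
 * include the calling CPU. on_each_cpu_mask() runs flush_tlb_func()
 * locally (with IRQs disabled) and by IPI on the remote CPUs, so the
 * local and remote flushes proceed concurrently rather than back to
 * back as with the old flush_tlb_others(). */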
/*
@@ -847,7 +886,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct flush_tlb_info, flush_tlb_info);
static DEFINE_PER_CPU(unsigned int, flush_tlb_info_idx);
#endif
-static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
+static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned int stride_shift, bool freed_tables,
u64 new_tlb_gen)
@@ -869,14 +908,15 @@ static inline struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm,
info->stride_shift = stride_shift;
info->freed_tables = freed_tables;
info->new_tlb_gen = new_tlb_gen;
+ info->initiating_cpu = smp_processor_id();
return info;
}
-static inline void put_flush_tlb_info(void)
+static void put_flush_tlb_info(void)
{
#ifdef CONFIG_DEBUG_VM
- /* Complete reentrency prevention checks */
+ /* Complete reentrancy prevention checks */
barrier();
this_cpu_dec(flush_tlb_info_idx);
#endif
@@ -905,16 +945,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
info = get_flush_tlb_info(mm, start, end, stride_shift, freed_tables,
new_tlb_gen);
- if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
+ /*
+ * flush_tlb_multi() is not optimized for the common case in which only
+ * a local TLB flush is needed. Optimize this use-case by calling
+ * flush_tlb_func() directly in this case.
+ */
+ if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
+ flush_tlb_multi(mm_cpumask(mm), info);
+ } else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
lockdep_assert_irqs_enabled();
local_irq_disable();
- flush_tlb_func_local(info, TLB_LOCAL_MM_SHOOTDOWN);
+ flush_tlb_func(info);
local_irq_enable();
}
- if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
- flush_tlb_others(mm_cpumask(mm), info);
-
put_flush_tlb_info();
put_cpu();
}
@@ -1119,34 +1163,30 @@ void __flush_tlb_all(void)
}
EXPORT_SYMBOL_GPL(__flush_tlb_all);
-/*
- * arch_tlbbatch_flush() performs a full TLB flush regardless of the active mm.
- * This means that the 'struct flush_tlb_info' that describes which mappings to
- * flush is actually fixed. We therefore set a single fixed struct and use it in
- * arch_tlbbatch_flush().
- */
-static const struct flush_tlb_info full_flush_tlb_info = {
- .mm = NULL,
- .start = 0,
- .end = TLB_FLUSH_ALL,
-};
-
void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
+ struct flush_tlb_info *info;
+
int cpu = get_cpu();
- if (cpumask_test_cpu(cpu, &batch->cpumask)) {
+ info = get_flush_tlb_info(NULL, 0, TLB_FLUSH_ALL, 0, false, 0);
+ /*
+ * flush_tlb_multi() is not optimized for the common case in which only
+ * a local TLB flush is needed. Optimize this use-case by calling
+ * flush_tlb_func() directly in this case.
+ */
+ if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) {
+ flush_tlb_multi(&batch->cpumask, info);
+ } else if (cpumask_test_cpu(cpu, &batch->cpumask)) {
lockdep_assert_irqs_enabled();
local_irq_disable();
- flush_tlb_func_local(&full_flush_tlb_info, TLB_LOCAL_SHOOTDOWN);
+ flush_tlb_func(info);
local_irq_enable();
}
- if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
- flush_tlb_others(&batch->cpumask, &full_flush_tlb_info);
-
cpumask_clear(&batch->cpumask);
+ put_flush_tlb_info();
put_cpu();
}
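Since the batch path now allocates its info dynamically, the pairing rule is worth spelling out (a sketch of the invariant, not code from the patch):

/* Invariant sketch: get_flush_tlb_info() claims the per-CPU slot (and,
 * under CONFIG_DEBUG_VM, bumps flush_tlb_info_idx), so every path must
 * reach put_flush_tlb_info() before put_cpu() - including this batch
 * path, which previously used a static const info and needed no put. */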
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 79e7a0ec1da5..e835164189f1 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -31,7 +31,7 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
}
#define EMIT(bytes, len) \
- do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
+ do { prog = emit_code(prog, bytes, len); } while (0)
#define EMIT1(b1) EMIT(b1, 1)
#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
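With the side counter gone, emitted length falls out of pointer arithmetic alone; a minimal fragment sketch using this file's EMIT macros:

/* Sketch: 'cnt' is no longer threaded through every helper; the bytes
 * emitted are simply the distance 'prog' has advanced. */
u8 *start = prog;

EMIT1(0x55);			/* push rbp */
EMIT3(0x48, 0x89, 0xE5);	/* mov rbp, rsp */

int ilen = prog - start;	/* 4 bytes emitted */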
@@ -239,7 +239,6 @@ struct jit_context {
static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
{
u8 *prog = *pprog;
- int cnt = 0;
if (callee_regs_used[0])
EMIT1(0x53); /* push rbx */
@@ -255,7 +254,6 @@ static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
{
u8 *prog = *pprog;
- int cnt = 0;
if (callee_regs_used[3])
EMIT2(0x41, 0x5F); /* pop r15 */
@@ -277,13 +275,12 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
bool tail_call_reachable, bool is_subprog)
{
u8 *prog = *pprog;
- int cnt = X86_PATCH_SIZE;
/* BPF trampoline can be made to work without these nops,
* but let's waste 5 bytes for now and optimize later
*/
- memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
- prog += cnt;
+ memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
+ prog += X86_PATCH_SIZE;
if (!ebpf_from_cbpf) {
if (tail_call_reachable && !is_subprog)
EMIT2(0x31, 0xC0); /* xor eax, eax */
@@ -303,7 +300,6 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
u8 *prog = *pprog;
- int cnt = 0;
s64 offset;
offset = func - (ip + X86_PATCH_SIZE);
@@ -330,7 +326,7 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *old_addr, void *new_addr,
const bool text_live)
{
- const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
+ const u8 *nop_insn = x86_nops[5];
u8 old_insn[X86_PATCH_SIZE];
u8 new_insn[X86_PATCH_SIZE];
u8 *prog;
@@ -423,7 +419,6 @@ static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
int off1 = 42;
int off2 = 31;
int off3 = 9;
- int cnt = 0;
/* count the additional bytes used for popping callee regs from stack
* that need to be taken into account for each of the offsets that
@@ -513,7 +508,6 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
int pop_bytes = 0;
int off1 = 20;
int poke_off;
- int cnt = 0;
/* count the additional bytes used for popping callee regs to stack
* that need to be taken into account for jump offset that is used for
@@ -560,7 +554,7 @@ static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
if (stack_depth)
EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
- memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
+ memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
prog += X86_PATCH_SIZE;
/* out: */
@@ -615,7 +609,6 @@ static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
{
u8 *prog = *pprog;
u8 b1, b2, b3;
- int cnt = 0;
/*
* Optimization: if imm32 is positive, use 'mov %eax, imm32'
@@ -655,7 +648,6 @@ static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
const u32 imm32_hi, const u32 imm32_lo)
{
u8 *prog = *pprog;
- int cnt = 0;
if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
/*
@@ -678,7 +670,6 @@ static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
{
u8 *prog = *pprog;
- int cnt = 0;
if (is64) {
/* mov dst, src */
@@ -697,7 +688,6 @@ static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
{
u8 *prog = *pprog;
- int cnt = 0;
if (is_imm8(off)) {
/* 1-byte signed displacement.
@@ -720,7 +710,6 @@ static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
{
u8 *prog = *pprog;
- int cnt = 0;
if (is64)
EMIT1(add_2mod(0x48, dst_reg, src_reg));
@@ -733,7 +722,6 @@ static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
u8 *prog = *pprog;
- int cnt = 0;
switch (size) {
case BPF_B:
@@ -764,7 +752,6 @@ static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
{
u8 *prog = *pprog;
- int cnt = 0;
switch (size) {
case BPF_B:
@@ -799,7 +786,6 @@ static int emit_atomic(u8 **pprog, u8 atomic_op,
u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
{
u8 *prog = *pprog;
- int cnt = 0;
EMIT1(0xF0); /* lock prefix */
@@ -869,10 +855,10 @@ static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
}
}
-static int emit_nops(u8 **pprog, int len)
+static void emit_nops(u8 **pprog, int len)
{
u8 *prog = *pprog;
- int i, noplen, cnt = 0;
+ int i, noplen;
while (len > 0) {
noplen = len;
@@ -881,13 +867,11 @@ static int emit_nops(u8 **pprog, int len)
noplen = ASM_NOP_MAX;
for (i = 0; i < noplen; i++)
- EMIT1(ideal_nops[noplen][i]);
+ EMIT1(x86_nops[noplen][i]);
len -= noplen;
}
*pprog = prog;
-
- return cnt;
}
#define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
@@ -902,7 +886,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
bool tail_call_seen = false;
bool seen_exit = false;
u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
- int i, cnt = 0, excnt = 0;
+ int i, excnt = 0;
int ilen, proglen = 0;
u8 *prog = temp;
int err;
@@ -1297,7 +1281,7 @@ st: if (is_imm8(insn->off))
emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
struct exception_table_entry *ex;
- u8 *_insn = image + proglen;
+ u8 *_insn = image + proglen + (start_of_ldx - temp);
s64 delta;
/* populate jmp_offset for JMP above */
@@ -1349,6 +1333,7 @@ st: if (is_imm8(insn->off))
insn->imm == (BPF_XOR | BPF_FETCH)) {
u8 *branch_target;
bool is64 = BPF_SIZE(insn->code) == BPF_DW;
+ u32 real_src_reg = src_reg;
/*
* Can't be implemented with a single x86 insn.
@@ -1357,6 +1342,9 @@ st: if (is_imm8(insn->off))
/* Will need RAX as a CMPXCHG operand so save R0 */
emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
+ if (src_reg == BPF_REG_0)
+ real_src_reg = BPF_REG_AX;
+
branch_target = prog;
/* Load old value */
emit_ldx(&prog, BPF_SIZE(insn->code),
@@ -1366,9 +1354,9 @@ st: if (is_imm8(insn->off))
* put the result in the AUX_REG.
*/
emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
- maybe_emit_mod(&prog, AUX_REG, src_reg, is64);
+ maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
- add_2reg(0xC0, AUX_REG, src_reg));
+ add_2reg(0xC0, AUX_REG, real_src_reg));
/* Attempt to swap in new value */
err = emit_atomic(&prog, BPF_CMPXCHG,
dst_reg, AUX_REG, insn->off,
@@ -1381,7 +1369,7 @@ st: if (is_imm8(insn->off))
*/
EMIT2(X86_JNE, -(prog - branch_target) - 2);
/* Return the pre-modification value */
- emit_mov_reg(&prog, is64, src_reg, BPF_REG_0);
+ emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
/* Restore R0 after clobbering RAX */
emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
break;
@@ -1552,7 +1540,7 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
if (is_imm8(jmp_offset)) {
if (jmp_padding) {
/* To keep the jmp_offset valid, the extra bytes are
- * padded before the jump insn, so we substract the
+ * padded before the jump insn, so we subtract the
* 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
*
* If the previous pass already emits an imm8
@@ -1572,7 +1560,7 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
nops);
return -EFAULT;
}
- cnt += emit_nops(&prog, nops);
+ emit_nops(&prog, nops);
}
EMIT2(jmp_cond, jmp_offset);
} else if (is_simm32(jmp_offset)) {
@@ -1618,7 +1606,7 @@ emit_cond_jmp: /* Convert BPF opcode to x86 */
nops);
return -EFAULT;
}
- cnt += emit_nops(&prog, nops);
+ emit_nops(&prog, nops);
}
break;
}
@@ -1627,7 +1615,7 @@ emit_jmp:
if (jmp_padding) {
/* To avoid breaking jmp_offset, the extra bytes
* are padded before the actual jmp insn, so
- * 2 bytes is substracted from INSN_SZ_DIFF.
+ * 2 bytes is subtracted from INSN_SZ_DIFF.
*
* If the previous pass already emits an imm8
* jmp, there is nothing to pad (0 byte).
@@ -1643,7 +1631,7 @@ emit_jmp:
nops);
return -EFAULT;
}
- cnt += emit_nops(&prog, INSN_SZ_DIFF - 2);
+ emit_nops(&prog, INSN_SZ_DIFF - 2);
}
EMIT2(0xEB, jmp_offset);
} else if (is_simm32(jmp_offset)) {
@@ -1685,7 +1673,16 @@ emit_jmp:
}
if (image) {
- if (unlikely(proglen + ilen > oldproglen)) {
+ /*
+ * When populating the image, assert that:
+ *
+ * i) We do not write beyond the allocated space, and
+ * ii) addrs[i] did not change from the prior run, in order
+ * to validate assumptions made for computing branch
+ * displacements.
+ */
+ if (unlikely(proglen + ilen > oldproglen ||
+ proglen + ilen != addrs[i])) {
pr_err("bpf_jit: fatal error\n");
return -EFAULT;
}
@@ -1741,7 +1738,6 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
{
u8 *prog = *pprog;
u8 *jmp_insn;
- int cnt = 0;
/* arg1: mov rdi, progs[i] */
emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
@@ -1809,7 +1805,6 @@ static void emit_align(u8 **pprog, u32 align)
static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
{
u8 *prog = *pprog;
- int cnt = 0;
s64 offset;
offset = func - (ip + 2 + 4);
@@ -1841,7 +1836,7 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
u8 **branches)
{
u8 *prog = *pprog;
- int i, cnt = 0;
+ int i;
/* The first fmod_ret program will receive a garbage return value.
* Set this to 0 to avoid confusing the program.
@@ -1932,12 +1927,12 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
* add rsp, 8 // skip eth_type_trans's frame
* ret // return to its caller
*/
-int arch_prepare_bpf_trampoline(void *image, void *image_end,
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
struct bpf_tramp_progs *tprogs,
void *orig_call)
{
- int ret, i, cnt = 0, nr_args = m->nr_args;
+ int ret, i, nr_args = m->nr_args;
int stack_size = nr_args * 8;
struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
@@ -1971,6 +1966,15 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
save_regs(m, &prog, nr_args, stack_size);
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ /* arg1: mov rdi, im */
+ emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
+ if (emit_call(&prog, __bpf_tramp_enter, prog)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
if (fentry->nr_progs)
if (invoke_bpf(m, &prog, fentry, stack_size))
return -EINVAL;
@@ -1989,8 +1993,7 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
}
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- if (fentry->nr_progs || fmod_ret->nr_progs)
- restore_regs(m, &prog, nr_args, stack_size);
+ restore_regs(m, &prog, nr_args, stack_size);
/* call original function */
if (emit_call(&prog, orig_call, prog)) {
@@ -1999,6 +2002,9 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
}
/* remember return value in a stack for bpf prog to access */
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+ im->ip_after_call = prog;
+ memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
+ prog += X86_PATCH_SIZE;
}
if (fmod_ret->nr_progs) {
@@ -2029,9 +2035,17 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
* the return value is only updated on the stack and still needs to be
* restored to R0.
*/
- if (flags & BPF_TRAMP_F_CALL_ORIG)
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ im->ip_epilogue = prog;
+ /* arg1: mov rdi, im */
+ emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
+ if (emit_call(&prog, __bpf_tramp_exit, prog)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
/* restore original return value back into RAX */
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+ }
EMIT1(0x5B); /* pop rbx */
EMIT1(0xC9); /* leave */
@@ -2063,8 +2077,6 @@ static int emit_fallback_jump(u8 **pprog)
*/
err = emit_jump(&prog, __x86_indirect_thunk_rdx, prog);
#else
- int cnt = 0;
-
EMIT2(0xFF, 0xE2); /* jmp rdx */
#endif
*pprog = prog;
@@ -2074,7 +2086,7 @@ static int emit_fallback_jump(u8 **pprog)
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
{
u8 *jg_reloc, *prog = *pprog;
- int pivot, err, jg_bytes = 1, cnt = 0;
+ int pivot, err, jg_bytes = 1;
s64 jg_offset;
if (a == b) {
@@ -2221,7 +2233,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
padding = true;
goto skip_init_addrs;
}
- addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
+ addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
if (!addrs) {
prog = orig_prog;
goto out_addrs;
@@ -2313,7 +2325,7 @@ out_image:
if (image)
bpf_prog_fill_jited_linfo(prog, addrs + 1);
out_addrs:
- kfree(addrs);
+ kvfree(addrs);
kfree(jit_data);
prog->aux->jit_data = NULL;
}
@@ -2323,3 +2335,8 @@ out:
tmp : orig_prog);
return prog;
}
+
+bool bpf_jit_supports_kfunc_call(void)
+{
+ return true;
+}
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index d17b67c69f89..3da88ded6ee3 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -1390,6 +1390,19 @@ static inline void emit_push_r64(const u8 src[], u8 **pprog)
*pprog = prog;
}
+static void emit_push_r32(const u8 src[], u8 **pprog)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ /* mov ecx,dword ptr [ebp+off] */
+ EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_lo));
+ /* push ecx */
+ EMIT1(0x51);
+
+ *pprog = prog;
+}
+
static u8 get_cond_jmp_opcode(const u8 op, bool is_cmp_lo)
{
u8 jmp_cond;
@@ -1459,6 +1472,174 @@ static u8 get_cond_jmp_opcode(const u8 op, bool is_cmp_lo)
return jmp_cond;
}
+/* The i386 kernel compiles with "-mregparm=3". From the gcc documentation:
+ *
+ * ==== snippet ====
+ * regparm (number)
+ * On x86-32 targets, the regparm attribute causes the compiler
+ * to pass arguments number one to (number) if they are of integral
+ * type in registers EAX, EDX, and ECX instead of on the stack.
+ * Functions that take a variable number of arguments continue
+ * to be passed all of their arguments on the stack.
+ * ==== snippet ====
+ *
+ * The first three args of a function will be considered for
+ * putting into the 32-bit registers EAX, EDX, and ECX.
+ *
+ * Two 32-bit registers are used to pass a 64-bit arg.
+ *
+ * For example,
+ * void foo(u32 a, u32 b, u32 c, u32 d):
+ * u32 a: EAX
+ * u32 b: EDX
+ * u32 c: ECX
+ * u32 d: stack
+ *
+ * void foo(u64 a, u32 b, u32 c):
+ * u64 a: EAX (lo32) EDX (hi32)
+ * u32 b: ECX
+ * u32 c: stack
+ *
+ * void foo(u32 a, u64 b, u32 c):
+ * u32 a: EAX
+ * u64 b: EDX (lo32) ECX (hi32)
+ * u32 c: stack
+ *
+ * void foo(u32 a, u32 b, u64 c):
+ * u32 a: EAX
+ * u32 b: EDX
+ * u64 c: stack
+ *
+ * The return value will be stored in EAX (and EDX for a 64-bit value).
+ *
+ * For example,
+ * u32 foo(u32 a, u32 b, u32 c):
+ * return value: EAX
+ *
+ * u64 foo(u32 a, u32 b, u32 c):
+ * return value: EAX (lo32) EDX (hi32)
+ *
+ * Notes:
+ * The verifier only accepts functions that have integers and pointers
+ * as their args and return value, so there is no
+ * struct-by-value.
+ *
+ * emit_kfunc_call() finds out the btf_func_model by calling
+ * bpf_jit_find_kfunc_model(). A btf_func_model
+ * has the details about the number of args, size of each arg,
+ * and the size of the return value.
+ *
+ * It first decides how many args can be passed by EAX, EDX, and ECX.
+ * That will decide what args should be pushed to the stack:
+ * [first_stack_regno, last_stack_regno] are the bpf regnos
+ * that should be pushed to the stack.
+ *
+ * It will first push all args to the stack because the push
+ * will need to use ECX. Then, it moves
+ * [BPF_REG_1, first_stack_regno) to EAX, EDX, and ECX.
+ *
+ * When emitting a call (0xE8), it needs to figure out
+ * the jmp_offset relative to the jit-insn address immediately
+ * following the call (0xE8) instruction. At this point, it knows
+ * the end of the jit-insn address after completely translating the
+ * current (BPF_JMP | BPF_CALL) bpf-insn. It is passed as "end_addr"
+ * to emit_kfunc_call(). Thus, it can learn the "immediate-follow-call"
+ * address by figuring out how many jit-insns are generated between
+ * the call (0xE8) and the end_addr:
+ * - 0-1 jit-insn (3 bytes each) to restore the esp pointer if there
+ * are args pushed to the stack.
+ * - 0-2 jit-insns (3 bytes each) to handle the return value.
+ */
+static int emit_kfunc_call(const struct bpf_prog *bpf_prog, u8 *end_addr,
+ const struct bpf_insn *insn, u8 **pprog)
+{
+ const u8 arg_regs[] = { IA32_EAX, IA32_EDX, IA32_ECX };
+ int i, cnt = 0, first_stack_regno, last_stack_regno;
+ int free_arg_regs = ARRAY_SIZE(arg_regs);
+ const struct btf_func_model *fm;
+ int bytes_in_stack = 0;
+ const u8 *cur_arg_reg;
+ u8 *prog = *pprog;
+ s64 jmp_offset;
+
+ fm = bpf_jit_find_kfunc_model(bpf_prog, insn);
+ if (!fm)
+ return -EINVAL;
+
+ first_stack_regno = BPF_REG_1;
+ for (i = 0; i < fm->nr_args; i++) {
+ int regs_needed = fm->arg_size[i] > sizeof(u32) ? 2 : 1;
+
+ if (regs_needed > free_arg_regs)
+ break;
+
+ free_arg_regs -= regs_needed;
+ first_stack_regno++;
+ }
+
+ /* Push the args to the stack */
+ last_stack_regno = BPF_REG_0 + fm->nr_args;
+ for (i = last_stack_regno; i >= first_stack_regno; i--) {
+ if (fm->arg_size[i - 1] > sizeof(u32)) {
+ emit_push_r64(bpf2ia32[i], &prog);
+ bytes_in_stack += 8;
+ } else {
+ emit_push_r32(bpf2ia32[i], &prog);
+ bytes_in_stack += 4;
+ }
+ }
+
+ cur_arg_reg = &arg_regs[0];
+ for (i = BPF_REG_1; i < first_stack_regno; i++) {
+ /* mov e[adc]x,dword ptr [ebp+off] */
+ EMIT3(0x8B, add_2reg(0x40, IA32_EBP, *cur_arg_reg++),
+ STACK_VAR(bpf2ia32[i][0]));
+ if (fm->arg_size[i - 1] > sizeof(u32))
+ /* mov e[adc]x,dword ptr [ebp+off] */
+ EMIT3(0x8B, add_2reg(0x40, IA32_EBP, *cur_arg_reg++),
+ STACK_VAR(bpf2ia32[i][1]));
+ }
+
+ if (bytes_in_stack)
+ /* add esp,"bytes_in_stack" */
+ end_addr -= 3;
+
+ /* mov dword ptr [ebp+off],edx */
+ if (fm->ret_size > sizeof(u32))
+ end_addr -= 3;
+
+ /* mov dword ptr [ebp+off],eax */
+ if (fm->ret_size)
+ end_addr -= 3;
+
+ jmp_offset = (u8 *)__bpf_call_base + insn->imm - end_addr;
+ if (!is_simm32(jmp_offset)) {
+ pr_err("unsupported BPF kernel function jmp_offset:%lld\n",
+ jmp_offset);
+ return -EINVAL;
+ }
+
+ EMIT1_off32(0xE8, jmp_offset);
+
+ if (fm->ret_size)
+ /* mov dword ptr [ebp+off],eax */
+ EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
+ STACK_VAR(bpf2ia32[BPF_REG_0][0]));
+
+ if (fm->ret_size > sizeof(u32))
+ /* mov dword ptr [ebp+off],edx */
+ EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX),
+ STACK_VAR(bpf2ia32[BPF_REG_0][1]));
+
+ if (bytes_in_stack)
+ /* add esp,"bytes_in_stack" */
+ EMIT3(0x83, add_1reg(0xC0, IA32_ESP), bytes_in_stack);
+
+ *pprog = prog;
+
+ return 0;
+}
+
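A worked example of the register assignment described above (the kfunc signature is hypothetical):

/* Worked example: u64 f(u64 a, u32 b, u32 c) under -mregparm=3:
 *   a -> EAX (lo32) + EDX (hi32)   (consumes two register slots)
 *   b -> ECX                       (last free register)
 *   c -> pushed on the stack       (first_stack_regno == BPF_REG_3)
 *   return -> EAX (lo32) + EDX (hi32), stored back into BPF_REG_0's
 *   two 32-bit stack slots by the movs emitted after the call. */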
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
int oldproglen, struct jit_context *ctx)
{
@@ -1888,6 +2069,18 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
if (insn->src_reg == BPF_PSEUDO_CALL)
goto notyet;
+ if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
+ int err;
+
+ err = emit_kfunc_call(bpf_prog,
+ image + addrs[i],
+ insn, &prog);
+
+ if (err)
+ return err;
+ break;
+ }
+
func = (u8 *) __bpf_call_base + imm32;
jmp_offset = func - (image + addrs[i]);
@@ -2276,7 +2469,16 @@ notyet:
}
if (image) {
- if (unlikely(proglen + ilen > oldproglen)) {
+ /*
+ * When populating the image, assert that:
+ *
+ * i) We do not write beyond the allocated space, and
+ * ii) addrs[i] did not change from the prior run, in order
+ * to validate assumptions made for computing branch
+ * displacements.
+ */
+ if (unlikely(proglen + ilen > oldproglen ||
+ proglen + ilen != addrs[i])) {
pr_err("bpf_jit: fatal error\n");
return -EFAULT;
}
@@ -2393,3 +2595,8 @@ out:
tmp : orig_prog);
return prog;
}
+
+bool bpf_jit_supports_kfunc_call(void)
+{
+ return true;
+}
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index bfa50e65ef6c..dd40d3fea74e 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -126,7 +126,7 @@ static int __init early_root_info_init(void)
node = (reg >> 4) & 0x07;
link = (reg >> 8) & 0x03;
- info = alloc_pci_root_info(min_bus, max_bus, node, link);
+ alloc_pci_root_info(min_bus, max_bus, node, link);
}
/*
@@ -284,7 +284,7 @@ static int __init early_root_info_init(void)
/* need to take out [4G, TOM2) for RAM*/
/* SYS_CFG */
- address = MSR_K8_SYSCFG;
+ address = MSR_AMD64_SYSCFG;
rdmsrl(address, val);
/* TOP_MEM2 is enabled? */
if (val & (1<<21)) {
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 0a0e168be1cb..2edd86649468 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -375,7 +375,7 @@ static const struct dmi_system_id msi_k8t_dmi_table[] = {
* The BIOS only gives options "DISABLED" and "AUTO". This code sets
* the corresponding register-value to enable the soundcard.
*
- * The soundcard is only enabled, if the mainborad is identified
+ * The soundcard is only enabled if the mainboard is identified
* via DMI-tables and the soundcard is detected to be off.
*/
static void pci_fixup_msi_k8t_onboard_sound(struct pci_dev *dev)
@@ -779,4 +779,48 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
+#define RS690_LOWER_TOP_OF_DRAM2 0x30
+#define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
+#define RS690_UPPER_TOP_OF_DRAM2 0x31
+#define RS690_HTIU_NB_INDEX 0xA8
+#define RS690_HTIU_NB_INDEX_WR_ENABLE 0x100
+#define RS690_HTIU_NB_DATA 0xAC
+
+/*
+ * Some BIOS implementations support RAM above 4GB, but do not configure the
+ * PCI host to respond to bus master accesses for these addresses. These
+ * implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
+ * works as expected for addresses below 4GB.
+ *
+ * Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
+ * https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
+ */
+static void rs690_fix_64bit_dma(struct pci_dev *pdev)
+{
+ u32 val = 0;
+ phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;
+
+ if (top_of_dram <= (1ULL << 32))
+ return;
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_LOWER_TOP_OF_DRAM2);
+ pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
+
+ if (val)
+ return;
+
+ pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+ pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+ pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
+ top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
+
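The indirect-index access pattern used above generalizes; a hedged helper sketch (hypothetical function, same registers as the fixup):

/* Sketch: HTIU registers on the RS690 are reached indirectly - write the
 * register index to NB_INDEX (OR in the write-enable bit for stores),
 * then move data through NB_DATA. */
static u32 rs690_htiu_read(struct pci_dev *pdev, u32 index)
{
	u32 val;

	pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX, index);
	pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
	return val;
}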
#endif
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 8a26e705cb06..147c30a81f15 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -468,7 +468,7 @@ void __init efi_init(void)
*/
if (!efi_runtime_supported())
- pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
+ pr_err("No EFI runtime due to 32/64-bit mismatch with kernel\n");
if (!efi_runtime_supported() || efi_runtime_disabled()) {
efi_memmap_unmap();
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 1b82d77019b1..7515e78ef898 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -47,7 +47,7 @@
#include <asm/realmode.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
/*
* We allocate runtime services regions top-down, starting from -4G, i.e.
@@ -195,7 +195,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
}
/*
- * Certain firmware versions are way too sentimential and still believe
+ * Certain firmware versions are way too sentimental and still believe
* they are exclusive and unquestionable owners of the first physical page,
* even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
* (but then write-access it later during SetVirtualAddressMap()).
@@ -457,7 +457,7 @@ void __init efi_dump_pagetable(void)
* in a kernel thread and user context. Preemption needs to remain disabled
* while the EFI-mm is borrowed. mmgrab()/mmdrop() is not used because the mm
* can not change under us.
- * It should be ensured that there are no concurent calls to this function.
+ * It should be ensured that there are no concurrent calls to this function.
*/
void efi_enter_mm(void)
{
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 67d93a243c35..b15ebfe40a73 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -441,7 +441,7 @@ void __init efi_free_boot_services(void)
* 1.4.4 with SGX enabled booting Linux via Fedora 24's
* grub2-efi on a hard disk. (And no, I don't know why
* this happened, but Linux should still try to boot rather
- * panicing early.)
+ * panicking early.)
*/
rm_size = real_mode_size_needed();
if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {
@@ -450,6 +450,18 @@ void __init efi_free_boot_services(void)
size -= rm_size;
}
+ /*
+ * Don't free memory under 1M for two reasons:
+ * - BIOS might clobber it
+ * - Crash kernel needs it to be reserved
+ */
+ if (start + size < SZ_1M)
+ continue;
+ if (start < SZ_1M) {
+ size -= (SZ_1M - start);
+ start = SZ_1M;
+ }
+
memblock_free_late(start, size);
}
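A worked example of the new clamp (region addresses are hypothetical):

/* Worked example (hypothetical regions, SZ_1M == 0x100000):
 * [0x80000, 0x180000): start < SZ_1M, so the region is trimmed to
 *   [0x100000, 0x180000) before memblock_free_late().
 * [0x40000, 0xa0000): start + size < SZ_1M, so it is skipped and the
 *   whole region stays reserved for the BIOS / crash kernel. */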
@@ -726,7 +738,7 @@ void efi_crash_gracefully_on_page_fault(unsigned long phys_addr)
* Buggy efi_reset_system() is handled differently from other EFI
* Runtime Services as it doesn't use efi_rts_wq. Although,
* native_machine_emergency_restart() says that machine_real_restart()
- * could fail, it's better not to compilcate this fault handler
+ * could fail, it's better not to complicate this fault handler
* because this case occurs *very* rarely and hence could be improved
* on a need by basis.
*/
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
index 0286fe1b14b5..d3d456925b2a 100644
--- a/arch/x86/platform/intel-quark/imr.c
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* imr.c -- Intel Isolated Memory Region driver
*
* Copyright(c) 2013 Intel Corporation.
@@ -551,7 +551,7 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
/*
* Setup an unlocked IMR around the physical extent of the kernel
- * from the beginning of the .text secton to the end of the
+ * from the beginning of the .text section to the end of the
* .rodata section as one physically contiguous block.
*
* We don't round up @size since it is already PAGE_SIZE aligned.
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
index 570e3062faac..761f3689f60a 100644
--- a/arch/x86/platform/intel-quark/imr_selftest.c
+++ b/arch/x86/platform/intel-quark/imr_selftest.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* imr_selftest.c -- Intel Isolated Memory Region self-test driver
*
* Copyright(c) 2013 Intel Corporation.
diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
index 526f70f27c1c..fdd49d70b437 100644
--- a/arch/x86/platform/intel/iosf_mbi.c
+++ b/arch/x86/platform/intel/iosf_mbi.c
@@ -187,7 +187,7 @@ bool iosf_mbi_available(void)
EXPORT_SYMBOL(iosf_mbi_available);
/*
- **************** P-Unit/kernel shared I2C bus arbritration ****************
+ **************** P-Unit/kernel shared I2C bus arbitration ****************
*
* Some Bay Trail and Cherry Trail devices have the P-Unit and us (the kernel)
* share a single I2C bus to the PMIC. Below are helpers to arbitrate the
@@ -493,7 +493,7 @@ static void iosf_sideband_debug_init(void)
/* mcrx */
debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx);
- /* mcr - initiates mailbox tranaction */
+ /* mcr - initiates mailbox transaction */
debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops);
}
diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c
index 1ac8578258af..b42bfdab01a9 100644
--- a/arch/x86/platform/iris/iris.c
+++ b/arch/x86/platform/iris/iris.c
@@ -27,7 +27,6 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sébastien Hinderer <Sebastien.Hinderer@ens-lyon.org>");
MODULE_DESCRIPTION("A power_off handler for Iris devices from EuroBraille");
-MODULE_SUPPORTED_DEVICE("Eurobraille/Iris");
static bool force;
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 85f4638764d6..994a229cb79f 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -27,7 +27,7 @@ static bool lid_wake_on_close;
* wake-on-close. This is implemented as standard by the XO-1.5 DSDT.
*
* We provide here a sysfs attribute that will additionally enable
- * wake-on-close behavior. This is useful (e.g.) when we oportunistically
+ * wake-on-close behavior. This is useful (e.g.) when we opportunistically
* suspend with the display running; if the lid is then closed, we want to
* wake up to turn the display off.
*
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index 26d1f6693789..75e3319e8bee 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -131,7 +131,7 @@ void * __init prom_early_alloc(unsigned long size)
const size_t chunk_size = max(PAGE_SIZE, size);
/*
- * To mimimize the number of allocations, grab at least
+ * To minimize the number of allocations, grab at least
* PAGE_SIZE of memory (that's an arbitrary choice that's
* fast enough on the platforms we care about while minimizing
* wasted bootmem) and hand off chunks of it to callers.
diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
index d2ccadc247e6..72c1e42d121d 100644
--- a/arch/x86/platform/pvh/head.S
+++ b/arch/x86/platform/pvh/head.S
@@ -30,10 +30,10 @@
* the boot start info structure.
* - `cr0`: bit 0 (PE) must be set. All the other writeable bits are cleared.
* - `cr4`: all bits are cleared.
- * - `cs `: must be a 32-bit read/execute code segment with a base of ‘0’
- * and a limit of ‘0xFFFFFFFF’. The selector value is unspecified.
+ * - `cs `: must be a 32-bit read/execute code segment with a base of `0`
+ * and a limit of `0xFFFFFFFF`. The selector value is unspecified.
* - `ds`, `es`: must be a 32-bit read/write data segment with a base of
- * ‘0’ and a limit of ‘0xFFFFFFFF’. The selector values are all
+ * `0` and a limit of `0xFFFFFFFF`. The selector values are all
* unspecified.
* - `tr`: must be a 32-bit TSS (active) with a base of '0' and a limit
* of '0x67'.
@@ -46,10 +46,8 @@
#define PVH_GDT_ENTRY_CS 1
#define PVH_GDT_ENTRY_DS 2
-#define PVH_GDT_ENTRY_CANARY 3
#define PVH_CS_SEL (PVH_GDT_ENTRY_CS * 8)
#define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8)
-#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8)
SYM_CODE_START_LOCAL(pvh_start_xen)
cld
@@ -111,17 +109,6 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
#else /* CONFIG_X86_64 */
- /* Set base address in stack canary descriptor. */
- movl $_pa(gdt_start),%eax
- movl $_pa(canary),%ecx
- movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax)
- shrl $16, %ecx
- movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax)
- movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax)
-
- mov $PVH_CANARY_SEL,%eax
- mov %eax,%gs
-
call mk_early_pgtbl_32
mov $_pa(initial_page_table), %eax
@@ -165,7 +152,6 @@ SYM_DATA_START_LOCAL(gdt_start)
.quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */
#endif
.quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */
- .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */
SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)
.balign 16
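
Editor's note: for readers decoding the GDT_ENTRY() initializers above — the macro scatters a flags/base/limit triple across the 8-byte x86 descriptor layout. A hedged userspace sketch of that packing, mirroring my reading of the kernel's definition in arch/x86/include/asm/segment.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack an x86 descriptor the way GDT_ENTRY(flags, base, limit) does:
     * base and limit are scattered across the 64-bit word, flags carry
     * type/DPL/present plus the granularity nibble. */
    static uint64_t gdt_entry(uint64_t flags, uint64_t base, uint64_t limit)
    {
    	return ((base  & 0xff000000ULL) << (56 - 24)) |
    	       ((flags & 0x0000f0ffULL) << 40) |
    	       ((limit & 0x000f0000ULL) << (48 - 16)) |
    	       ((base  & 0x00ffffffULL) << 16) |
    	        (limit & 0x0000ffffULL);
    }

    int main(void)
    {
    	/* The PVH_CS_SEL entry above: prints 0xcf9a000000ffff,
    	 * the classic flat 4 GiB read/execute code segment. */
    	printf("%#llx\n", (unsigned long long)gdt_entry(0xc09a, 0, 0xfffff));
    	return 0;
    }
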
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index eafc530c8767..1e9ff28bc2e0 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -24,6 +24,7 @@
#include <asm/kdebug.h>
#include <asm/local64.h>
#include <asm/nmi.h>
+#include <asm/reboot.h>
#include <asm/traps.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_hub.h>
@@ -91,6 +92,8 @@ static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
static atomic_t uv_nmi_slave_continue;
static cpumask_var_t uv_nmi_cpu_mask;
+static atomic_t uv_nmi_kexec_failed;
+
/* Values for uv_nmi_slave_continue */
#define SLAVE_CLEAR 0
#define SLAVE_CONTINUE 1
@@ -834,38 +837,35 @@ static void uv_nmi_touch_watchdogs(void)
touch_nmi_watchdog();
}
-static atomic_t uv_nmi_kexec_failed;
-
-#if defined(CONFIG_KEXEC_CORE)
-static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
+static void uv_nmi_kdump(int cpu, int main, struct pt_regs *regs)
{
+ /* Check if kdump kernel loaded for both main and secondary CPUs */
+ if (!kexec_crash_image) {
+ if (main)
+ pr_err("UV: NMI error: kdump kernel not loaded\n");
+ return;
+ }
+
/* Call crash to dump system state */
- if (master) {
+ if (main) {
pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
crash_kexec(regs);
- pr_emerg("UV: crash_kexec unexpectedly returned, ");
+ pr_emerg("UV: crash_kexec unexpectedly returned\n");
atomic_set(&uv_nmi_kexec_failed, 1);
- if (!kexec_crash_image) {
- pr_cont("crash kernel not loaded\n");
- return;
- }
- pr_cont("kexec busy, stalling cpus while waiting\n");
- }
- /* If crash exec fails the slaves should return, otherwise stall */
- while (atomic_read(&uv_nmi_kexec_failed) == 0)
- mdelay(10);
-}
+ } else { /* secondary */
-#else /* !CONFIG_KEXEC_CORE */
-static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
-{
- if (master)
- pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n");
- atomic_set(&uv_nmi_kexec_failed, 1);
+ /* If kdump kernel fails, secondaries will exit this loop */
+ while (atomic_read(&uv_nmi_kexec_failed) == 0) {
+
+ /* Once the shootdown of cpus starts, they do not return */
+ run_crash_ipi_callback(regs);
+
+ mdelay(10);
+ }
+ }
}
-#endif /* !CONFIG_KEXEC_CORE */
#ifdef CONFIG_KGDB
#ifdef CONFIG_KGDB_KDB
@@ -889,7 +889,7 @@ static inline int uv_nmi_kdb_reason(void)
* Call KGDB/KDB from NMI handler
*
* Note that if both KGDB and KDB are configured, then the action of 'kgdb' or
- * 'kdb' has no affect on which is used. See the KGDB documention for further
+ * 'kdb' has no effect on which is used. See the KGDB documentation for further
* information.
*/
static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
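
Editor's note: the reworked uv_nmi_kdump() reduces to a simple handshake — the main CPU tries crash_kexec() and, if that unexpectedly returns, raises an atomic flag that releases the spinning secondaries. A stripped-down pthread sketch of the handshake (names hypothetical; crash_kexec() replaced by a stub message; build with cc -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int kexec_failed;

    static void *secondary(void *arg)
    {
    	/* Secondaries stall until the main CPU reports failure; on
    	 * success they would never run again. */
    	while (atomic_load(&kexec_failed) == 0)
    		usleep(10000);		/* mdelay(10) in the kernel */
    	return NULL;
    }

    int main(void)
    {
    	pthread_t t;

    	pthread_create(&t, NULL, secondary, NULL);
    	/* crash_kexec() does not return on success; reaching the
    	 * next line is the failure path. */
    	fprintf(stderr, "crash_kexec unexpectedly returned\n");
    	atomic_store(&kexec_failed, 1);	/* release the secondaries */
    	pthread_join(t, NULL);
    	return 0;
    }
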
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index db1378c6ff26..3a070e7cdb8b 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -99,11 +99,8 @@ static void __save_processor_state(struct saved_context *ctxt)
/*
* segment registers
*/
-#ifdef CONFIG_X86_32_LAZY_GS
savesegment(gs, ctxt->gs);
-#endif
#ifdef CONFIG_X86_64
- savesegment(gs, ctxt->gs);
savesegment(fs, ctxt->fs);
savesegment(ds, ctxt->ds);
savesegment(es, ctxt->es);
@@ -232,7 +229,6 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
loadsegment(fs, __KERNEL_PERCPU);
- loadsegment(gs, __KERNEL_STACK_CANARY);
#endif
/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
@@ -255,7 +251,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
*/
wrmsrl(MSR_FS_BASE, ctxt->fs_base);
wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
-#elif defined(CONFIG_X86_32_LAZY_GS)
+#else
loadsegment(gs, ctxt->gs);
#endif
@@ -321,7 +317,7 @@ int hibernate_resume_nonboot_cpu_disable(void)
/*
* When bsp_check() is called in hibernate and suspend, cpu hotplug
- * is disabled already. So it's unnessary to handle race condition between
+ * is disabled already. So it's unnecessary to handle race condition between
* cpumask query and cpu hotplug.
*/
static int bsp_check(void)
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index cd3914fc9f3d..e94e0050a583 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -13,8 +13,8 @@
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <linux/pgtable.h>
-
-#include <crypto/hash.h>
+#include <linux/types.h>
+#include <linux/crc32.h>
#include <asm/e820/api.h>
#include <asm/init.h>
@@ -54,95 +54,33 @@ int pfn_is_nosave(unsigned long pfn)
return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
}
-
-#define MD5_DIGEST_SIZE 16
-
struct restore_data_record {
unsigned long jump_address;
unsigned long jump_address_phys;
unsigned long cr3;
unsigned long magic;
- u8 e820_digest[MD5_DIGEST_SIZE];
+ unsigned long e820_checksum;
};
-#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
/**
- * get_e820_md5 - calculate md5 according to given e820 table
+ * compute_e820_crc32 - calculate crc32 of a given e820 table
*
* @table: the e820 table to be calculated
- * @buf: the md5 result to be stored to
+ *
+ * Return: the resulting checksum
*/
-static int get_e820_md5(struct e820_table *table, void *buf)
+static inline u32 compute_e820_crc32(struct e820_table *table)
{
- struct crypto_shash *tfm;
- struct shash_desc *desc;
- int size;
- int ret = 0;
-
- tfm = crypto_alloc_shash("md5", 0, 0);
- if (IS_ERR(tfm))
- return -ENOMEM;
-
- desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
- GFP_KERNEL);
- if (!desc) {
- ret = -ENOMEM;
- goto free_tfm;
- }
-
- desc->tfm = tfm;
-
- size = offsetof(struct e820_table, entries) +
+ int size = offsetof(struct e820_table, entries) +
sizeof(struct e820_entry) * table->nr_entries;
- if (crypto_shash_digest(desc, (u8 *)table, size, buf))
- ret = -EINVAL;
-
- kfree_sensitive(desc);
-
-free_tfm:
- crypto_free_shash(tfm);
- return ret;
-}
-
-static int hibernation_e820_save(void *buf)
-{
- return get_e820_md5(e820_table_firmware, buf);
-}
-
-static bool hibernation_e820_mismatch(void *buf)
-{
- int ret;
- u8 result[MD5_DIGEST_SIZE];
-
- memset(result, 0, MD5_DIGEST_SIZE);
- /* If there is no digest in suspend kernel, let it go. */
- if (!memcmp(result, buf, MD5_DIGEST_SIZE))
- return false;
-
- ret = get_e820_md5(e820_table_firmware, result);
- if (ret)
- return true;
-
- return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
-}
-#else
-static int hibernation_e820_save(void *buf)
-{
- return 0;
-}
-
-static bool hibernation_e820_mismatch(void *buf)
-{
- /* If md5 is not builtin for restore kernel, let it go. */
- return false;
+ return ~crc32_le(~0, (unsigned char const *)table, size);
}
-#endif
#ifdef CONFIG_X86_64
-#define RESTORE_MAGIC 0x23456789ABCDEF01UL
+#define RESTORE_MAGIC 0x23456789ABCDEF02UL
#else
-#define RESTORE_MAGIC 0x12345678UL
+#define RESTORE_MAGIC 0x12345679UL
#endif
/**
@@ -179,7 +117,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
*/
rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
- return hibernation_e820_save(rdr->e820_digest);
+ rdr->e820_checksum = compute_e820_crc32(e820_table_firmware);
+ return 0;
}
/**
@@ -200,7 +139,7 @@ int arch_hibernation_header_restore(void *addr)
jump_address_phys = rdr->jump_address_phys;
restore_cr3 = rdr->cr3;
- if (hibernation_e820_mismatch(rdr->e820_digest)) {
+ if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) {
pr_crit("Hibernate inconsistent memory map detected!\n");
return -ENODEV;
}
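
Editor's note: the new checksum is plain reflected CRC-32 — seeding crc32_le() with ~0 and inverting the result is the conventional init/final-xor form. A dependency-free bitwise sketch of the equivalent computation:

    #include <stdint.h>
    #include <stdio.h>

    /* Reflected CRC-32 (polynomial 0xEDB88320), equivalent to the
     * kernel's ~crc32_le(~0, buf, len). */
    static uint32_t crc32_sketch(const void *buf, size_t len)
    {
    	const uint8_t *p = buf;
    	uint32_t crc = ~0u;

    	while (len--) {
    		crc ^= *p++;
    		for (int i = 0; i < 8; i++)
    			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
    	}
    	return ~crc;
    }

    int main(void)
    {
    	/* Two identical "e820 tables" checksum identically. */
    	char a[] = "e820", b[] = "e820";
    	printf("%s\n", crc32_sketch(a, 4) == crc32_sketch(b, 4) ?
    	       "match" : "mismatch");
    	return 0;
    }
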
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index f03b64d9cb51..7558139920f8 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -9,6 +9,8 @@
*/
#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
#include <crypto/sha2.h>
#include <asm/purgatory.h>
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
index 6b1f3a4eeb44..a0b491ae2de8 100644
--- a/arch/x86/realmode/Makefile
+++ b/arch/x86/realmode/Makefile
@@ -10,7 +10,6 @@
# Sanitizer runtimes are unavailable and cannot be linked here.
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
-OBJECT_FILES_NON_STANDARD := y
subdir- := rm
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 22fda7d99159..6534c92d0f83 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -9,7 +9,7 @@
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
-#include <asm/sev-es.h>
+#include <asm/sev.h>
struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;
@@ -29,14 +29,16 @@ void __init reserve_real_mode(void)
/* Has to be under 1M so we can execute real-mode AP code. */
mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
- if (!mem) {
+ if (!mem)
pr_info("No sub-1M memory is available for the trampoline\n");
- return;
- }
+ else
+ set_real_mode_mem(mem);
- memblock_reserve(mem, size);
- set_real_mode_mem(mem);
- crash_reserve_low_1M();
+ /*
+ * Unconditionally reserve the entire first 1M, see comment in
+ * setup_arch().
+ */
+ memblock_reserve(0, SZ_1M);
}
static void sme_sev_setup_real_mode(struct trampoline_header *th)
@@ -103,7 +105,7 @@ static void __init setup_real_mode(void)
*ptr += phys_base;
}
- /* Must be perfomed *after* relocation. */
+ /* Must be performed *after* relocation. */
trampoline_header = (struct trampoline_header *)
__va(real_mode_header->trampoline_header);
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index 84c5d1b33d10..cc8391f86cdb 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -123,9 +123,9 @@ SYM_CODE_START(startup_32)
*/
btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
jnc .Ldone
- movl $MSR_K8_SYSCFG, %ecx
+ movl $MSR_AMD64_SYSCFG, %ecx
rdmsr
- bts $MSR_K8_SYSCFG_MEM_ENCRYPT_BIT, %eax
+ bts $MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT, %eax
jc .Ldone
/*
diff --git a/arch/x86/tools/insn_decoder_test.c b/arch/x86/tools/insn_decoder_test.c
index 34eda63c124b..472540aeabc2 100644
--- a/arch/x86/tools/insn_decoder_test.c
+++ b/arch/x86/tools/insn_decoder_test.c
@@ -120,7 +120,7 @@ int main(int argc, char **argv)
while (fgets(line, BUFSIZE, stdin)) {
char copy[BUFSIZE], *s, *tab1, *tab2;
- int nb = 0;
+ int nb = 0, ret;
unsigned int b;
if (line[0] == '<') {
@@ -148,10 +148,12 @@ int main(int argc, char **argv)
} else
break;
}
+
/* Decode an instruction */
- insn_init(&insn, insn_buff, sizeof(insn_buff), x86_64);
- insn_get_length(&insn);
- if (insn.length != nb) {
+ ret = insn_decode(&insn, insn_buff, sizeof(insn_buff),
+ x86_64 ? INSN_MODE_64 : INSN_MODE_32);
+
+ if (ret < 0 || insn.length != nb) {
warnings++;
pr_warn("Found an x86 instruction decoder bug, "
"please report this.\n", sym);
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c
index c6a0000ae635..213f35f94feb 100644
--- a/arch/x86/tools/insn_sanity.c
+++ b/arch/x86/tools/insn_sanity.c
@@ -218,8 +218,8 @@ static void parse_args(int argc, char **argv)
int main(int argc, char **argv)
{
+ int insns = 0, ret;
struct insn insn;
- int insns = 0;
int errors = 0;
unsigned long i;
unsigned char insn_buff[MAX_INSN_SIZE * 2];
@@ -237,15 +237,15 @@ int main(int argc, char **argv)
continue;
/* Decode an instruction */
- insn_init(&insn, insn_buff, sizeof(insn_buff), x86_64);
- insn_get_length(&insn);
+ ret = insn_decode(&insn, insn_buff, sizeof(insn_buff),
+ x86_64 ? INSN_MODE_64 : INSN_MODE_32);
if (insn.next_byte <= insn.kaddr ||
insn.kaddr + MAX_INSN_SIZE < insn.next_byte) {
/* Access out-of-range memory */
dump_stream(stderr, "Error: Found an access violation", i, insn_buff, &insn);
errors++;
- } else if (verbose && !insn_complete(&insn))
+ } else if (verbose && ret < 0)
dump_stream(stdout, "Info: Found an undecodable input", i, insn_buff, &insn);
else if (verbose >= 2)
dump_insn(stdout, &insn);
diff --git a/arch/x86/um/Makefile b/arch/x86/um/Makefile
index 77f70b969d14..5ccb18290d71 100644
--- a/arch/x86/um/Makefile
+++ b/arch/x86/um/Makefile
@@ -21,6 +21,7 @@ obj-y += checksum_32.o syscalls_32.o
obj-$(CONFIG_ELF_CORE) += elfcore.o
subarch-y = ../lib/string_32.o ../lib/atomic64_32.o ../lib/atomic64_cx8_32.o
+subarch-y += ../lib/cmpxchg8b_emu.o ../lib/atomic64_386_32.o
subarch-y += ../kernel/sys_ia32.o
else
diff --git a/arch/x86/um/asm/elf.h b/arch/x86/um/asm/elf.h
index c907b20d4993..dcaf3b38a9e0 100644
--- a/arch/x86/um/asm/elf.h
+++ b/arch/x86/um/asm/elf.h
@@ -212,6 +212,6 @@ extern int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
extern long elf_aux_hwcap;
#define ELF_HWCAP (elf_aux_hwcap)
-#define SET_PERSONALITY(ex) do ; while(0)
+#define SET_PERSONALITY(ex) do {} while(0)
#endif
diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h
index c3891c1ada26..b95db9daf0e8 100644
--- a/arch/x86/um/shared/sysdep/stub_32.h
+++ b/arch/x86/um/shared/sysdep/stub_32.h
@@ -77,7 +77,7 @@ static inline void trap_myself(void)
__asm("int3");
}
-static void inline remap_stack_and_trap(void)
+static inline void remap_stack_and_trap(void)
{
__asm__ volatile (
"movl %%esp,%%ebx ;"
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 2ed81e581755..0575decb5e54 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -7,7 +7,6 @@
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
#define __NO_STUBS
@@ -26,20 +25,17 @@
#define old_mmap sys_old_mmap
-#define __SYSCALL_I386(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+
+#define __SYSCALL(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#include <asm/syscalls_32.h>
-#undef __SYSCALL_I386
-#define __SYSCALL_I386(nr, sym) [ nr ] = sym,
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) sym,
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_syscall_max] = &sys_ni_syscall,
#include <asm/syscalls_32.h>
};
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index 2e8544dafbb0..95725b5a41ac 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -7,7 +7,6 @@
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
-#include <asm/unistd.h>
#include <asm/syscall.h>
#define __NO_STUBS
@@ -36,23 +35,15 @@
#define stub_execveat sys_execveat
#define stub_rt_sigreturn sys_rt_sigreturn
-#define __SYSCALL_X32(nr, sym)
-#define __SYSCALL_COMMON(nr, sym) __SYSCALL_64(nr, sym)
-
-#define __SYSCALL_64(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL(nr, sym) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
#include <asm/syscalls_64.h>
-#undef __SYSCALL_64
-#define __SYSCALL_64(nr, sym) [ nr ] = sym,
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) sym,
extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
- /*
- * Smells like a compiler bug -- it doesn't work
- * when the & below is removed.
- */
- [0 ... __NR_syscall_max] = &sys_ni_syscall,
#include <asm/syscalls_64.h>
};
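
Editor's note: both UM tables lean on the X-macro trick — one syscall list is expanded twice, first into extern declarations and then into positional table entries, which is why the [0 ... __NR_syscall_max] default initializer could go. A toy standalone version of the technique (list contents hypothetical):

    #include <stdio.h>

    typedef long (*sys_call_ptr_t)(long, long, long);

    /* One list, expanded twice with different definitions of X. */
    #define SYSCALL_LIST(X) \
    	X(0, sys_read)	\
    	X(1, sys_write)

    #define X(nr, sym) static long sym(long a, long b, long c) { return nr; }
    SYSCALL_LIST(X)
    #undef X

    #define X(nr, sym) [nr] = sym,
    static const sys_call_ptr_t sys_call_table[] = {
    	SYSCALL_LIST(X)
    };
    #undef X

    int main(void)
    {
    	printf("%ld\n", sys_call_table[1](0, 0, 0));	/* prints 1 */
    	return 0;
    }
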
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index aa9f50fccc5d..c79bd0af2e8c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -6,6 +6,7 @@
#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/slab.h>
+#include <linux/panic_notifier.h>
#include <xen/xen.h>
#include <xen/features.h>
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index dc0a337f985b..03149422dce2 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -592,8 +592,10 @@ DEFINE_IDTENTRY_RAW(xenpv_exc_debug)
DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap)
{
/* This should never happen and there is no way to handle it. */
+ instrumentation_begin();
pr_err("Unknown trap in Xen PV mode.");
BUG();
+ instrumentation_end();
}
#ifdef CONFIG_X86_MCE
@@ -1070,8 +1072,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.read_pmc = xen_read_pmc,
- .iret = xen_iret,
-
.load_tr_desc = paravirt_nop,
.set_ldt = xen_set_ldt,
.load_gdt = xen_load_gdt,
@@ -1204,7 +1204,6 @@ static void __init xen_setup_gdt(int cpu)
pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot;
pv_ops.cpu.load_gdt = xen_load_gdt_boot;
- setup_stack_canary_segment(cpu);
switch_to_new_gdt(cpu);
pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry;
@@ -1233,8 +1232,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
/* Install Xen paravirt ops */
pv_info = xen_info;
- pv_ops.init.patch = paravirt_patch_default;
pv_ops.cpu = xen_cpu_ops;
+ paravirt_iret = xen_iret;
xen_init_irq_ops();
/*
@@ -1276,16 +1275,16 @@ asmlinkage __visible void __init xen_start_kernel(void)
/* Get mfn list */
xen_build_dynamic_phys_to_machine();
+ /* Work out if we support NX */
+ get_cpu_cap(&boot_cpu_data);
+ x86_configure_nx();
+
/*
* Set up kernel GDT and segment registers, mainly so that
* -fstack-protector code can be executed.
*/
xen_setup_gdt(0);
- /* Work out if we support NX */
- get_cpu_cap(&boot_cpu_data);
- x86_configure_nx();
-
/* Determine virtual and physical address sizes */
get_cpu_address_sizes(&boot_cpu_data);
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index cf2ade864c30..ade789e73ee4 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1247,8 +1247,8 @@ static void xen_flush_tlb_one_user(unsigned long addr)
preempt_enable();
}
-static void xen_flush_tlb_others(const struct cpumask *cpus,
- const struct flush_tlb_info *info)
+static void xen_flush_tlb_multi(const struct cpumask *cpus,
+ const struct flush_tlb_info *info)
{
struct {
struct mmuext_op op;
@@ -1258,7 +1258,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
const size_t mc_entry_size = sizeof(args->op) +
sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus());
- trace_xen_mmu_flush_tlb_others(cpus, info->mm, info->start, info->end);
+ trace_xen_mmu_flush_tlb_multi(cpus, info->mm, info->start, info->end);
if (cpumask_empty(cpus))
return; /* nothing to do */
@@ -1267,9 +1267,8 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
args = mcs.args;
args->op.arg2.vcpumask = to_cpumask(args->mask);
- /* Remove us, and any offline CPUS. */
+ /* Remove any offline CPUs */
cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
- cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
if (info->end != TLB_FLUSH_ALL &&
@@ -2086,7 +2085,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.flush_tlb_user = xen_flush_tlb,
.flush_tlb_kernel = xen_flush_tlb,
.flush_tlb_one_user = xen_flush_tlb_one_user,
- .flush_tlb_others = xen_flush_tlb_others,
+ .flush_tlb_multi = xen_flush_tlb_multi,
.tlb_remove_table = tlb_remove_table,
.pgd_alloc = xen_pgd_alloc,
@@ -2410,7 +2409,7 @@ int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
rmd.prot = prot;
/*
* We use the err_ptr to indicate if there we are doing a contiguous
- * mapping or a discontigious mapping.
+ * mapping or a discontiguous mapping.
*/
rmd.contiguous = !err_ptr;
rmd.no_translate = no_translate;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index a3cc33091f46..ac06ca32e9ef 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -98,8 +98,8 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
-#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
+#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
#else
#define P2M_LIMIT 0
#endif
@@ -416,9 +416,6 @@ void __init xen_vmalloc_p2m_tree(void)
xen_p2m_last_pfn = xen_max_p2m_pfn;
p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
- if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC))
- p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO;
-
vm.flags = VM_ALLOC;
vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
PMD_SIZE * PMDS_PER_MID_PAGE);
@@ -741,7 +738,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
map_ops[i].status = GNTST_general_error;
unmap[0].host_addr = map_ops[i].host_addr,
unmap[0].handle = map_ops[i].handle;
- map_ops[i].handle = ~0;
+ map_ops[i].handle = INVALID_GRANT_HANDLE;
if (map_ops[i].flags & GNTMAP_device_map)
unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr;
else
@@ -751,7 +748,7 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
kmap_ops[i].status = GNTST_general_error;
unmap[1].host_addr = kmap_ops[i].host_addr,
unmap[1].handle = kmap_ops[i].handle;
- kmap_ops[i].handle = ~0;
+ kmap_ops[i].handle = INVALID_GRANT_HANDLE;
if (kmap_ops[i].flags & GNTMAP_device_map)
unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr;
else
@@ -776,7 +773,6 @@ int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
out:
return ret;
}
-EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_unmap_grant_ref *kunmap_ops,
@@ -802,7 +798,6 @@ int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
return ret;
}
-EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
#ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h>
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 19ae3e4fe4e9..54f9aa7e8457 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -59,7 +59,7 @@ int __init pci_xen_swiotlb_detect(void)
void __init pci_xen_swiotlb_init(void)
{
if (xen_swiotlb) {
- xen_swiotlb_init(1, true /* early */);
+ xen_swiotlb_init_early();
dma_ops = &xen_swiotlb_dma_ops;
#ifdef CONFIG_PCI
@@ -76,7 +76,7 @@ int pci_xen_swiotlb_init_late(void)
if (xen_swiotlb)
return 0;
- rc = xen_swiotlb_init(1, false /* late */);
+ rc = xen_swiotlb_init();
if (rc)
return rc;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1a3b75652fa4..8bfc10330107 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -59,6 +59,18 @@ static struct {
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
+/*
+ * The maximum amount of extra memory compared to the base size. The
+ * main scaling factor is the size of struct page. At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ *
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define EXTRA_MEM_RATIO (10)
+
static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
static void __init xen_parse_512gb(void)
@@ -778,13 +790,13 @@ char * __init xen_memory_setup(void)
extra_pages += max_pages - max_pfn;
/*
- * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO
+ * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
* factor the base size.
*
* Make sure we have no memory above max_pages, as this area
* isn't handled by the p2m management.
*/
- extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+ extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
extra_pages, max_pages - max_pfn);
i = 0;
addr = xen_e820_table.entries[0].addr;
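
Editor's note: a worked instance of the clamp — min3() keeps the smallest of the ratio cap, the host-granted surplus, and the p2m-manageable headroom. Illustrative numbers only:

    #include <stdio.h>

    #define min(a, b)	((a) < (b) ? (a) : (b))
    #define min3(a, b, c)	min(min(a, b), c)

    int main(void)
    {
    	unsigned long max_pfn = 0x100000;	/* 4 GiB base, 4 KiB pages */
    	unsigned long max_pages = 0xc00000;	/* hypothetical host grant */
    	unsigned long extra = max_pages - max_pfn;

    	/* EXTRA_MEM_RATIO == 10: the ratio cap binds here. */
    	extra = min3(10 * max_pfn, extra, max_pages - max_pfn);
    	printf("extra pages capped at %#lx\n", extra);	/* 0xa00000 */
    	return 0;
    }
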
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 91f5b330dcc6..d9c945ee1100 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -379,11 +379,6 @@ void xen_timer_resume(void)
}
}
-static const struct pv_time_ops xen_time_ops __initconst = {
- .sched_clock = xen_sched_clock,
- .steal_clock = xen_steal_clock,
-};
-
static struct pvclock_vsyscall_time_info *xen_clock __read_mostly;
static u64 xen_clock_value_saved;
@@ -525,17 +520,24 @@ static void __init xen_time_init(void)
pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}
-void __init xen_init_time_ops(void)
+static void __init xen_init_time_common(void)
{
xen_sched_clock_offset = xen_clocksource_read();
- pv_ops.time = xen_time_ops;
+ static_call_update(pv_steal_clock, xen_steal_clock);
+ paravirt_set_sched_clock(xen_sched_clock);
+
+ x86_platform.calibrate_tsc = xen_tsc_khz;
+ x86_platform.get_wallclock = xen_get_wallclock;
+}
+
+void __init xen_init_time_ops(void)
+{
+ xen_init_time_common();
x86_init.timers.timer_init = xen_time_init;
x86_init.timers.setup_percpu_clockev = x86_init_noop;
x86_cpuinit.setup_percpu_clockev = x86_init_noop;
- x86_platform.calibrate_tsc = xen_tsc_khz;
- x86_platform.get_wallclock = xen_get_wallclock;
/* Dom0 uses the native method to set the hardware RTC. */
if (!xen_initial_domain())
x86_platform.set_wallclock = xen_set_wallclock;
@@ -569,13 +571,11 @@ void __init xen_hvm_init_time_ops(void)
return;
}
- xen_sched_clock_offset = xen_clocksource_read();
- pv_ops.time = xen_time_ops;
+ xen_init_time_common();
+
x86_init.timers.setup_percpu_clockev = xen_time_init;
x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;
- x86_platform.calibrate_tsc = xen_tsc_khz;
- x86_platform.get_wallclock = xen_get_wallclock;
x86_platform.set_wallclock = xen_set_wallclock;
}
#endif
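
Editor's note: the shared xen_init_time_common() also switches steal-clock dispatch to the static-call machinery, whose general shape is worth a sketch. Hedged, kernel context assumed:

    #include <linux/static_call.h>
    #include <linux/types.h>

    static u64 native_steal_clock(int cpu)
    {
    	return 0;
    }

    /* One definition site with a default target... */
    DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);

    /* ...call sites compile to a patchable direct call... */
    static u64 steal_clock(int cpu)
    {
    	return static_call(pv_steal_clock)(cpu);
    }

    /* ...and init code retargets it, as xen_init_time_common() does:
     *	static_call_update(pv_steal_clock, xen_steal_clock);
     */
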
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 9ad6b7b82707..2332b2156993 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -7,6 +7,7 @@ config XTENSA
select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
select ARCH_HAS_DMA_SET_UNCACHED if MMU
+ select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_FRAME_POINTERS
@@ -84,6 +85,12 @@ config KASAN_SHADOW_OFFSET
hex
default 0x6e400000
+config CPU_BIG_ENDIAN
+ def_bool $(success,test "$(shell,echo __XTENSA_EB__ | $(CC) -E -P -)" = 1)
+
+config CPU_LITTLE_ENDIAN
+ def_bool !CPU_BIG_ENDIAN
+
menu "Processor type and features"
choice
@@ -387,6 +394,28 @@ config PARSE_BOOTPARAM
If unsure, say Y.
+choice
+ prompt "Semihosting interface"
+ default XTENSA_SIMCALL_ISS
+ depends on XTENSA_PLATFORM_ISS
+ help
+ Choose semihosting interface that will be used for serial port,
+ block device and networking.
+
+config XTENSA_SIMCALL_ISS
+ bool "simcall"
+ help
+ Use the simcall instruction. simcall is only available on simulators;
+ it does nothing on hardware.
+
+config XTENSA_SIMCALL_GDBIO
+ bool "GDBIO"
+ help
+ Use the break instruction. It is available on real hardware when GDB
+ is attached to it via JTAG.
+
+endchoice
+
config BLK_DEV_SIMDISK
tristate "Host file-based simulated block device support"
default n
@@ -466,7 +495,7 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
then enter your normal kernel breakpoints once the MMU was mapped
to the kernel mappings (0XC0000000).
- This unfortunately won't work for U-Boot and likely also wont
+ This unfortunately won't work for U-Boot and likely also won't
work for using KEXEC to have a hot kernel ready for doing a
KDUMP.
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index cf0940708702..e9c8f064c44d 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -19,12 +19,8 @@ variant-y := $(patsubst "%",%,$(CONFIG_XTENSA_VARIANT_NAME))
VARIANT = $(variant-y)
export VARIANT
-# Test for cross compiling
-
ifneq ($(VARIANT),)
- COMPILE_ARCH = $(shell uname -m)
-
- ifneq ($(COMPILE_ARCH), xtensa)
+ ifdef cross_compiling
ifndef CROSS_COMPILE
CROSS_COMPILE = xtensa_$(VARIANT)-
endif
@@ -52,14 +48,7 @@ ifneq ($(CONFIG_LD_NO_RELAX),)
KBUILD_LDFLAGS := --no-relax
endif
-ifeq ($(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#"),1)
-CHECKFLAGS += -D__XTENSA_EB__
-KBUILD_CPPFLAGS += -DCONFIG_CPU_BIG_ENDIAN
-endif
-ifeq ($(shell echo __XTENSA_EL__ | $(CC) -E - | grep -v "\#"),1)
-CHECKFLAGS += -D__XTENSA_EL__
-KBUILD_CPPFLAGS += -DCONFIG_CPU_LITTLE_ENDIAN
-endif
+CHECKFLAGS += -D$(if $(CONFIG_CPU_BIG_ENDIAN),__XTENSA_EB__,__XTENSA_EL__)
vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index f6bb352f94b4..a65b7a9ebff2 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -12,10 +12,6 @@
KBUILD_CFLAGS += -fno-builtin -Iarch/$(ARCH)/boot/include
HOSTFLAGS += -Iarch/$(ARCH)/boot/include
-BIG_ENDIAN := $(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#")
-
-export BIG_ENDIAN
-
subdir-y := lib
targets += vmlinux.bin vmlinux.bin.gz
targets += uImage xipImage
diff --git a/arch/xtensa/boot/boot-elf/Makefile b/arch/xtensa/boot/boot-elf/Makefile
index f7c775d53012..faec2002923b 100644
--- a/arch/xtensa/boot/boot-elf/Makefile
+++ b/arch/xtensa/boot/boot-elf/Makefile
@@ -4,15 +4,10 @@
# for more details.
#
-ifeq ($(BIG_ENDIAN),1)
-OBJCOPY_ARGS := -O elf32-xtensa-be
-else
-OBJCOPY_ARGS := -O elf32-xtensa-le
-endif
+OBJCOPY_ARGS := -O $(if $(CONFIG_CPU_BIG_ENDIAN),elf32-xtensa-be,elf32-xtensa-le)
-export OBJCOPY_ARGS
-export CPPFLAGS_boot.lds += -P -C
-export KBUILD_AFLAGS += -mtext-section-literals
+CPPFLAGS_boot.lds += -P -C
+KBUILD_AFLAGS += -mtext-section-literals
boot-y := bootstrap.o
targets += $(boot-y) boot.lds
diff --git a/arch/xtensa/boot/boot-redboot/Makefile b/arch/xtensa/boot/boot-redboot/Makefile
index 07cb24afedc2..1d1d46215b1c 100644
--- a/arch/xtensa/boot/boot-redboot/Makefile
+++ b/arch/xtensa/boot/boot-redboot/Makefile
@@ -4,11 +4,7 @@
# for more details.
#
-ifeq ($(BIG_ENDIAN),1)
-OBJCOPY_ARGS := -O elf32-xtensa-be
-else
-OBJCOPY_ARGS := -O elf32-xtensa-le
-endif
+OBJCOPY_ARGS := -O $(if $(CONFIG_CPU_BIG_ENDIAN),elf32-xtensa-be,elf32-xtensa-le)
LD_ARGS = -T $(srctree)/$(obj)/boot.ld
diff --git a/arch/xtensa/configs/xip_kc705_defconfig b/arch/xtensa/configs/xip_kc705_defconfig
index 4f1ff9531f6a..062148e17135 100644
--- a/arch/xtensa/configs/xip_kc705_defconfig
+++ b/arch/xtensa/configs/xip_kc705_defconfig
@@ -72,7 +72,6 @@ CONFIG_MARVELL_PHY=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
-CONFIG_DEVKMEM=y
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 744c2f463845..4361fe4247e3 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -43,7 +43,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
/**
* atomic_set - set atomic variable
@@ -52,11 +52,11 @@
*
* Atomically sets the value of @v to @i.
*/
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#if XCHAL_HAVE_EXCLUSIVE
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -74,7 +74,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -95,7 +95,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
int result; \
@@ -116,7 +116,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#elif XCHAL_HAVE_S32C1I
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t * v) \
+static inline void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
@@ -135,7 +135,7 @@ static inline void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t * v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
@@ -157,7 +157,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t * v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned long tmp; \
int result; \
@@ -180,7 +180,7 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
#else /* XCHAL_HAVE_S32C1I */
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t * v) \
+static inline void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned int vval; \
\
@@ -198,7 +198,7 @@ static inline void atomic_##op(int i, atomic_t * v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t * v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned int vval; \
\
@@ -218,7 +218,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t * v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned int tmp, vval; \
\
@@ -257,7 +257,7 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif /* _XTENSA_ATOMIC_H */
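
Editor's note: the arch_ prefix is what lets the generic instrumented wrappers interpose — the unprefixed atomic_*() names become thin shims that run the KASAN/KCSAN hooks and then call the arch implementation. A hedged sketch of the wrapper shape, mirroring asm-generic/atomic-instrumented.h:

    /* Kernel context assumed: the generic layer wraps every
     * arch_atomic_* op with instrumentation before dispatching. */
    static __always_inline int atomic_read(const atomic_t *v)
    {
    	instrument_atomic_read(v, sizeof(*v));
    	return arch_atomic_read(v);
    }

    static __always_inline void atomic_add(int i, atomic_t *v)
    {
    	instrument_atomic_read_write(v, sizeof(*v));
    	arch_atomic_add(i, v);
    }
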
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index a175f8aec3fb..3699e2818efb 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -80,7 +80,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
}
}
-#define cmpxchg(ptr,o,n) \
+#define arch_cmpxchg(ptr,o,n) \
({ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
@@ -97,7 +97,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
case 4:
return __cmpxchg_u32(ptr, old, new);
default:
- return __cmpxchg_local_generic(ptr, old, new, size);
+ return __generic_cmpxchg_local(ptr, old, new, size);
}
return old;
@@ -107,11 +107,11 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
-#define cmpxchg_local(ptr, o, n) \
- ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+#define arch_cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
/*
* xchg_u32
@@ -169,7 +169,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#endif
}
-#define xchg(ptr,x) \
+#define arch_xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index 05cb13dfe6f4..9793b49fc641 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -73,7 +73,7 @@
_j 2f
.align 4
-1: movi a2, 0x10000000
+1:
#if CONFIG_KERNEL_LOAD_ADDRESS < 0x40000000ul
#define TEMP_MAPPING_VADDR 0x40000000
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 37ce25ef92d6..493eb7083b1a 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -192,10 +192,6 @@ static inline unsigned long ___pa(unsigned long va)
#define pfn_valid(pfn) \
((pfn) >= ARCH_PFN_OFFSET && ((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
-#ifdef CONFIG_DISCONTIGMEM
-# error CONFIG_DISCONTIGMEM not supported
-#endif
-
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index d3a22da4d2c9..eeb2de3a89e5 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -25,7 +25,6 @@
(pmd_val(*(pmdp)) = ((unsigned long)ptep))
#define pmd_populate(mm, pmdp, page) \
(pmd_val(*(pmdp)) = ((unsigned long)page_to_virt(page)))
-#define pmd_pgtable(pmd) pmd_page(pmd)
static inline pgd_t*
pgd_alloc(struct mm_struct *mm)
@@ -63,7 +62,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
return page;
}
-#define pmd_pgtable(pmd) pmd_page(pmd)
#endif /* CONFIG_MMU */
#endif /* _XTENSA_PGALLOC_H */
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 4dc04e6c01d7..bd5aeb795567 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -59,7 +59,6 @@
#define PTRS_PER_PGD 1024
#define PGD_ORDER 0
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
#define FIRST_USER_PGD_NR (FIRST_USER_ADDRESS >> PGDIR_SHIFT)
#ifdef CONFIG_MMU
@@ -280,7 +279,9 @@ static inline pte_t pte_mkyoung(pte_t pte)
static inline pte_t pte_mkwrite(pte_t pte)
{ pte_val(pte) |= _PAGE_WRITABLE; return pte; }
-#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK))
+#define pgprot_noncached(prot) \
+ ((__pgprot((pgprot_val(prot) & ~_PAGE_CA_MASK) | \
+ _PAGE_CA_BYPASS)))
/*
* Conversion functions: convert a page and protection to a page entry,
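
Editor's note: the pgprot_noncached() fix reflects that CA is an encoded field, not a set of independent flags — masking it to zero selects whatever attribute the zero encoding means, so the bypass value has to be written in explicitly. The shape of the fix, with hypothetical field values:

    /* Read-modify-write on an encoded attribute field: clear the whole
     * field, then set the intended encoding. Values are hypothetical. */
    #define _PAGE_CA_MASK	0x0cUL
    #define _PAGE_CA_BYPASS	0x04UL

    static unsigned long make_noncached(unsigned long prot)
    {
    	return (prot & ~_PAGE_CA_MASK) | _PAGE_CA_BYPASS;
    }
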
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h
index 856e2da2e397..573df8cea200 100644
--- a/arch/xtensa/include/asm/tlbflush.h
+++ b/arch/xtensa/include/asm/tlbflush.h
@@ -26,8 +26,8 @@
*
* - flush_tlb_all() flushes all processes TLB entries
* - flush_tlb_mm(mm) flushes the specified mm context TLB entries
- * - flush_tlb_page(mm, vmaddr) flushes a single page
- * - flush_tlb_range(mm, start, end) flushes a range of pages
+ * - flush_tlb_page(vma, page) flushes a single page
+ * - flush_tlb_range(vma, vmaddr, end) flushes a range of pages
*/
void local_flush_tlb_all(void);
diff --git a/arch/xtensa/include/asm/unaligned.h b/arch/xtensa/include/asm/unaligned.h
deleted file mode 100644
index 8e7ed046bfed..000000000000
--- a/arch/xtensa/include/asm/unaligned.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Xtensa doesn't handle unaligned accesses efficiently.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-#ifndef _ASM_XTENSA_UNALIGNED_H
-#define _ASM_XTENSA_UNALIGNED_H
-
-#include <asm/byteorder.h>
-
-#ifdef __LITTLE_ENDIAN
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/be_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#else
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/le_byteshift.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#endif
-
-#endif /* _ASM_XTENSA_UNALIGNED_H */
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index e5e643752947..b3a22095371b 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -106,6 +106,9 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index c426b846beef..45cc0ae0af6f 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -100,37 +100,6 @@
LOAD_CP_REGS_TAB(7)
/*
- * coprocessor_flush(struct thread_info*, index)
- * a2 a3
- *
- * Save coprocessor registers for coprocessor 'index'.
- * The register values are saved to or loaded from the coprocessor area
- * inside the task_info structure.
- *
- * Note that this function doesn't update the coprocessor_owner information!
- *
- */
-
-ENTRY(coprocessor_flush)
-
- /* reserve 4 bytes on stack to save a0 */
- abi_entry(4)
-
- s32i a0, a1, 0
- movi a0, .Lsave_cp_regs_jump_table
- addx8 a3, a3, a0
- l32i a4, a3, 4
- l32i a3, a3, 0
- add a2, a2, a4
- beqz a3, 1f
- callx0 a3
-1: l32i a0, a1, 0
-
- abi_ret(4)
-
-ENDPROC(coprocessor_flush)
-
-/*
* Entry condition:
*
* a0: trashed, original value saved on stack (PT_AREG0)
@@ -245,6 +214,39 @@ ENTRY(fast_coprocessor)
ENDPROC(fast_coprocessor)
+ .text
+
+/*
+ * coprocessor_flush(struct thread_info*, index)
+ * a2 a3
+ *
+ * Save coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the task_info structure.
+ *
+ * Note that this function doesn't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_flush)
+
+ /* reserve 4 bytes on stack to save a0 */
+ abi_entry(4)
+
+ s32i a0, a1, 0
+ movi a0, .Lsave_cp_regs_jump_table
+ addx8 a3, a3, a0
+ l32i a4, a3, 4
+ l32i a3, a3, 0
+ add a2, a2, a4
+ beqz a3, 1f
+ callx0 a3
+1: l32i a0, a1, 0
+
+ abi_ret(4)
+
+ENDPROC(coprocessor_flush)
+
.data
ENTRY(coprocessor_owner)
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index e0c1fac0910f..b9b81e76beea 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -212,7 +212,7 @@ ENTRY(_startup)
*
* The linker script used to build the Linux kernel image
* creates a table located at __boot_reloc_table_start
- * that contans the information what data needs to be unpacked.
+ * that contains the information about what data needs to be unpacked.
*
* Uses a2-a7.
*/
@@ -222,7 +222,7 @@ ENTRY(_startup)
1: beq a2, a3, 3f # no more entries?
l32i a4, a2, 0 # start destination (in RAM)
- l32i a5, a2, 4 # end desination (in RAM)
+ l32i a5, a2, 4 # end destination (in RAM)
l32i a6, a2, 8 # start source (in ROM)
addi a2, a2, 12 # next entry
beq a4, a5, 1b # skip, empty entry
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index 3f32e275997a..62c900e400d6 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -76,7 +76,7 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
struct pci_controller *pci_ctrl = (struct pci_controller*) pdev->sysdata;
resource_size_t ioaddr = pci_resource_start(pdev, bar);
- if (pci_ctrl == 0)
+ if (!pci_ctrl)
return -EINVAL; /* should never happen */
/* Convert to an offset within this PCI controller */
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 9534ef515d74..060165340612 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -304,7 +304,7 @@ unsigned long get_wchan(struct task_struct *p)
unsigned long stack_page = (unsigned long) task_stack_page(p);
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
+ if (!p || p == current || task_is_running(p))
return 0;
sp = p->thread.sp;
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index cd85a7a2722b..1254da07ead1 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -145,7 +145,6 @@ void secondary_start_kernel(void)
cpumask_set_cpu(cpu, mm_cpumask(mm));
enter_lazy_tlb(mm, current);
- preempt_disable();
trace_hardirqs_off();
calibrate_delay();
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index 2c415fce6801..201356faa7e6 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -17,7 +17,6 @@
*/
#include <linux/uaccess.h>
#include <asm/syscall.h>
-#include <asm/unistd.h>
#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/errno.h>
@@ -28,12 +27,9 @@
#include <linux/sched/mm.h>
#include <linux/shm.h>
-syscall_t sys_call_table[__NR_syscalls] /* FIXME __cacheline_aligned */= {
- [0 ... __NR_syscalls - 1] = (syscall_t)&sys_ni_syscall,
-
-#define __SYSCALL(nr, entry, nargs)[nr] = (syscall_t)entry,
+syscall_t sys_call_table[] /* FIXME __cacheline_aligned */= {
+#define __SYSCALL(nr, entry) (syscall_t)entry,
#include <asm/syscall_table.h>
-#undef __SYSCALL
};
#define COLOUR_ALIGN(addr, pgoff) \
diff --git a/arch/xtensa/kernel/syscalls/Makefile b/arch/xtensa/kernel/syscalls/Makefile
index 285aaba832d9..6713c65a25e1 100644
--- a/arch/xtensa/kernel/syscalls/Makefile
+++ b/arch/xtensa/kernel/syscalls/Makefile
@@ -6,20 +6,14 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
$(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
syscall := $(src)/syscall.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abis_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '$(syshdr_offset_$(basetarget))'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
- '$(systbl_abis_$(basetarget))' \
- '$(systbl_abi_$(basetarget))' \
- '$(systbl_offset_$(basetarget))'
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@
$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 365a9b849224..235d67d6ceb4 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -413,3 +413,7 @@
440 common process_madvise sys_process_madvise
441 common epoll_pwait2 sys_epoll_pwait2
442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
diff --git a/arch/xtensa/kernel/syscalls/syscallhdr.sh b/arch/xtensa/kernel/syscalls/syscallhdr.sh
deleted file mode 100644
index eebfb8a8ace6..000000000000
--- a/arch/xtensa/kernel/syscalls/syscallhdr.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_UAPI_ASM_XTENSA_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- printf "#ifndef %s\n" "${fileguard}"
- printf "#define %s\n" "${fileguard}"
- printf "\n"
-
- nxt=0
- while read nr abi name entry ; do
- if [ -z "$offset" ]; then
- printf "#define __NR_%s%s\t%s\n" \
- "${prefix}" "${name}" "${nr}"
- else
- printf "#define __NR_%s%s\t(%s + %s)\n" \
- "${prefix}" "${name}" "${offset}" "${nr}"
- fi
- nxt=$((nr+1))
- done
-
- printf "\n"
- printf "#ifdef __KERNEL__\n"
- printf "#define __NR_syscalls\t%s\n" "${nxt}"
- printf "#endif\n"
- printf "\n"
- printf "#endif /* %s */\n" "${fileguard}"
-) > "$out"
diff --git a/arch/xtensa/kernel/syscalls/syscalltbl.sh b/arch/xtensa/kernel/syscalls/syscalltbl.sh
deleted file mode 100644
index 85d78d9309ad..000000000000
--- a/arch/xtensa/kernel/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-my_abi="$4"
-offset="$5"
-
-emit() {
- t_nxt="$1"
- t_nr="$2"
- t_entry="$3"
-
- while [ $t_nxt -lt $t_nr ]; do
- printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
- t_nxt=$((t_nxt+1))
- done
- printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
-}
-
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- nxt=0
- if [ -z "$offset" ]; then
- offset=0
- fi
-
- while read nr abi name entry ; do
- emit $((nxt+offset)) $((nr+offset)) $entry
- nxt=$((nr+1))
- done
-) > "$out"
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 085b8c77b9d9..19e5a478a7e8 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -81,13 +81,8 @@ static inline void kmap_invalidate_coherent(struct page *page,
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
unsigned long vaddr, unsigned long *paddr)
{
- if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
- *paddr = page_to_phys(page);
- return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
- } else {
- *paddr = 0;
- return page_to_virt(page);
- }
+ *paddr = page_to_phys(page);
+ return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
}
void clear_user_highpage(struct page *page, unsigned long vaddr)
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 7666408ce12a..95a74890c7e9 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -112,8 +112,11 @@ good_area:
*/
fault = handle_mm_fault(vma, address, flags, regs);
- if (fault_signal_pending(fault, regs))
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ goto bad_page_fault;
return;
+ }
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 2daeba9e454e..6a32b2cf2718 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -119,7 +119,6 @@ void __init mem_init(void)
memblock_free_all();
- mem_init_print_info(NULL);
pr_info("virtual kernel memory layout:\n"
#ifdef CONFIG_KASAN
" kasan : 0x%08lx - 0x%08lx (%5lu MB)\n"
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index 25cd67debee6..0527bf6e3211 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -118,20 +118,13 @@ ENTRY(clear_page_alias)
abi_entry_default
- /* Skip setting up a temporary DTLB if not aliased low page. */
-
movi a5, PAGE_OFFSET
- movi a6, 0
- beqz a3, 1f
-
- /* Setup a temporary DTLB for the addr. */
-
addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE)
mov a4, a2
wdtlb a6, a2
dsync
-1: movi a3, 0
+ movi a3, 0
__loopi a2, a7, PAGE_SIZE, 32
s32i a3, a2, 0
s32i a3, a2, 4
@@ -143,12 +136,9 @@ ENTRY(clear_page_alias)
s32i a3, a2, 28
__endla a2, a7, 32
- bnez a6, 1f
- abi_ret_default
-
- /* We need to invalidate the temporary idtlb entry, if any. */
+ /* We need to invalidate the temporary dtlb entry. */
-1: idtlb a4
+ idtlb a4
dsync
abi_ret_default
@@ -166,22 +156,12 @@ ENTRY(copy_page_alias)
abi_entry_default
- /* Skip setting up a temporary DTLB for destination if not aliased. */
-
- movi a6, 0
- movi a7, 0
- beqz a4, 1f
-
/* Setup a temporary DTLB for destination. */
addi a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE)
wdtlb a6, a2
dsync
- /* Skip setting up a temporary DTLB for source if not aliased. */
-
-1: beqz a5, 1f
-
/* Setup a temporary DTLB for source. */
addi a7, a5, PAGE_KERNEL
@@ -219,17 +199,11 @@ ENTRY(copy_page_alias)
/* We need to invalidate any temporary mapping! */
- bnez a6, 1f
- bnez a7, 2f
- abi_ret_default
-
-1: addi a2, a2, -PAGE_SIZE
+ addi a2, a2, -PAGE_SIZE
idtlb a2
dsync
- bnez a7, 2f
- abi_ret_default
-2: addi a3, a3, -PAGE_SIZE+1
+ addi a3, a3, -PAGE_SIZE+1
idtlb a3
dsync
diff --git a/arch/xtensa/platforms/iss/console.c b/arch/xtensa/platforms/iss/console.c
index af81a62faba6..a3dda25a4e45 100644
--- a/arch/xtensa/platforms/iss/console.c
+++ b/arch/xtensa/platforms/iss/console.c
@@ -31,48 +31,23 @@
#define SERIAL_MAX_NUM_LINES 1
#define SERIAL_TIMER_VALUE (HZ / 10)
+static void rs_poll(struct timer_list *);
+
static struct tty_driver *serial_driver;
static struct tty_port serial_port;
-static struct timer_list serial_timer;
-
+static DEFINE_TIMER(serial_timer, rs_poll);
static DEFINE_SPINLOCK(timer_lock);
-static char *serial_version = "0.1";
-static char *serial_name = "ISS serial driver";
-
-/*
- * This routine is called whenever a serial port is opened. It
- * enables interrupts for a serial port, linking in its async structure into
- * the IRQ chain. It also performs the serial-specific
- * initialization for the tty structure.
- */
-
-static void rs_poll(struct timer_list *);
-
static int rs_open(struct tty_struct *tty, struct file * filp)
{
- tty->port = &serial_port;
spin_lock_bh(&timer_lock);
- if (tty->count == 1) {
- timer_setup(&serial_timer, rs_poll, 0);
+ if (tty->count == 1)
mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE);
- }
spin_unlock_bh(&timer_lock);
return 0;
}
-
-/*
- * ------------------------------------------------------------
- * iss_serial_close()
- *
- * This routine is called when the serial port gets closed. First, we
- * wait for the last remaining data to be sent. Then, we unlink its
- * async structure from the interrupt chain if necessary, and we free
- * that IRQ if nothing is left in the chain.
- * ------------------------------------------------------------
- */
static void rs_close(struct tty_struct *tty, struct file * filp)
{
spin_lock_bh(&timer_lock);
@@ -149,7 +124,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
static int rs_proc_show(struct seq_file *m, void *v)
{
- seq_printf(m, "serinfo:1.0 driver:%s\n", serial_version);
+ seq_printf(m, "serinfo:1.0 driver:0.1\n");
return 0;
}
@@ -166,14 +141,12 @@ static const struct tty_operations serial_ops = {
.proc_show = rs_proc_show,
};
-int __init rs_init(void)
+static int __init rs_init(void)
{
tty_port_init(&serial_port);
serial_driver = alloc_tty_driver(SERIAL_MAX_NUM_LINES);
- pr_info("%s %s\n", serial_name, serial_version);
-
/* Initialize the tty_driver structure */
serial_driver->driver_name = "iss_serial";
@@ -198,11 +171,7 @@ int __init rs_init(void)
static __exit void rs_exit(void)
{
- int error;
-
- if ((error = tty_unregister_driver(serial_driver)))
- pr_err("ISS_SERIAL: failed to unregister serial driver (%d)\n",
- error);
+ tty_unregister_driver(serial_driver);
put_tty_driver(serial_driver);
tty_port_destroy(&serial_port);
}
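
Editor's note: dropping the open-coded timer_setup() works because DEFINE_TIMER() statically initializes the timer together with its callback, leaving only mod_timer()/del_timer() for runtime. A hedged kernel-context sketch of the pattern:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void poll_cb(struct timer_list *t);

    static DEFINE_TIMER(poll_timer, poll_cb);	/* statically initialized */

    static void poll_cb(struct timer_list *t)
    {
    	/* ... do the polling work, then re-arm ... */
    	mod_timer(&poll_timer, jiffies + HZ / 10);
    }

    static void poll_start(void)
    {
    	mod_timer(&poll_timer, jiffies + HZ / 10);
    }
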
diff --git a/arch/xtensa/platforms/iss/include/platform/simcall-gdbio.h b/arch/xtensa/platforms/iss/include/platform/simcall-gdbio.h
new file mode 100644
index 000000000000..e642860e25a8
--- /dev/null
+++ b/arch/xtensa/platforms/iss/include/platform/simcall-gdbio.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Cadence Design Systems Inc. */
+
+#ifndef _XTENSA_PLATFORM_ISS_SIMCALL_GDBIO_H
+#define _XTENSA_PLATFORM_ISS_SIMCALL_GDBIO_H
+
+/*
+ * System call like services offered by the GDBIO host.
+ */
+
+#define SYS_open -2
+#define SYS_close -3
+#define SYS_read -4
+#define SYS_write -5
+#define SYS_lseek -6
+
+static int errno;
+
+static inline int __simc(int a, int b, int c, int d)
+{
+ register int a1 asm("a2") = a;
+ register int b1 asm("a6") = b;
+ register int c1 asm("a3") = c;
+ register int d1 asm("a4") = d;
+ __asm__ __volatile__ (
+ "break 1, 14\n"
+ : "+r"(a1), "+r"(c1)
+ : "r"(b1), "r"(d1)
+ : "memory");
+ errno = c1;
+ return a1;
+}
+
+#endif /* _XTENSA_PLATFORM_ISS_SIMCALL_GDBIO_H */
diff --git a/arch/xtensa/platforms/iss/include/platform/simcall-iss.h b/arch/xtensa/platforms/iss/include/platform/simcall-iss.h
new file mode 100644
index 000000000000..5a1e7a1f182e
--- /dev/null
+++ b/arch/xtensa/platforms/iss/include/platform/simcall-iss.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Cadence Design Systems Inc. */
+
+#ifndef _XTENSA_PLATFORM_ISS_SIMCALL_ISS_H
+#define _XTENSA_PLATFORM_ISS_SIMCALL_ISS_H
+
+/*
+ * System call like services offered by the simulator host.
+ */
+
+#define SYS_nop 0 /* unused */
+#define SYS_exit 1 /*x*/
+#define SYS_fork 2
+#define SYS_read 3 /*x*/
+#define SYS_write 4 /*x*/
+#define SYS_open 5 /*x*/
+#define SYS_close 6 /*x*/
+#define SYS_rename 7 /*x 38 - waitpid */
+#define SYS_creat 8 /*x*/
+#define SYS_link 9 /*x (not implemented on WIN32) */
+#define SYS_unlink 10 /*x*/
+#define SYS_execv 11 /* n/a - execve */
+#define SYS_execve 12 /* 11 - chdir */
+#define SYS_pipe 13 /* 42 - time */
+#define SYS_stat 14 /* 106 - mknod */
+#define SYS_chmod 15
+#define SYS_chown 16 /* 202 - lchown */
+#define SYS_utime 17 /* 30 - break */
+#define SYS_wait 18 /* n/a - oldstat */
+#define SYS_lseek 19 /*x*/
+#define SYS_getpid 20
+#define SYS_isatty 21 /* n/a - mount */
+#define SYS_fstat 22 /* 108 - oldumount */
+#define SYS_time 23 /* 13 - setuid */
+#define SYS_gettimeofday 24 /*x 78 - getuid (not implemented on WIN32) */
+#define SYS_times 25 /*X 43 - stime (Xtensa-specific implementation) */
+#define SYS_socket 26
+#define SYS_sendto 27
+#define SYS_recvfrom 28
+#define SYS_select_one 29 /* not compatible with select(), one file descriptor at a time */
+#define SYS_bind 30
+#define SYS_ioctl 31
+
+#define SYS_iss_argc 1000 /* returns value of argc */
+#define SYS_iss_argv_size 1001 /* bytes needed for argv & arg strings */
+#define SYS_iss_set_argv 1002 /* saves argv & arg strings at given addr */
+
+/*
+ * SYS_select_one specifiers
+ */
+
+#define XTISS_SELECT_ONE_READ 1
+#define XTISS_SELECT_ONE_WRITE 2
+#define XTISS_SELECT_ONE_EXCEPT 3
+
+static int errno;
+
+static inline int __simc(int a, int b, int c, int d)
+{
+ register int a1 asm("a2") = a;
+ register int b1 asm("a3") = b;
+ register int c1 asm("a4") = c;
+ register int d1 asm("a5") = d;
+ __asm__ __volatile__ (
+ "simcall\n"
+ : "+r"(a1), "+r"(b1)
+ : "r"(c1), "r"(d1)
+ : "memory");
+ errno = b1;
+ return a1;
+}
+
+#endif /* _XTENSA_PLATFORM_ISS_SIMCALL_ISS_H */
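
The ISS flavour uses the dedicated "simcall" instruction with arguments in a2 through a5; the result comes back in a2 and the host errno in a3. SYS_select_one is the host's stand-in for select(): it tests exactly one descriptor against one condition. A minimal sketch mirroring simc_poll() but for writability; iss_can_write() is a hypothetical helper, not kernel API:

    /* Hypothetical sketch: non-blocking writability test against the
     * ISS host, one descriptor at a time, zero timeout.
     */
    static int iss_can_write(int fd)
    {
            long timeval[2] = { 0, 0 };     /* sec, usec: poll, don't wait */

            return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_WRITE,
                          (int)&timeval);
    }
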
diff --git a/arch/xtensa/platforms/iss/include/platform/simcall.h b/arch/xtensa/platforms/iss/include/platform/simcall.h
index f42870ab551b..e1ec50ce39ee 100644
--- a/arch/xtensa/platforms/iss/include/platform/simcall.h
+++ b/arch/xtensa/platforms/iss/include/platform/simcall.h
@@ -6,82 +6,29 @@
* for more details.
*
* Copyright (C) 2001 Tensilica Inc.
- * Copyright (C) 2017 Cadence Design Systems Inc.
+ * Copyright (C) 2017 - 2021 Cadence Design Systems Inc.
*/
#ifndef _XTENSA_PLATFORM_ISS_SIMCALL_H
#define _XTENSA_PLATFORM_ISS_SIMCALL_H
+#include <linux/bug.h>
-/*
- * System call like services offered by the simulator host.
- */
-
-#define SYS_nop 0 /* unused */
-#define SYS_exit 1 /*x*/
-#define SYS_fork 2
-#define SYS_read 3 /*x*/
-#define SYS_write 4 /*x*/
-#define SYS_open 5 /*x*/
-#define SYS_close 6 /*x*/
-#define SYS_rename 7 /*x 38 - waitpid */
-#define SYS_creat 8 /*x*/
-#define SYS_link 9 /*x (not implemented on WIN32) */
-#define SYS_unlink 10 /*x*/
-#define SYS_execv 11 /* n/a - execve */
-#define SYS_execve 12 /* 11 - chdir */
-#define SYS_pipe 13 /* 42 - time */
-#define SYS_stat 14 /* 106 - mknod */
-#define SYS_chmod 15
-#define SYS_chown 16 /* 202 - lchown */
-#define SYS_utime 17 /* 30 - break */
-#define SYS_wait 18 /* n/a - oldstat */
-#define SYS_lseek 19 /*x*/
-#define SYS_getpid 20
-#define SYS_isatty 21 /* n/a - mount */
-#define SYS_fstat 22 /* 108 - oldumount */
-#define SYS_time 23 /* 13 - setuid */
-#define SYS_gettimeofday 24 /*x 78 - getuid (not implemented on WIN32) */
-#define SYS_times 25 /*X 43 - stime (Xtensa-specific implementation) */
-#define SYS_socket 26
-#define SYS_sendto 27
-#define SYS_recvfrom 28
-#define SYS_select_one 29 /* not compitible select, one file descriptor at the time */
-#define SYS_bind 30
-#define SYS_ioctl 31
-
-#define SYS_iss_argc 1000 /* returns value of argc */
-#define SYS_iss_argv_size 1001 /* bytes needed for argv & arg strings */
-#define SYS_iss_set_argv 1002 /* saves argv & arg strings at given addr */
-
-/*
- * SYS_select_one specifiers
- */
-
-#define XTISS_SELECT_ONE_READ 1
-#define XTISS_SELECT_ONE_WRITE 2
-#define XTISS_SELECT_ONE_EXCEPT 3
-
-static int errno;
-
-static inline int __simc(int a, int b, int c, int d)
-{
- register int a1 asm("a2") = a;
- register int b1 asm("a3") = b;
- register int c1 asm("a4") = c;
- register int d1 asm("a5") = d;
- __asm__ __volatile__ (
- "simcall\n"
- : "+r"(a1), "+r"(b1)
- : "r"(c1), "r"(d1)
- : "memory");
- errno = b1;
- return a1;
-}
+#ifdef CONFIG_XTENSA_SIMCALL_ISS
+#include <platform/simcall-iss.h>
+#endif
+#ifdef CONFIG_XTENSA_SIMCALL_GDBIO
+#include <platform/simcall-gdbio.h>
+#endif
static inline int simc_exit(int exit_code)
{
+#ifdef SYS_exit
return __simc(SYS_exit, exit_code, 0, 0);
+#else
+ WARN_ONCE(1, "%s: not implemented\n", __func__);
+ return -1;
+#endif
}
static inline int simc_open(const char *file, int flags, int mode)
@@ -96,7 +43,12 @@ static inline int simc_close(int fd)
static inline int simc_ioctl(int fd, int request, void *arg)
{
+#ifdef SYS_ioctl
return __simc(SYS_ioctl, fd, request, (int) arg);
+#else
+ WARN_ONCE(1, "%s: not implemented\n", __func__);
+ return -1;
+#endif
}
static inline int simc_read(int fd, void *buf, size_t count)
@@ -111,9 +63,14 @@ static inline int simc_write(int fd, const void *buf, size_t count)
static inline int simc_poll(int fd)
{
+#ifdef SYS_select_one
long timeval[2] = { 0, 0 };
return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&timeval);
+#else
+ WARN_ONCE(1, "%s: not implemented\n", __func__);
+ return -1;
+#endif
}
static inline int simc_lseek(int fd, uint32_t off, int whence)
@@ -123,18 +80,31 @@ static inline int simc_lseek(int fd, uint32_t off, int whence)
static inline int simc_argc(void)
{
+#ifdef SYS_iss_argc
return __simc(SYS_iss_argc, 0, 0, 0);
+#else
+ WARN_ONCE(1, "%s: not implemented\n", __func__);
+ return 0;
+#endif
}
static inline int simc_argv_size(void)
{
+#ifdef SYS_iss_argv_size
return __simc(SYS_iss_argv_size, 0, 0, 0);
+#else
+ WARN_ONCE(1, "%s: not implemented\n", __func__);
+ return 0;
+#endif
}
static inline void simc_argv(void *buf)
{
+#ifdef SYS_iss_set_argv
__simc(SYS_iss_set_argv, (int)buf, 0, 0);
+#else
+ WARN_ONCE(1, "%s: not implemented\n", __func__);
+#endif
}
#endif /* _XTENSA_PLATFORM_ISS_SIMCALL_H */
-
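
After this refactor the SYS_* names are provided (or withheld) by whichever backend header Kconfig selects, so a service a backend lacks compiles into a WARN_ONCE() stub returning -1 (or 0 for the argc/argv helpers) instead of breaking the build. Callers therefore have to treat those fallback values as "service absent". A hedged sketch of such a caller, assuming <linux/slab.h> is available; fetch_sim_argv() is a hypothetical name:

    /* Hypothetical sketch: fetch the simulator's argv block if the
     * active simcall backend implements the SYS_iss_* services; under
     * GDBIO, simc_argv_size() is a stub that returns 0.
     */
    static void *fetch_sim_argv(void)
    {
            void *buf;
            int size = simc_argv_size();

            if (size <= 0)
                    return NULL;            /* service missing or empty */
            buf = kmalloc(size, GFP_KERNEL);
            if (buf)
                    simc_argv(buf);         /* host writes argv into buf */
            return buf;
    }
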
diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c
index ed519aee0ec8..d3433e1bb94e 100644
--- a/arch/xtensa/platforms/iss/setup.c
+++ b/arch/xtensa/platforms/iss/setup.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
+#include <linux/panic_notifier.h>
#include <linux/printk.h>
#include <linux/string.h>
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index fc09be7b1347..3cdfa00738e0 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -27,7 +27,6 @@
struct simdisk {
const char *filename;
spinlock_t lock;
- struct request_queue *queue;
struct gendisk *gd;
struct proc_dir_entry *procfile;
int users;
@@ -266,21 +265,13 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
spin_lock_init(&dev->lock);
dev->users = 0;
- dev->queue = blk_alloc_queue(NUMA_NO_NODE);
- if (dev->queue == NULL) {
- pr_err("blk_alloc_queue failed\n");
- goto out_alloc_queue;
- }
-
- dev->gd = alloc_disk(SIMDISK_MINORS);
- if (dev->gd == NULL) {
- pr_err("alloc_disk failed\n");
- goto out_alloc_disk;
- }
+ dev->gd = blk_alloc_disk(NUMA_NO_NODE);
+ if (!dev->gd)
+ return -ENOMEM;
dev->gd->major = simdisk_major;
dev->gd->first_minor = which;
+ dev->gd->minors = SIMDISK_MINORS;
dev->gd->fops = &simdisk_ops;
- dev->gd->queue = dev->queue;
dev->gd->private_data = dev;
snprintf(dev->gd->disk_name, 32, "simdisk%d", which);
set_capacity(dev->gd, 0);
@@ -288,12 +279,6 @@ static int __init simdisk_setup(struct simdisk *dev, int which,
dev->procfile = proc_create_data(tmp, 0644, procdir, &simdisk_proc_ops, dev);
return 0;
-
-out_alloc_disk:
- blk_cleanup_queue(dev->queue);
- dev->queue = NULL;
-out_alloc_queue:
- return -ENOMEM;
}
static int __init simdisk_init(void)
@@ -343,10 +328,10 @@ static void simdisk_teardown(struct simdisk *dev, int which,
char tmp[2] = { '0' + which, 0 };
simdisk_detach(dev);
- if (dev->gd)
+ if (dev->gd) {
del_gendisk(dev->gd);
- if (dev->queue)
- blk_cleanup_queue(dev->queue);
+ blk_cleanup_disk(dev->gd);
+ }
remove_proc_entry(tmp, procdir);
}
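
The simdisk conversion follows the then-new (v5.14-era) block API in which blk_alloc_disk() hands back a gendisk with its request_queue already attached, and blk_cleanup_disk() releases both, so the driver stops tracking dev->queue separately. A condensed, hypothetical sketch of the resulting lifecycle; example_setup()/example_teardown() are illustrative names and the real driver's proc registration is elided:

    /* Hedged sketch of the blk_alloc_disk()/blk_cleanup_disk() pairing
     * this patch adopts; not the actual simdisk functions.
     */
    static int example_setup(struct simdisk *dev, int which)
    {
            dev->gd = blk_alloc_disk(NUMA_NO_NODE);
            if (!dev->gd)
                    return -ENOMEM;
            dev->gd->major = simdisk_major;
            dev->gd->first_minor = which;
            dev->gd->minors = SIMDISK_MINORS;
            dev->gd->fops = &simdisk_ops;
            dev->gd->private_data = dev;
            add_disk(dev->gd);              /* returned void in v5.14 */
            return 0;
    }

    static void example_teardown(struct simdisk *dev)
    {
            del_gendisk(dev->gd);
            blk_cleanup_disk(dev->gd);      /* frees disk and queue */
    }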